Diffstat (limited to 'arch')
-rw-r--r-- arch/Kconfig | 6
-rw-r--r-- arch/alpha/oprofile/common.c | 22
-rw-r--r-- arch/arc/lib/strchr-700.S | 10
-rw-r--r-- arch/arm/Kconfig | 65
-rw-r--r-- arch/arm/Kconfig.debug | 574
-rw-r--r-- arch/arm/boot/compressed/head-shmobile.S | 43
-rw-r--r-- arch/arm/boot/dts/am335x-bone.dts | 29
-rw-r--r-- arch/arm/boot/dts/am335x-evm.dts | 29
-rw-r--r-- arch/arm/boot/dts/am335x-evmsk.dts | 19
-rw-r--r-- arch/arm/boot/dts/am33xx.dtsi | 143
-rw-r--r-- arch/arm/boot/dts/at91sam9n12ek.dts | 4
-rw-r--r-- arch/arm/boot/dts/at91sam9x5ek.dtsi | 5
-rw-r--r-- arch/arm/boot/dts/atlas6.dtsi | 6
-rw-r--r-- arch/arm/boot/dts/exynos5250.dtsi | 9
-rw-r--r-- arch/arm/boot/dts/exynos5440.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/imx28-evk.dts | 1
-rw-r--r-- arch/arm/boot/dts/keystone.dts | 29
-rw-r--r-- arch/arm/boot/dts/msm8660-surf.dts | 2
-rw-r--r-- arch/arm/boot/dts/msm8960-cdp.dts | 6
-rw-r--r-- arch/arm/boot/dts/omap5-uevm.dts | 78
-rw-r--r-- arch/arm/boot/dts/omap5.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/sama5d3xmb.dtsi | 8
-rw-r--r-- arch/arm/boot/dts/skeleton64.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/stih41x.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/tegra20-colibri-512.dtsi | 1
-rw-r--r-- arch/arm/boot/dts/tegra20-seaboard.dts | 3
-rw-r--r-- arch/arm/boot/dts/tegra20-trimslice.dts | 3
-rw-r--r-- arch/arm/boot/dts/tegra20-whistler.dts | 6
-rw-r--r-- arch/arm/boot/dts/tegra20.dtsi | 28
-rw-r--r-- arch/arm/boot/dts/wm8850-w70v2.dts | 3
-rw-r--r-- arch/arm/common/mcpm_head.S | 2
-rw-r--r-- arch/arm/common/vlock.S | 4
-rw-r--r-- arch/arm/configs/bockw_defconfig | 7
-rw-r--r-- arch/arm/configs/keystone_defconfig | 1
-rw-r--r-- arch/arm/configs/marzen_defconfig | 7
-rw-r--r-- arch/arm/configs/omap2plus_defconfig | 13
-rw-r--r-- arch/arm/configs/tegra_defconfig | 1
-rw-r--r-- arch/arm/include/asm/arch_timer.h | 14
-rw-r--r-- arch/arm/include/asm/assembler.h | 4
-rw-r--r-- arch/arm/include/asm/barrier.h | 32
-rw-r--r-- arch/arm/include/asm/cacheflush.h | 5
-rw-r--r-- arch/arm/include/asm/dma-contiguous.h | 2
-rw-r--r-- arch/arm/include/asm/elf.h | 2
-rw-r--r-- arch/arm/include/asm/hardware/debug-8250.S | 29
-rw-r--r-- arch/arm/include/asm/kvm_mmu.h | 2
-rw-r--r-- arch/arm/include/asm/mach/arch.h | 4
-rw-r--r-- arch/arm/include/asm/memblock.h | 3
-rw-r--r-- arch/arm/include/asm/module.h | 2
-rw-r--r-- arch/arm/include/asm/neon.h | 36
-rw-r--r-- arch/arm/include/asm/pgtable.h | 2
-rw-r--r-- arch/arm/include/asm/prom.h | 4
-rw-r--r-- arch/arm/include/asm/smp_plat.h | 3
-rw-r--r-- arch/arm/include/asm/spinlock.h | 53
-rw-r--r-- arch/arm/include/asm/switch_to.h | 10
-rw-r--r-- arch/arm/include/asm/thread_info.h | 11
-rw-r--r-- arch/arm/include/asm/tlb.h | 7
-rw-r--r-- arch/arm/include/asm/tlbflush.h | 181
-rw-r--r-- arch/arm/include/asm/types.h | 40
-rw-r--r-- arch/arm/include/asm/v7m.h | 12
-rw-r--r-- arch/arm/include/asm/xor.h | 73
-rw-r--r-- arch/arm/include/debug/8250.S | 54
-rw-r--r-- arch/arm/include/debug/8250_32.S | 27
-rw-r--r-- arch/arm/include/debug/bcm2835.S | 22
-rw-r--r-- arch/arm/include/debug/cns3xxx.S | 19
-rw-r--r-- arch/arm/include/debug/highbank.S | 17
-rw-r--r-- arch/arm/include/debug/keystone.S | 43
-rw-r--r-- arch/arm/include/debug/mvebu.S | 30
-rw-r--r-- arch/arm/include/debug/mxs.S | 27
-rw-r--r-- arch/arm/include/debug/nomadik.S | 20
-rw-r--r-- arch/arm/include/debug/nspire.S | 28
-rw-r--r-- arch/arm/include/debug/picoxcell.S | 19
-rw-r--r-- arch/arm/include/debug/pl01x.S (renamed from arch/arm/include/asm/hardware/debug-pl01x.S) | 9
-rw-r--r-- arch/arm/include/debug/pxa.S | 33
-rw-r--r-- arch/arm/include/debug/rockchip.S | 42
-rw-r--r-- arch/arm/include/debug/socfpga.S | 21
-rw-r--r-- arch/arm/include/debug/sunxi.S | 27
-rw-r--r-- arch/arm/include/debug/tegra.S | 29
-rw-r--r-- arch/arm/include/debug/u300.S | 18
-rw-r--r-- arch/arm/include/debug/ux500.S | 2
-rw-r--r-- arch/arm/include/debug/vexpress.S | 48
-rw-r--r-- arch/arm/kernel/Makefile | 2
-rw-r--r-- arch/arm/kernel/atags.h | 5
-rw-r--r-- arch/arm/kernel/atags_parse.c | 6
-rw-r--r-- arch/arm/kernel/bios32.c | 5
-rw-r--r-- arch/arm/kernel/devtree.c | 11
-rw-r--r-- arch/arm/kernel/entry-armv.S | 3
-rw-r--r-- arch/arm/kernel/entry-common.S | 4
-rw-r--r-- arch/arm/kernel/fiq.c | 9
-rw-r--r-- arch/arm/kernel/machine_kexec.c | 21
-rw-r--r-- arch/arm/kernel/module.c | 8
-rw-r--r-- arch/arm/kernel/perf_event.c | 10
-rw-r--r-- arch/arm/kernel/perf_event_cpu.c | 3
-rw-r--r-- arch/arm/kernel/process.c | 2
-rw-r--r-- arch/arm/kernel/setup.c | 21
-rw-r--r-- arch/arm/kernel/smp.c | 23
-rw-r--r-- arch/arm/kernel/smp_tlb.c | 10
-rw-r--r-- arch/arm/kernel/topology.c | 61
-rw-r--r-- arch/arm/kernel/traps.c | 66
-rw-r--r-- arch/arm/kernel/v7m.c | 19
-rw-r--r-- arch/arm/kvm/arm.c | 4
-rw-r--r-- arch/arm/kvm/coproc.c | 26
-rw-r--r-- arch/arm/kvm/coproc.h | 3
-rw-r--r-- arch/arm/kvm/coproc_a15.c | 6
-rw-r--r-- arch/arm/kvm/init.S | 2
-rw-r--r-- arch/arm/kvm/interrupts.S | 12
-rw-r--r-- arch/arm/kvm/mmio.c | 3
-rw-r--r-- arch/arm/kvm/mmu.c | 37
-rw-r--r-- arch/arm/kvm/reset.c | 2
-rw-r--r-- arch/arm/kvm/trace.h | 7
-rw-r--r-- arch/arm/lib/Makefile | 6
-rw-r--r-- arch/arm/lib/xor-neon.c | 42
-rw-r--r-- arch/arm/mach-at91/at91sam9x5.c | 2
-rw-r--r-- arch/arm/mach-at91/include/mach/at91_adc.h | 16
-rw-r--r-- arch/arm/mach-clps711x/Kconfig | 3
-rw-r--r-- arch/arm/mach-clps711x/Makefile | 1
-rw-r--r-- arch/arm/mach-clps711x/board-autcpu12.c | 6
-rw-r--r-- arch/arm/mach-clps711x/board-edb7211.c | 17
-rw-r--r-- arch/arm/mach-clps711x/board-fortunet.c | 85
-rw-r--r-- arch/arm/mach-clps711x/devices.c | 2
-rw-r--r-- arch/arm/mach-davinci/board-da850-evm.c | 6
-rw-r--r-- arch/arm/mach-davinci/board-dm355-leopard.c | 1
-rw-r--r-- arch/arm/mach-davinci/board-dm644x-evm.c | 1
-rw-r--r-- arch/arm/mach-davinci/board-dm646x-evm.c | 1
-rw-r--r-- arch/arm/mach-davinci/board-neuros-osd2.c | 1
-rw-r--r-- arch/arm/mach-davinci/cpuidle.c | 2
-rw-r--r-- arch/arm/mach-davinci/include/mach/debug-macro.S | 65
-rw-r--r-- arch/arm/mach-dove/common.c | 4
-rw-r--r-- arch/arm/mach-dove/include/mach/debug-macro.S | 19
-rw-r--r-- arch/arm/mach-dove/mpp.c | 2
-rw-r--r-- arch/arm/mach-ebsa110/include/mach/debug-macro.S | 22
-rw-r--r-- arch/arm/mach-ep93xx/Kconfig | 14
-rw-r--r-- arch/arm/mach-ep93xx/include/mach/debug-macro.S | 21
-rw-r--r-- arch/arm/mach-ep93xx/include/mach/uncompress.h | 14
-rw-r--r-- arch/arm/mach-footbridge/include/mach/debug-macro.S | 15
-rw-r--r-- arch/arm/mach-gemini/include/mach/debug-macro.S | 21
-rw-r--r-- arch/arm/mach-imx/mach-imx6q.c | 3
-rw-r--r-- arch/arm/mach-integrator/include/mach/debug-macro.S | 20
-rw-r--r-- arch/arm/mach-iop13xx/include/mach/debug-macro.S | 24
-rw-r--r-- arch/arm/mach-iop32x/include/mach/debug-macro.S | 21
-rw-r--r-- arch/arm/mach-iop33x/include/mach/debug-macro.S | 22
-rw-r--r-- arch/arm/mach-ixp4xx/include/mach/debug-macro.S | 26
-rw-r--r-- arch/arm/mach-keystone/Kconfig | 1
-rw-r--r-- arch/arm/mach-keystone/platsmp.c | 1
-rw-r--r-- arch/arm/mach-keystone/smc.S | 5
-rw-r--r-- arch/arm/mach-kirkwood/common.c | 24
-rw-r--r-- arch/arm/mach-kirkwood/include/mach/debug-macro.S | 19
-rw-r--r-- arch/arm/mach-lpc32xx/include/mach/debug-macro.S | 29
-rw-r--r-- arch/arm/mach-msm/Kconfig | 3
-rw-r--r-- arch/arm/mach-msm/devices-msm7x00.c | 6
-rw-r--r-- arch/arm/mach-msm/devices-msm7x30.c | 2
-rw-r--r-- arch/arm/mach-msm/devices-qsd8x50.c | 6
-rw-r--r-- arch/arm/mach-msm/gpiomux-v1.c | 33
-rw-r--r-- arch/arm/mach-msm/gpiomux.h | 10
-rw-r--r-- arch/arm/mach-mv78xx0/include/mach/debug-macro.S | 19
-rw-r--r-- arch/arm/mach-mvebu/platsmp.c | 51
-rw-r--r-- arch/arm/mach-omap2/Kconfig | 2
-rw-r--r-- arch/arm/mach-omap2/am33xx-restart.c | 4
-rw-r--r-- arch/arm/mach-omap2/board-2430sdp.c | 57
-rw-r--r-- arch/arm/mach-omap2/board-3430sdp.c | 83
-rw-r--r-- arch/arm/mach-omap2/board-am3517crane.c | 2
-rw-r--r-- arch/arm/mach-omap2/board-am3517evm.c | 113
-rw-r--r-- arch/arm/mach-omap2/board-cm-t35.c | 100
-rw-r--r-- arch/arm/mach-omap2/board-devkit8000.c | 96
-rw-r--r-- arch/arm/mach-omap2/board-h4.c | 48
-rw-r--r-- arch/arm/mach-omap2/board-igep0020.c | 36
-rw-r--r-- arch/arm/mach-omap2/board-ldp.c | 68
-rw-r--r-- arch/arm/mach-omap2/board-n8x0.c | 4
-rw-r--r-- arch/arm/mach-omap2/board-omap3beagle.c | 56
-rw-r--r-- arch/arm/mach-omap2/board-omap3evm.c | 87
-rw-r--r-- arch/arm/mach-omap2/board-omap3pandora.c | 48
-rw-r--r-- arch/arm/mach-omap2/board-omap3stalker.c | 61
-rw-r--r-- arch/arm/mach-omap2/board-overo.c | 160
-rw-r--r-- arch/arm/mach-omap2/board-rx51-peripherals.c | 12
-rw-r--r-- arch/arm/mach-omap2/board-rx51-video.c | 35
-rw-r--r-- arch/arm/mach-omap2/board-rx51.c | 2
-rw-r--r-- arch/arm/mach-omap2/board-zoom-display.c | 30
-rw-r--r-- arch/arm/mach-omap2/devices.c | 40
-rw-r--r-- arch/arm/mach-omap2/display.c | 4
-rw-r--r-- arch/arm/mach-omap2/dss-common.c | 244
-rw-r--r-- arch/arm/mach-omap2/dss-common.h | 2
-rw-r--r-- arch/arm/mach-omap2/gpmc.c | 2
-rw-r--r-- arch/arm/mach-omap2/i2c.c | 2
-rw-r--r-- arch/arm/mach-omap2/omap_device.c | 18
-rw-r--r-- arch/arm/mach-omap2/omap_hwmod.c | 2
-rw-r--r-- arch/arm/mach-omap2/omap_hwmod.h | 50
-rw-r--r-- arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c | 6
-rw-r--r-- arch/arm/mach-omap2/omap_hwmod_33xx_data.c | 3
-rw-r--r-- arch/arm/mach-omap2/omap_hwmod_3xxx_data.c | 9
-rw-r--r-- arch/arm/mach-omap2/omap_hwmod_44xx_data.c | 5
-rw-r--r-- arch/arm/mach-omap2/omap_hwmod_54xx_data.c | 3
-rw-r--r-- arch/arm/mach-omap2/serial.c | 11
-rw-r--r-- arch/arm/mach-omap2/timer.c | 2
-rw-r--r-- arch/arm/mach-omap2/usb-musb.c | 5
-rw-r--r-- arch/arm/mach-orion5x/include/mach/debug-macro.S | 21
-rw-r--r-- arch/arm/mach-prima2/common.c | 2
-rw-r--r-- arch/arm/mach-pxa/icontrol.c | 3
-rw-r--r-- arch/arm/mach-pxa/zeus.c | 46
-rw-r--r-- arch/arm/mach-realview/include/mach/debug-macro.S | 29
-rw-r--r-- arch/arm/mach-rpc/include/mach/debug-macro.S | 23
-rw-r--r-- arch/arm/mach-shmobile/board-armadillo800eva-reference.c | 4
-rw-r--r-- arch/arm/mach-shmobile/board-armadillo800eva.c | 4
-rw-r--r-- arch/arm/mach-shmobile/board-bockw.c | 50
-rw-r--r-- arch/arm/mach-shmobile/board-lager.c | 2
-rw-r--r-- arch/arm/mach-shmobile/board-marzen.c | 44
-rw-r--r-- arch/arm/mach-shmobile/clock-r8a7740.c | 2
-rw-r--r-- arch/arm/mach-shmobile/clock-r8a7778.c | 5
-rw-r--r-- arch/arm/mach-shmobile/clock-r8a7779.c | 10
-rw-r--r-- arch/arm/mach-shmobile/headsmp-scu.S | 4
-rw-r--r-- arch/arm/mach-shmobile/headsmp.S | 6
-rw-r--r-- arch/arm/mach-shmobile/include/mach/r8a7778.h | 3
-rw-r--r-- arch/arm/mach-shmobile/include/mach/r8a7779.h | 3
-rw-r--r-- arch/arm/mach-shmobile/include/mach/zboot.h | 2
-rw-r--r-- arch/arm/mach-shmobile/setup-r8a7778.c | 34
-rw-r--r-- arch/arm/mach-shmobile/setup-r8a7779.c | 37
-rw-r--r-- arch/arm/mach-shmobile/sleep-sh7372.S | 2
-rw-r--r-- arch/arm/mach-spear/include/mach/debug-macro.S | 36
-rw-r--r-- arch/arm/mach-spear/include/mach/spear.h | 2
-rw-r--r-- arch/arm/mach-sti/headsmp.S | 2
-rw-r--r-- arch/arm/mach-tegra/tegra.c | 38
-rw-r--r-- arch/arm/mach-ux500/Makefile | 1
-rw-r--r-- arch/arm/mach-ux500/cpuidle.c | 128
-rw-r--r-- arch/arm/mach-versatile/include/mach/debug-macro.S | 21
-rw-r--r-- arch/arm/mm/Kconfig | 9
-rw-r--r-- arch/arm/mm/cache-l2x0.c | 12
-rw-r--r-- arch/arm/mm/cache-v7.S | 4
-rw-r--r-- arch/arm/mm/context.c | 7
-rw-r--r-- arch/arm/mm/dma-mapping.c | 7
-rw-r--r-- arch/arm/mm/hugetlbpage.c | 43
-rw-r--r-- arch/arm/mm/init.c | 5
-rw-r--r-- arch/arm/mm/mmu.c | 4
-rw-r--r-- arch/arm/mm/nommu.c | 2
-rw-r--r-- arch/arm/mm/proc-feroceon.S | 26
-rw-r--r-- arch/arm/mm/proc-v7.S | 16
-rw-r--r-- arch/arm/mm/tlb-v7.S | 8
-rw-r--r-- arch/arm/plat-omap/dma.c | 1
-rw-r--r-- arch/arm/plat-pxa/ssp.c | 171
-rw-r--r-- arch/arm/plat-samsung/init.c | 5
-rw-r--r-- arch/arm/plat-samsung/s3c-dma-ops.c | 13
-rw-r--r-- arch/arm/vfp/vfphw.S | 5
-rw-r--r-- arch/arm/vfp/vfpmodule.c | 69
-rw-r--r-- arch/arm/xen/enlighten.c | 1
-rw-r--r-- arch/arm64/Kconfig | 3
-rw-r--r-- arch/arm64/include/asm/arch_timer.h | 23
-rw-r--r-- arch/arm64/include/asm/elf.h | 3
-rw-r--r-- arch/arm64/include/asm/kvm_asm.h | 17
-rw-r--r-- arch/arm64/include/asm/kvm_host.h | 2
-rw-r--r-- arch/arm64/include/asm/neon.h | 14
-rw-r--r-- arch/arm64/include/asm/pgtable-2level-types.h | 2
-rw-r--r-- arch/arm64/include/asm/pgtable-3level-types.h | 2
-rw-r--r-- arch/arm64/include/asm/pgtable-hwdef.h | 1
-rw-r--r-- arch/arm64/include/asm/tlb.h | 7
-rw-r--r-- arch/arm64/kernel/entry.S | 3
-rw-r--r-- arch/arm64/kernel/fpsimd.c | 28
-rw-r--r-- arch/arm64/kernel/head.S | 8
-rw-r--r-- arch/arm64/kernel/perf_event.c | 17
-rw-r--r-- arch/arm64/kernel/setup.c | 3
-rw-r--r-- arch/arm64/kernel/smp.c | 6
-rw-r--r-- arch/arm64/kernel/vmlinux.lds.S | 3
-rw-r--r-- arch/arm64/kvm/hyp.S | 13
-rw-r--r-- arch/arm64/kvm/sys_regs.c | 3
-rw-r--r-- arch/arm64/mm/mmu.c | 23
-rw-r--r-- arch/arm64/mm/proc.S | 6
-rw-r--r-- arch/avr32/boards/atngw100/mrmt.c | 1
-rw-r--r-- arch/avr32/oprofile/op_model_avr32.c | 17
-rw-r--r-- arch/frv/mb93090-mb00/pci-vdk.c | 2
-rw-r--r-- arch/hexagon/Kconfig | 1
-rw-r--r-- arch/ia64/Kconfig | 7
-rw-r--r-- arch/ia64/include/asm/Kbuild | 1
-rw-r--r-- arch/ia64/include/asm/bitops.h | 8
-rw-r--r-- arch/ia64/include/asm/dmi.h | 2
-rw-r--r-- arch/ia64/include/asm/spinlock.h | 5
-rw-r--r-- arch/ia64/include/asm/tlb.h | 9
-rw-r--r-- arch/ia64/kvm/kvm-ia64.c | 4
-rw-r--r-- arch/m68k/amiga/platform.c | 2
-rw-r--r-- arch/m68k/emu/natfeat.c | 27
-rw-r--r-- arch/m68k/emu/nfblock.c | 4
-rw-r--r-- arch/m68k/emu/nfcon.c | 8
-rw-r--r-- arch/m68k/emu/nfeth.c | 7
-rw-r--r-- arch/m68k/include/asm/div64.h | 9
-rw-r--r-- arch/m68k/include/asm/irqflags.h | 6
-rw-r--r-- arch/m68k/kernel/time.c | 2
-rw-r--r-- arch/m68k/platform/coldfire/pci.c | 1
-rw-r--r-- arch/m68k/q40/config.c | 2
-rw-r--r-- arch/microblaze/Kconfig | 3
-rw-r--r-- arch/microblaze/Makefile | 3
-rw-r--r-- arch/microblaze/boot/Makefile | 7
-rw-r--r-- arch/microblaze/include/asm/prom.h | 3
-rw-r--r-- arch/microblaze/include/asm/selfmod.h | 24
-rw-r--r-- arch/microblaze/kernel/Makefile | 2
-rw-r--r-- arch/microblaze/kernel/cpu/cpuinfo.c | 2
-rw-r--r-- arch/microblaze/kernel/intc.c | 85
-rw-r--r-- arch/microblaze/kernel/irq.c | 10
-rw-r--r-- arch/microblaze/kernel/reset.c | 6
-rw-r--r-- arch/microblaze/kernel/selfmod.c | 81
-rw-r--r-- arch/microblaze/kernel/setup.c | 10
-rw-r--r-- arch/microblaze/kernel/timer.c | 186
-rw-r--r-- arch/microblaze/pci/pci-common.c | 106
-rw-r--r-- arch/microblaze/platform/Kconfig.platform | 22
-rw-r--r-- arch/mips/Kconfig | 6
-rw-r--r-- arch/mips/include/asm/cpu-features.h | 2
-rw-r--r-- arch/mips/kernel/smp-bmips.c | 10
-rw-r--r-- arch/mips/kernel/vpe.c | 17
-rw-r--r-- arch/mips/kvm/kvm_locore.S | 969
-rw-r--r-- arch/mips/kvm/kvm_mips.c | 4
-rw-r--r-- arch/mips/math-emu/cp1emu.c | 26
-rw-r--r-- arch/mips/oprofile/common.c | 20
-rw-r--r-- arch/mips/oprofile/op_model_mipsxx.c | 2
-rw-r--r-- arch/mips/pci/pci.c | 1
-rw-r--r-- arch/mips/pnx833x/common/platform.c | 2
-rw-r--r-- arch/mips/sni/a20r.c | 1
-rw-r--r-- arch/openrisc/Kconfig | 1
-rw-r--r-- arch/openrisc/include/asm/prom.h | 3
-rw-r--r-- arch/parisc/kernel/signal.c | 2
-rw-r--r-- arch/powerpc/Kconfig | 29
-rw-r--r-- arch/powerpc/Makefile | 18
-rw-r--r-- arch/powerpc/boot/.gitignore | 1
-rw-r--r-- arch/powerpc/boot/dts/ac14xx.dts | 2
-rw-r--r-- arch/powerpc/boot/dts/b4420qds.dts | 2
-rw-r--r-- arch/powerpc/boot/dts/b4860qds.dts | 2
-rw-r--r-- arch/powerpc/boot/dts/b4qds.dtsi (renamed from arch/powerpc/boot/dts/b4qds.dts) | 0
-rw-r--r-- arch/powerpc/boot/dts/c293pcie.dts | 223
-rw-r--r-- arch/powerpc/boot/dts/fsl/b4si-post.dtsi | 2
-rw-r--r-- arch/powerpc/boot/dts/fsl/c293si-post.dtsi | 193
-rw-r--r-- arch/powerpc/boot/dts/fsl/c293si-pre.dtsi | 63
-rw-r--r-- arch/powerpc/boot/dts/fsl/qoriq-mpic4.3.dtsi | 149
-rw-r--r-- arch/powerpc/boot/dts/fsl/qoriq-sec6.0-0.dtsi | 56
-rw-r--r-- arch/powerpc/boot/dts/fsl/t4240si-post.dtsi | 2
l--------- arch/powerpc/boot/dts/include/dt-bindings | 1
-rw-r--r-- arch/powerpc/boot/dts/mpc5121ads.dts | 2
-rw-r--r-- arch/powerpc/boot/dts/p1020rdb-pd.dts | 280
-rw-r--r-- arch/powerpc/boot/dts/p1023rdb.dts | 234
-rw-r--r-- arch/powerpc/boot/dts/pdm360ng.dts | 2
-rw-r--r-- arch/powerpc/boot/ppc_asm.h | 3
-rw-r--r-- arch/powerpc/boot/util.S | 10
-rw-r--r-- arch/powerpc/configs/85xx/p1023_defconfig (renamed from arch/powerpc/configs/85xx/p1023rds_defconfig) | 25
-rw-r--r-- arch/powerpc/configs/corenet32_smp_defconfig | 1
-rw-r--r-- arch/powerpc/configs/corenet64_smp_defconfig | 3
-rw-r--r-- arch/powerpc/configs/mpc83xx_defconfig | 1
-rw-r--r-- arch/powerpc/configs/mpc85xx_defconfig | 2
-rw-r--r-- arch/powerpc/configs/mpc85xx_smp_defconfig | 2
-rw-r--r-- arch/powerpc/include/asm/Kbuild | 1
-rw-r--r-- arch/powerpc/include/asm/asm-compat.h | 9
-rw-r--r-- arch/powerpc/include/asm/btext.h | 1
-rw-r--r-- arch/powerpc/include/asm/cacheflush.h | 8
-rw-r--r-- arch/powerpc/include/asm/cputable.h | 9
-rw-r--r-- arch/powerpc/include/asm/emulated_ops.h | 2
-rw-r--r-- arch/powerpc/include/asm/epapr_hcalls.h | 6
-rw-r--r-- arch/powerpc/include/asm/exception-64s.h | 35
-rw-r--r-- arch/powerpc/include/asm/io.h | 33
-rw-r--r-- arch/powerpc/include/asm/irqflags.h | 7
-rw-r--r-- arch/powerpc/include/asm/kvm_book3s.h | 38
-rw-r--r-- arch/powerpc/include/asm/kvm_book3s_64.h | 4
-rw-r--r-- arch/powerpc/include/asm/kvm_host.h | 14
-rw-r--r-- arch/powerpc/include/asm/kvm_ppc.h | 25
-rw-r--r-- arch/powerpc/include/asm/lppaca.h | 68
-rw-r--r-- arch/powerpc/include/asm/mpc5121.h | 18
-rw-r--r-- arch/powerpc/include/asm/mpc85xx.h | 92
-rw-r--r-- arch/powerpc/include/asm/mpic.h | 7
-rw-r--r-- arch/powerpc/include/asm/opal.h | 27
-rw-r--r-- arch/powerpc/include/asm/paca.h | 11
-rw-r--r-- arch/powerpc/include/asm/page.h | 10
-rw-r--r-- arch/powerpc/include/asm/pci-bridge.h | 2
-rw-r--r-- arch/powerpc/include/asm/perf_event_fsl_emb.h | 2
-rw-r--r-- arch/powerpc/include/asm/perf_event_server.h | 4
-rw-r--r-- arch/powerpc/include/asm/plpar_wrappers.h (renamed from arch/powerpc/platforms/pseries/plpar_wrappers.h) | 30
-rw-r--r-- arch/powerpc/include/asm/ppc-opcode.h | 47
-rw-r--r-- arch/powerpc/include/asm/ppc_asm.h | 22
-rw-r--r-- arch/powerpc/include/asm/processor.h | 4
-rw-r--r-- arch/powerpc/include/asm/prom.h | 10
-rw-r--r-- arch/powerpc/include/asm/reg.h | 56
-rw-r--r-- arch/powerpc/include/asm/reg_booke.h | 8
-rw-r--r-- arch/powerpc/include/asm/reg_fsl_emb.h | 24
-rw-r--r-- arch/powerpc/include/asm/rtas.h | 8
-rw-r--r-- arch/powerpc/include/asm/smp.h | 3
-rw-r--r-- arch/powerpc/include/asm/spinlock.h | 6
-rw-r--r-- arch/powerpc/include/asm/switch_to.h | 20
-rw-r--r-- arch/powerpc/include/asm/timex.h | 4
-rw-r--r-- arch/powerpc/include/asm/topology.h | 1
-rw-r--r-- arch/powerpc/include/asm/udbg.h | 9
-rw-r--r-- arch/powerpc/include/uapi/asm/elf.h | 21
-rw-r--r-- arch/powerpc/kernel/Makefile | 5
-rw-r--r-- arch/powerpc/kernel/align.c | 14
-rw-r--r-- arch/powerpc/kernel/asm-offsets.c | 4
-rw-r--r-- arch/powerpc/kernel/btext.c | 254
-rw-r--r-- arch/powerpc/kernel/cacheinfo.c | 12
-rw-r--r-- arch/powerpc/kernel/cpu_setup_fsl_booke.S | 2
-rw-r--r-- arch/powerpc/kernel/cputable.c | 2
-rw-r--r-- arch/powerpc/kernel/eeh.c | 2
-rw-r--r-- arch/powerpc/kernel/entry_64.S | 32
-rw-r--r-- arch/powerpc/kernel/epapr_paravirt.c | 28
-rw-r--r-- arch/powerpc/kernel/exceptions-64e.S | 4
-rw-r--r-- arch/powerpc/kernel/exceptions-64s.S | 44
-rw-r--r-- arch/powerpc/kernel/head_40x.S | 8
-rw-r--r-- arch/powerpc/kernel/head_44x.S | 10
-rw-r--r-- arch/powerpc/kernel/head_64.S | 1
-rw-r--r-- arch/powerpc/kernel/head_8xx.S | 4
-rw-r--r-- arch/powerpc/kernel/head_fsl_booke.S | 10
-rw-r--r-- arch/powerpc/kernel/io-workarounds.c | 19
-rw-r--r-- arch/powerpc/kernel/io.c | 3
-rw-r--r-- arch/powerpc/kernel/iommu.c | 2
-rw-r--r-- arch/powerpc/kernel/legacy_serial.c | 62
-rw-r--r-- arch/powerpc/kernel/misc_32.S | 3
-rw-r--r-- arch/powerpc/kernel/misc_64.S | 50
-rw-r--r-- arch/powerpc/kernel/paca.c | 10
-rw-r--r-- arch/powerpc/kernel/pci-common.c | 21
-rw-r--r-- arch/powerpc/kernel/pci_64.c | 4
-rw-r--r-- arch/powerpc/kernel/pci_dn.c | 20
-rw-r--r-- arch/powerpc/kernel/pci_of_scan.c | 23
-rw-r--r-- arch/powerpc/kernel/ppc_ksyms.c | 3
-rw-r--r-- arch/powerpc/kernel/process.c | 12
-rw-r--r-- arch/powerpc/kernel/prom.c | 125
-rw-r--r-- arch/powerpc/kernel/prom_init.c | 269
-rw-r--r-- arch/powerpc/kernel/prom_init_check.sh | 3
-rw-r--r-- arch/powerpc/kernel/prom_parse.c | 17
-rw-r--r-- arch/powerpc/kernel/rtas.c | 66
-rw-r--r-- arch/powerpc/kernel/setup-common.c | 13
-rw-r--r-- arch/powerpc/kernel/setup_32.c | 4
-rw-r--r-- arch/powerpc/kernel/setup_64.c | 40
-rw-r--r-- arch/powerpc/kernel/signal_32.c | 9
-rw-r--r-- arch/powerpc/kernel/signal_64.c | 18
-rw-r--r-- arch/powerpc/kernel/smp.c | 156
-rw-r--r-- arch/powerpc/kernel/softemu8xx.c | 199
-rw-r--r-- arch/powerpc/kernel/swsusp_asm64.S | 45
-rw-r--r-- arch/powerpc/kernel/swsusp_booke.S | 8
-rw-r--r-- arch/powerpc/kernel/time.c | 20
-rw-r--r-- arch/powerpc/kernel/tm.S | 24
-rw-r--r-- arch/powerpc/kernel/traps.c | 159
-rw-r--r-- arch/powerpc/kernel/udbg_16550.c | 370
-rw-r--r-- arch/powerpc/kernel/vdso32/gettimeofday.S | 6
-rw-r--r-- arch/powerpc/kernel/vio.c | 33
-rw-r--r-- arch/powerpc/kvm/Kconfig | 1
-rw-r--r-- arch/powerpc/kvm/Makefile | 1
-rw-r--r-- arch/powerpc/kvm/book3s_64_mmu.c | 150
-rw-r--r-- arch/powerpc/kvm/book3s_64_mmu_hv.c | 42
-rw-r--r-- arch/powerpc/kvm/book3s_64_slb.S | 4
-rw-r--r-- arch/powerpc/kvm/book3s_64_vio.c | 2
-rw-r--r-- arch/powerpc/kvm/book3s_emulate.c | 2
-rw-r--r-- arch/powerpc/kvm/book3s_hv.c | 42
-rw-r--r-- arch/powerpc/kvm/book3s_hv_builtin.c | 246
-rw-r--r-- arch/powerpc/kvm/book3s_hv_cma.c | 240
-rw-r--r-- arch/powerpc/kvm/book3s_hv_cma.h | 27
-rw-r--r-- arch/powerpc/kvm/book3s_hv_rm_mmu.c | 143
-rw-r--r-- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 14
-rw-r--r-- arch/powerpc/kvm/book3s_interrupts.S | 14
-rw-r--r-- arch/powerpc/kvm/book3s_pr.c | 40
-rw-r--r-- arch/powerpc/kvm/book3s_xics.c | 1
-rw-r--r-- arch/powerpc/kvm/booke.c | 6
-rw-r--r-- arch/powerpc/kvm/emulate.c | 45
-rw-r--r-- arch/powerpc/kvm/powerpc.c | 26
-rw-r--r-- arch/powerpc/lib/locks.c | 4
-rw-r--r-- arch/powerpc/lib/sstep.c | 8
-rw-r--r-- arch/powerpc/math-emu/Makefile | 24
-rw-r--r-- arch/powerpc/math-emu/math.c | 89
-rw-r--r-- arch/powerpc/mm/fault.c | 6
-rw-r--r-- arch/powerpc/mm/gup.c | 37
-rw-r--r-- arch/powerpc/mm/hash_utils_64.c | 2
-rw-r--r-- arch/powerpc/mm/init_32.c | 2
-rw-r--r-- arch/powerpc/mm/mem.c | 2
-rw-r--r-- arch/powerpc/mm/numa.c | 102
-rw-r--r-- arch/powerpc/mm/slb.c | 9
-rw-r--r-- arch/powerpc/mm/subpage-prot.c | 4
-rw-r--r-- arch/powerpc/oprofile/common.c | 28
-rw-r--r-- arch/powerpc/oprofile/op_model_fsl_emb.c | 30
-rw-r--r-- arch/powerpc/perf/Makefile | 2
-rw-r--r-- arch/powerpc/perf/core-book3s.c | 2
-rw-r--r-- arch/powerpc/perf/core-fsl-emb.c | 30
-rw-r--r-- arch/powerpc/perf/e6500-pmu.c | 121
-rw-r--r-- arch/powerpc/perf/power7-events-list.h | 548
-rw-r--r-- arch/powerpc/perf/power7-pmu.c | 148
-rw-r--r-- arch/powerpc/platforms/44x/warp.c | 1
-rw-r--r-- arch/powerpc/platforms/52xx/mpc52xx_pic.c | 3
-rw-r--r-- arch/powerpc/platforms/85xx/Kconfig | 10
-rw-r--r-- arch/powerpc/platforms/85xx/Makefile | 1
-rw-r--r-- arch/powerpc/platforms/85xx/c293pcie.c | 75
-rw-r--r-- arch/powerpc/platforms/85xx/corenet_ds.c | 6
-rw-r--r-- arch/powerpc/platforms/85xx/mpc85xx_rdb.c | 22
-rw-r--r-- arch/powerpc/platforms/85xx/p1023_rds.c | 24
-rw-r--r-- arch/powerpc/platforms/85xx/smp.c | 26
-rw-r--r-- arch/powerpc/platforms/Kconfig | 7
-rw-r--r-- arch/powerpc/platforms/Kconfig.cputype | 13
-rw-r--r-- arch/powerpc/platforms/cell/iommu.c | 2
-rw-r--r-- arch/powerpc/platforms/cell/smp.c | 15
-rw-r--r-- arch/powerpc/platforms/powernv/Kconfig | 2
-rw-r--r-- arch/powerpc/platforms/powernv/Makefile | 2
-rw-r--r-- arch/powerpc/platforms/powernv/eeh-ioda.c | 22
-rw-r--r-- arch/powerpc/platforms/powernv/opal-lpc.c | 203
-rw-r--r-- arch/powerpc/platforms/powernv/opal-wrappers.S | 5
-rw-r--r-- arch/powerpc/platforms/powernv/opal.c | 18
-rw-r--r-- arch/powerpc/platforms/powernv/pci-ioda.c | 47
-rw-r--r-- arch/powerpc/platforms/powernv/powernv.h | 2
-rw-r--r-- arch/powerpc/platforms/powernv/setup.c | 17
-rw-r--r-- arch/powerpc/platforms/powernv/smp.c | 18
-rw-r--r-- arch/powerpc/platforms/ps3/time.c | 2
-rw-r--r-- arch/powerpc/platforms/pseries/Makefile | 1
-rw-r--r-- arch/powerpc/platforms/pseries/cmm.c | 3
-rw-r--r-- arch/powerpc/platforms/pseries/dlpar.c | 67
-rw-r--r-- arch/powerpc/platforms/pseries/dtl.c | 5
-rw-r--r-- arch/powerpc/platforms/pseries/hotplug-cpu.c | 7
-rw-r--r-- arch/powerpc/platforms/pseries/hvconsole.c | 19
-rw-r--r-- arch/powerpc/platforms/pseries/iommu.c | 11
-rw-r--r-- arch/powerpc/platforms/pseries/kexec.c | 2
-rw-r--r-- arch/powerpc/platforms/pseries/lpar.c | 12
-rw-r--r-- arch/powerpc/platforms/pseries/lparcfg.c (renamed from arch/powerpc/kernel/lparcfg.c) | 36
-rw-r--r-- arch/powerpc/platforms/pseries/mobility.c | 45
-rw-r--r-- arch/powerpc/platforms/pseries/nvram.c | 162
-rw-r--r-- arch/powerpc/platforms/pseries/processor_idle.c | 12
-rw-r--r-- arch/powerpc/platforms/pseries/pseries.h | 5
-rw-r--r-- arch/powerpc/platforms/pseries/pseries_energy.c | 4
-rw-r--r-- arch/powerpc/platforms/pseries/setup.c | 6
-rw-r--r-- arch/powerpc/platforms/pseries/smp.c | 20
-rw-r--r-- arch/powerpc/platforms/wsp/wsp.h | 1
-rw-r--r-- arch/powerpc/sysdev/fsl_msi.c | 137
-rw-r--r-- arch/powerpc/sysdev/fsl_msi.h | 10
-rw-r--r-- arch/powerpc/sysdev/fsl_pci.c | 184
-rw-r--r-- arch/powerpc/sysdev/fsl_pci.h | 6
-rw-r--r-- arch/powerpc/sysdev/rtc_cmos_setup.c | 2
-rw-r--r-- arch/powerpc/sysdev/xics/icp-native.c | 2
-rw-r--r-- arch/powerpc/sysdev/xics/xics-common.c | 10
-rw-r--r-- arch/powerpc/xmon/xmon.c | 31
-rw-r--r-- arch/s390/Kconfig | 20
-rw-r--r-- arch/s390/boot/compressed/Makefile | 9
-rw-r--r-- arch/s390/boot/compressed/misc.c | 4
-rw-r--r-- arch/s390/hypfs/hypfs.h | 13
-rw-r--r-- arch/s390/hypfs/hypfs_dbfs.c | 2
-rw-r--r-- arch/s390/hypfs/hypfs_diag.c | 50
-rw-r--r-- arch/s390/hypfs/hypfs_vm.c | 65
-rw-r--r-- arch/s390/hypfs/inode.c | 36
-rw-r--r-- arch/s390/include/asm/airq.h | 67
-rw-r--r-- arch/s390/include/asm/bitops.h | 14
-rw-r--r-- arch/s390/include/asm/cio.h | 1
-rw-r--r-- arch/s390/include/asm/cputime.h | 3
-rw-r--r-- arch/s390/include/asm/hardirq.h | 5
-rw-r--r-- arch/s390/include/asm/hugetlb.h | 135
-rw-r--r-- arch/s390/include/asm/hw_irq.h | 17
-rw-r--r-- arch/s390/include/asm/irq.h | 35
-rw-r--r-- arch/s390/include/asm/kvm_host.h | 8
-rw-r--r-- arch/s390/include/asm/mmu.h | 2
-rw-r--r-- arch/s390/include/asm/mmu_context.h | 22
-rw-r--r-- arch/s390/include/asm/page.h | 19
-rw-r--r-- arch/s390/include/asm/pci.h | 54
-rw-r--r-- arch/s390/include/asm/pci_insn.h | 12
-rw-r--r-- arch/s390/include/asm/pci_io.h | 10
-rw-r--r-- arch/s390/include/asm/pgtable.h | 648
-rw-r--r-- arch/s390/include/asm/processor.h | 2
-rw-r--r-- arch/s390/include/asm/serial.h | 6
-rw-r--r-- arch/s390/include/asm/switch_to.h | 9
-rw-r--r-- arch/s390/include/asm/tlb.h | 11
-rw-r--r-- arch/s390/include/asm/tlbflush.h | 6
-rw-r--r-- arch/s390/include/asm/vtime.h | 7
-rw-r--r-- arch/s390/kernel/entry.S | 16
-rw-r--r-- arch/s390/kernel/entry64.S | 11
-rw-r--r-- arch/s390/kernel/irq.c | 160
-rw-r--r-- arch/s390/kernel/kprobes.c | 21
-rw-r--r-- arch/s390/kernel/nmi.c | 5
-rw-r--r-- arch/s390/kernel/perf_event.c | 9
-rw-r--r-- arch/s390/kernel/process.c | 1
-rw-r--r-- arch/s390/kernel/ptrace.c | 8
-rw-r--r-- arch/s390/kernel/setup.c | 1
-rw-r--r-- arch/s390/kernel/suspend.c | 11
-rw-r--r-- arch/s390/kernel/swsusp_asm64.S | 7
-rw-r--r-- arch/s390/kernel/time.c | 1
-rw-r--r-- arch/s390/kernel/vdso.c | 6
-rw-r--r-- arch/s390/kernel/vtime.c | 1
-rw-r--r-- arch/s390/kvm/diag.c | 17
-rw-r--r-- arch/s390/kvm/gaccess.h | 12
-rw-r--r-- arch/s390/kvm/kvm-s390.c | 48
-rw-r--r-- arch/s390/kvm/kvm-s390.h | 10
-rw-r--r-- arch/s390/kvm/priv.c | 36
-rw-r--r-- arch/s390/lib/delay.c | 2
-rw-r--r-- arch/s390/lib/uaccess_pt.c | 16
-rw-r--r-- arch/s390/mm/dump_pagetables.c | 18
-rw-r--r-- arch/s390/mm/gup.c | 6
-rw-r--r-- arch/s390/mm/hugetlbpage.c | 124
-rw-r--r-- arch/s390/mm/init.c | 1
-rw-r--r-- arch/s390/mm/pageattr.c | 2
-rw-r--r-- arch/s390/mm/pgtable.c | 266
-rw-r--r-- arch/s390/mm/vmem.c | 15
-rw-r--r-- arch/s390/oprofile/init.c | 37
-rw-r--r-- arch/s390/pci/Makefile | 2
-rw-r--r-- arch/s390/pci/pci.c | 575
-rw-r--r-- arch/s390/pci/pci_clp.c | 146
-rw-r--r-- arch/s390/pci/pci_dma.c | 16
-rw-r--r-- arch/s390/pci/pci_event.c | 2
-rw-r--r-- arch/s390/pci/pci_insn.c | 18
-rw-r--r-- arch/s390/pci/pci_msi.c | 142
-rw-r--r-- arch/s390/pci/pci_sysfs.c | 27
-rw-r--r-- arch/score/Kconfig | 2
-rw-r--r-- arch/sh/Kconfig | 6
-rw-r--r-- arch/sh/boards/board-espt.c | 1
-rw-r--r-- arch/sh/boards/board-sh7757lcr.c | 4
-rw-r--r-- arch/sh/boards/mach-ecovec24/setup.c | 1
-rw-r--r-- arch/sh/boards/mach-se/7724/setup.c | 3
-rw-r--r-- arch/sh/boards/mach-sh7763rdp/setup.c | 1
-rw-r--r-- arch/sh/drivers/pci/pci.c | 1
-rw-r--r-- arch/sh/include/asm/tlb.h | 6
-rw-r--r-- arch/sh/kernel/cpu/sh2/setup-sh7619.c | 11
-rw-r--r-- arch/sh/kernel/cpu/shmobile/cpuidle.c | 4
-rw-r--r-- arch/sparc/include/asm/switch_to_64.h | 4
-rw-r--r-- arch/sparc/kernel/cpumap.c | 1
-rw-r--r-- arch/sparc/kernel/entry.S | 2
-rw-r--r-- arch/sparc/kernel/kgdb_64.c | 4
-rw-r--r-- arch/sparc/kernel/ktlb.S | 3
-rw-r--r-- arch/sparc/kernel/ptrace_64.c | 4
-rw-r--r-- arch/sparc/kernel/setup_64.c | 12
-rw-r--r-- arch/sparc/kernel/syscalls.S | 12
-rw-r--r-- arch/sparc/kernel/trampoline_64.S | 2
-rw-r--r-- arch/sparc/lib/ksyms.c | 9
-rw-r--r-- arch/tile/Kconfig | 31
-rw-r--r-- arch/tile/Kconfig.debug | 14
-rw-r--r-- arch/tile/Makefile | 4
-rw-r--r-- arch/tile/configs/tilegx_defconfig | 241
-rw-r--r-- arch/tile/configs/tilepro_defconfig | 87
-rw-r--r-- arch/tile/gxio/Kconfig | 5
-rw-r--r-- arch/tile/gxio/Makefile | 1
-rw-r--r-- arch/tile/gxio/iorpc_mpipe.c | 66
-rw-r--r-- arch/tile/gxio/iorpc_mpipe_info.c | 18
-rw-r--r-- arch/tile/gxio/iorpc_trio.c | 23
-rw-r--r-- arch/tile/gxio/iorpc_uart.c | 77
-rw-r--r-- arch/tile/gxio/mpipe.c | 43
-rw-r--r-- arch/tile/gxio/uart.c | 87
-rw-r--r-- arch/tile/include/arch/trio.h | 39
-rw-r--r-- arch/tile/include/arch/uart.h | 300
-rw-r--r-- arch/tile/include/arch/uart_def.h | 120
-rw-r--r-- arch/tile/include/asm/Kbuild | 3
-rw-r--r-- arch/tile/include/asm/atomic.h | 52
-rw-r--r-- arch/tile/include/asm/atomic_32.h | 102
-rw-r--r-- arch/tile/include/asm/atomic_64.h | 42
-rw-r--r-- arch/tile/include/asm/barrier.h | 4
-rw-r--r-- arch/tile/include/asm/bitops.h | 41
-rw-r--r-- arch/tile/include/asm/bitops_32.h | 2
-rw-r--r-- arch/tile/include/asm/bitops_64.h | 8
-rw-r--r-- arch/tile/include/asm/cache.h | 13
-rw-r--r-- arch/tile/include/asm/cacheflush.h | 44
-rw-r--r-- arch/tile/include/asm/cmpxchg.h | 93
-rw-r--r-- arch/tile/include/asm/device.h | 5
-rw-r--r-- arch/tile/include/asm/dma-mapping.h | 27
-rw-r--r-- arch/tile/include/asm/elf.h | 10
-rw-r--r-- arch/tile/include/asm/fixmap.h | 8
-rw-r--r-- arch/tile/include/asm/ftrace.h | 22
-rw-r--r-- arch/tile/include/asm/futex.h | 1
-rw-r--r-- arch/tile/include/asm/homecache.h | 11
-rw-r--r-- arch/tile/include/asm/io.h | 132
-rw-r--r-- arch/tile/include/asm/irqflags.h | 21
-rw-r--r-- arch/tile/include/asm/kdebug.h (renamed from arch/tile/include/asm/hw_irq.h) | 18
-rw-r--r-- arch/tile/include/asm/kgdb.h | 71
-rw-r--r-- arch/tile/include/asm/kprobes.h | 79
-rw-r--r-- arch/tile/include/asm/mmu.h | 1
-rw-r--r-- arch/tile/include/asm/mmu_context.h | 2
-rw-r--r-- arch/tile/include/asm/mmzone.h | 2
-rw-r--r-- arch/tile/include/asm/page.h | 61
-rw-r--r-- arch/tile/include/asm/pci.h | 22
-rw-r--r-- arch/tile/include/asm/pgtable_32.h | 4
-rw-r--r-- arch/tile/include/asm/pgtable_64.h | 27
-rw-r--r-- arch/tile/include/asm/processor.h | 84
-rw-r--r-- arch/tile/include/asm/ptrace.h | 6
-rw-r--r-- arch/tile/include/asm/sections.h | 8
-rw-r--r-- arch/tile/include/asm/setup.h | 3
-rw-r--r-- arch/tile/include/asm/smp.h | 2
-rw-r--r-- arch/tile/include/asm/spinlock_64.h | 4
-rw-r--r-- arch/tile/include/asm/string.h | 2
-rw-r--r-- arch/tile/include/asm/thread_info.h | 6
-rw-r--r-- arch/tile/include/asm/topology.h | 3
-rw-r--r-- arch/tile/include/asm/traps.h | 13
-rw-r--r-- arch/tile/include/asm/uaccess.h | 37
-rw-r--r-- arch/tile/include/asm/unaligned.h | 14
-rw-r--r-- arch/tile/include/asm/vdso.h | 49
-rw-r--r-- arch/tile/include/gxio/iorpc_mpipe.h | 14
-rw-r--r-- arch/tile/include/gxio/iorpc_mpipe_info.h | 4
-rw-r--r-- arch/tile/include/gxio/iorpc_trio.h | 5
-rw-r--r-- arch/tile/include/gxio/iorpc_uart.h | 40
-rw-r--r-- arch/tile/include/gxio/mpipe.h | 143
-rw-r--r-- arch/tile/include/gxio/uart.h | 105
-rw-r--r-- arch/tile/include/hv/drv_mpipe_intf.h | 3
-rw-r--r-- arch/tile/include/hv/drv_trio_intf.h | 8
-rw-r--r-- arch/tile/include/hv/drv_uart_intf.h | 33
-rw-r--r-- arch/tile/include/hv/hypervisor.h | 61
-rw-r--r-- arch/tile/include/uapi/arch/Kbuild | 1
-rw-r--r-- arch/tile/include/uapi/arch/chip.h | 4
-rw-r--r-- arch/tile/include/uapi/arch/chip_tile64.h | 258
-rw-r--r-- arch/tile/include/uapi/arch/opcode_tilegx.h | 1
-rw-r--r-- arch/tile/include/uapi/arch/opcode_tilepro.h | 1
-rw-r--r-- arch/tile/include/uapi/arch/spr_def_32.h | 2
-rw-r--r-- arch/tile/include/uapi/asm/auxvec.h | 3
-rw-r--r-- arch/tile/include/uapi/asm/cachectl.h | 4
-rw-r--r-- arch/tile/kernel/Makefile | 16
-rw-r--r-- arch/tile/kernel/asm-offsets.c | 52
-rw-r--r-- arch/tile/kernel/compat_signal.c | 3
-rw-r--r-- arch/tile/kernel/early_printk.c | 47
-rw-r--r-- arch/tile/kernel/entry.S | 16
-rw-r--r-- arch/tile/kernel/ftrace.c | 246
-rw-r--r-- arch/tile/kernel/hardwall.c | 28
-rw-r--r-- arch/tile/kernel/head_32.S | 17
-rw-r--r-- arch/tile/kernel/head_64.S | 46
-rw-r--r-- arch/tile/kernel/hvglue.S | 74
-rw-r--r-- arch/tile/kernel/hvglue.lds | 59
-rw-r--r-- arch/tile/kernel/hvglue_trace.c | 266
-rw-r--r-- arch/tile/kernel/intvec_32.S | 114
-rw-r--r-- arch/tile/kernel/intvec_64.S | 305
-rw-r--r-- arch/tile/kernel/irq.c | 8
-rw-r--r-- arch/tile/kernel/kgdb.c | 499
-rw-r--r-- arch/tile/kernel/kprobes.c | 528
-rw-r--r-- arch/tile/kernel/mcount_64.S | 224
-rw-r--r-- arch/tile/kernel/pci-dma.c | 74
-rw-r--r-- arch/tile/kernel/pci.c | 33
-rw-r--r-- arch/tile/kernel/pci_gx.c | 736
-rw-r--r-- arch/tile/kernel/proc.c | 2
-rw-r--r-- arch/tile/kernel/process.c | 116
-rw-r--r-- arch/tile/kernel/ptrace.c | 19
-rw-r--r-- arch/tile/kernel/reboot.c | 2
-rw-r--r-- arch/tile/kernel/regs_32.S | 4
-rw-r--r-- arch/tile/kernel/regs_64.S | 4
-rw-r--r-- arch/tile/kernel/relocate_kernel_32.S | 27
-rw-r--r-- arch/tile/kernel/relocate_kernel_64.S | 11
-rw-r--r-- arch/tile/kernel/setup.c | 162
-rw-r--r-- arch/tile/kernel/signal.c | 3
-rw-r--r-- arch/tile/kernel/single_step.c | 118
-rw-r--r-- arch/tile/kernel/smp.c | 22
-rw-r--r-- arch/tile/kernel/smpboot.c | 8
-rw-r--r-- arch/tile/kernel/stack.c | 51
-rw-r--r-- arch/tile/kernel/sys.c | 4
-rw-r--r-- arch/tile/kernel/sysfs.c | 76
-rw-r--r-- arch/tile/kernel/time.c | 37
-rw-r--r-- arch/tile/kernel/tlb.c | 8
-rw-r--r-- arch/tile/kernel/traps.c | 89
-rw-r--r-- arch/tile/kernel/unaligned.c | 1609
-rw-r--r-- arch/tile/kernel/vdso.c | 212
-rw-r--r-- arch/tile/kernel/vdso/Makefile | 118
-rw-r--r-- arch/tile/kernel/vdso/vdso.S | 28
-rw-r--r-- arch/tile/kernel/vdso/vdso.lds.S | 87
-rw-r--r-- arch/tile/kernel/vdso/vdso32.S | 28
-rw-r--r-- arch/tile/kernel/vdso/vgettimeofday.c | 107
-rw-r--r-- arch/tile/kernel/vdso/vrt_sigreturn.S | 30
-rw-r--r-- arch/tile/kernel/vmlinux.lds.S | 31
-rw-r--r-- arch/tile/lib/Makefile | 16
-rw-r--r-- arch/tile/lib/atomic_32.c | 133
-rw-r--r-- arch/tile/lib/atomic_asm_32.S | 1
-rw-r--r-- arch/tile/lib/cacheflush.c | 16
-rw-r--r-- arch/tile/lib/exports.c | 7
-rw-r--r-- arch/tile/lib/memchr_64.c | 2
-rw-r--r-- arch/tile/lib/memcpy_32.S | 63
-rw-r--r-- arch/tile/lib/memcpy_64.c | 264
-rw-r--r-- arch/tile/lib/memcpy_tile64.c | 276
-rw-r--r-- arch/tile/lib/memcpy_user_64.c | 2
-rw-r--r-- arch/tile/lib/memset_32.c | 110
-rw-r--r-- arch/tile/lib/memset_64.c | 9
-rw-r--r-- arch/tile/lib/strchr_32.c | 2
-rw-r--r-- arch/tile/lib/strchr_64.c | 2
-rw-r--r-- arch/tile/lib/string-endian.h | 13
-rw-r--r-- arch/tile/lib/strlen_32.c | 2
-rw-r--r-- arch/tile/lib/strnlen_32.c | 47
-rw-r--r-- arch/tile/lib/strnlen_64.c | 48
-rw-r--r-- arch/tile/lib/usercopy_32.S | 36
-rw-r--r-- arch/tile/lib/usercopy_64.S | 36
-rw-r--r-- arch/tile/mm/elf.c | 99
-rw-r--r-- arch/tile/mm/fault.c | 135
-rw-r--r-- arch/tile/mm/highmem.c | 2
-rw-r--r-- arch/tile/mm/homecache.c | 39
-rw-r--r-- arch/tile/mm/hugetlbpage.c | 38
-rw-r--r-- arch/tile/mm/init.c | 96
-rw-r--r-- arch/tile/mm/migrate_32.S | 4
-rw-r--r-- arch/tile/mm/migrate_64.S | 4
-rw-r--r-- arch/tile/mm/mmap.c | 24
-rw-r--r-- arch/tile/mm/pgtable.c | 76
-rw-r--r-- arch/um/include/asm/tlb.h | 6
-rw-r--r-- arch/x86/Kconfig | 62
-rw-r--r-- arch/x86/Makefile | 8
-rw-r--r-- arch/x86/boot/boot.h | 1
-rw-r--r-- arch/x86/boot/compressed/eboot.c | 2
-rw-r--r-- arch/x86/boot/compressed/head_32.S | 31
-rw-r--r-- arch/x86/boot/compressed/head_64.S | 1
-rw-r--r-- arch/x86/boot/compressed/misc.c | 77
-rw-r--r-- arch/x86/boot/printf.c | 2
-rw-r--r-- arch/x86/ia32/ia32_signal.c | 2
-rw-r--r-- arch/x86/ia32/ia32entry.S | 2
-rw-r--r-- arch/x86/include/asm/acpi.h | 2
-rw-r--r-- arch/x86/include/asm/alternative.h | 14
-rw-r--r-- arch/x86/include/asm/apic.h | 2
-rw-r--r-- arch/x86/include/asm/asm.h | 6
-rw-r--r-- arch/x86/include/asm/bitops.h | 46
-rw-r--r-- arch/x86/include/asm/bootparam_utils.h | 4
-rw-r--r-- arch/x86/include/asm/checksum_32.h | 22
-rw-r--r-- arch/x86/include/asm/checksum_64.h | 2
-rw-r--r-- arch/x86/include/asm/cpufeature.h | 17
-rw-r--r-- arch/x86/include/asm/e820.h | 2
-rw-r--r-- arch/x86/include/asm/hw_irq.h | 120
-rw-r--r-- arch/x86/include/asm/hypervisor.h | 2
-rw-r--r-- arch/x86/include/asm/irq.h | 2
-rw-r--r-- arch/x86/include/asm/kprobes.h | 10
-rw-r--r-- arch/x86/include/asm/kvm_host.h | 14
-rw-r--r-- arch/x86/include/asm/kvm_para.h | 38
-rw-r--r-- arch/x86/include/asm/mce.h | 16
-rw-r--r-- arch/x86/include/asm/microcode_amd.h | 2
-rw-r--r-- arch/x86/include/asm/mmu_context.h | 20
-rw-r--r-- arch/x86/include/asm/mutex_64.h | 30
-rw-r--r-- arch/x86/include/asm/page_32_types.h | 2
-rw-r--r-- arch/x86/include/asm/page_64_types.h | 5
-rw-r--r-- arch/x86/include/asm/page_types.h | 5
-rw-r--r-- arch/x86/include/asm/paravirt.h | 32
-rw-r--r-- arch/x86/include/asm/paravirt_types.h | 17
-rw-r--r-- arch/x86/include/asm/pgtable-2level.h | 48
-rw-r--r-- arch/x86/include/asm/pgtable-3level.h | 3
-rw-r--r-- arch/x86/include/asm/pgtable.h | 33
-rw-r--r-- arch/x86/include/asm/pgtable_types.h | 17
-rw-r--r-- arch/x86/include/asm/processor.h | 34
-rw-r--r-- arch/x86/include/asm/pvclock.h | 1
-rw-r--r-- arch/x86/include/asm/setup.h | 8
-rw-r--r-- arch/x86/include/asm/special_insns.h | 2
-rw-r--r-- arch/x86/include/asm/spinlock.h | 137
-rw-r--r-- arch/x86/include/asm/spinlock_types.h | 16
-rw-r--r-- arch/x86/include/asm/switch_to.h | 4
-rw-r--r-- arch/x86/include/asm/sync_bitops.h | 24
-rw-r--r-- arch/x86/include/asm/syscall.h | 3
-rw-r--r-- arch/x86/include/asm/syscalls.h | 6
-rw-r--r-- arch/x86/include/asm/sysfb.h | 98
-rw-r--r-- arch/x86/include/asm/topology.h | 3
-rw-r--r-- arch/x86/include/asm/traps.h | 6
-rw-r--r-- arch/x86/include/asm/tsc.h | 1
-rw-r--r-- arch/x86/include/asm/uaccess.h | 7
-rw-r--r-- arch/x86/include/asm/vmx.h | 2
-rw-r--r-- arch/x86/include/asm/vvar.h | 2
-rw-r--r-- arch/x86/include/asm/xen/events.h | 1
-rw-r--r-- arch/x86/include/asm/xen/hypervisor.h | 16
-rw-r--r-- arch/x86/include/uapi/asm/kvm_para.h | 1
-rw-r--r-- arch/x86/include/uapi/asm/vmx.h | 6
-rw-r--r-- arch/x86/kernel/Makefile | 3
-rw-r--r-- arch/x86/kernel/acpi/boot.c | 25
-rw-r--r-- arch/x86/kernel/alternative.c | 155
-rw-r--r-- arch/x86/kernel/amd_nb.c | 13
-rw-r--r-- arch/x86/kernel/apic/apic.c | 12
-rw-r--r-- arch/x86/kernel/apic/io_apic.c | 14
-rw-r--r-- arch/x86/kernel/apm_32.c | 2
-rw-r--r-- arch/x86/kernel/cpu/amd.c | 24
-rw-r--r-- arch/x86/kernel/cpu/common.c | 4
-rw-r--r-- arch/x86/kernel/cpu/hypervisor.c | 15
-rw-r--r-- arch/x86/kernel/cpu/mcheck/mce-internal.h | 3
-rw-r--r-- arch/x86/kernel/cpu/mcheck/mce.c | 28
-rw-r--r-- arch/x86/kernel/cpu/mcheck/mce_intel.c | 42
-rw-r--r-- arch/x86/kernel/cpu/mshyperv.c | 13
-rw-r--r-- arch/x86/kernel/cpu/perf_event.c | 6
-rw-r--r-- arch/x86/kernel/cpu/perf_event.h | 2
-rw-r--r-- arch/x86/kernel/cpu/perf_event_amd.c | 3
-rw-r--r-- arch/x86/kernel/cpu/perf_event_intel.c | 181
-rw-r--r-- arch/x86/kernel/cpu/perf_event_intel_ds.c | 32
-rw-r--r-- arch/x86/kernel/cpu/perf_event_intel_uncore.c | 258
-rw-r--r-- arch/x86/kernel/cpu/perf_event_intel_uncore.h | 10
-rw-r--r-- arch/x86/kernel/cpu/vmware.c | 8
-rw-r--r-- arch/x86/kernel/crash.c | 4
-rw-r--r-- arch/x86/kernel/e820.c | 5
-rw-r--r-- arch/x86/kernel/early-quirks.c | 14
-rw-r--r-- arch/x86/kernel/head32.c | 2
-rw-r--r-- arch/x86/kernel/head64.c | 2
-rw-r--r-- arch/x86/kernel/head_32.S | 2
-rw-r--r-- arch/x86/kernel/i387.c | 2
-rw-r--r-- arch/x86/kernel/irq.c | 8
-rw-r--r-- arch/x86/kernel/irq_work.c | 4
-rw-r--r-- arch/x86/kernel/jump_label.c | 16
-rw-r--r-- arch/x86/kernel/kprobes/common.h | 5
-rw-r--r-- arch/x86/kernel/kprobes/core.c | 4
-rw-r--r-- arch/x86/kernel/kprobes/opt.c | 115
-rw-r--r-- arch/x86/kernel/kvm.c | 268
-rw-r--r-- arch/x86/kernel/microcode_amd.c | 36
-rw-r--r-- arch/x86/kernel/microcode_amd_early.c | 27
-rw-r--r-- arch/x86/kernel/paravirt-spinlocks.c | 18
-rw-r--r-- arch/x86/kernel/paravirt.c | 9
-rw-r--r-- arch/x86/kernel/process.c | 2
-rw-r--r-- arch/x86/kernel/process_32.c | 2
-rw-r--r-- arch/x86/kernel/process_64.c | 4
-rw-r--r-- arch/x86/kernel/pvclock.c | 44
-rw-r--r-- arch/x86/kernel/setup.c | 27
-rw-r--r-- arch/x86/kernel/signal.c | 12
-rw-r--r-- arch/x86/kernel/smp.c | 12
-rw-r--r-- arch/x86/kernel/sys_x86_64.c | 2
-rw-r--r-- arch/x86/kernel/syscall_32.c | 2
-rw-r--r-- arch/x86/kernel/syscall_64.c | 5
-rw-r--r-- arch/x86/kernel/sysfb.c | 74
-rw-r--r-- arch/x86/kernel/sysfb_efi.c | 214
-rw-r--r-- arch/x86/kernel/sysfb_simplefb.c | 95
-rw-r--r-- arch/x86/kernel/tboot.c | 10
-rw-r--r-- arch/x86/kernel/traps.c | 4
-rw-r--r-- arch/x86/kernel/tsc.c | 6
-rw-r--r-- arch/x86/kvm/cpuid.c | 3
-rw-r--r-- arch/x86/kvm/lapic.c | 38
-rw-r--r-- arch/x86/kvm/mmu.c | 181
-rw-r--r-- arch/x86/kvm/mmu.h | 2
-rw-r--r-- arch/x86/kvm/paging_tmpl.h | 178
-rw-r--r-- arch/x86/kvm/pmu.c | 25
-rw-r--r-- arch/x86/kvm/vmx.c | 441
-rw-r--r-- arch/x86/kvm/x86.c | 224
-rw-r--r-- arch/x86/lib/csum-wrappers_64.c | 12
-rw-r--r-- arch/x86/lib/usercopy_64.c | 2
-rw-r--r-- arch/x86/lib/x86-opcode-map.txt | 42
-rw-r--r-- arch/x86/mm/init.c | 4
-rw-r--r-- arch/x86/mm/ioremap.c | 5
-rw-r--r-- arch/x86/mm/mmap.c | 6
-rw-r--r-- arch/x86/mm/srat.c | 11
-rw-r--r-- arch/x86/oprofile/nmi_int.c | 18
-rw-r--r-- arch/x86/oprofile/op_model_amd.c | 24
-rw-r--r-- arch/x86/pci/acpi.c | 9
-rw-r--r-- arch/x86/pci/i386.c | 4
-rw-r--r-- arch/x86/pci/mmconfig-shared.c | 7
-rw-r--r-- arch/x86/pci/mrst.c | 41
-rw-r--r-- arch/x86/power/cpu.c | 8
-rw-r--r-- arch/x86/power/hibernate_64.c | 12
-rw-r--r-- arch/x86/tools/gen-insn-attr-x86.awk | 4
-rw-r--r-- arch/x86/vdso/vclock_gettime.c | 16
-rw-r--r-- arch/x86/xen/enlighten.c | 24
-rw-r--r-- arch/x86/xen/irq.c | 25
-rw-r--r-- arch/x86/xen/p2m.c | 22
-rw-r--r-- arch/x86/xen/setup.c | 51
-rw-r--r-- arch/x86/xen/smp.c | 19
-rw-r--r-- arch/x86/xen/spinlock.c | 387
-rw-r--r-- arch/x86/xen/xen-ops.h | 16
912 files changed, 22544 insertions(+), 12134 deletions(-)
diff --git a/arch/Kconfig b/arch/Kconfig
index 8d2ae24b9f4a..1feb169274fe 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -407,6 +407,12 @@ config CLONE_BACKWARDS2
 	help
 	  Architecture has the first two arguments of clone(2) swapped.
 
+config CLONE_BACKWARDS3
+	bool
+	help
+	  Architecture has tls passed as the 3rd argument of clone(2),
+	  not the 5th one.
+
 config ODD_RT_SIGACTION
 	bool
 	help
diff --git a/arch/alpha/oprofile/common.c b/arch/alpha/oprofile/common.c
index b8ce18f485d3..310a4ce1dccc 100644
--- a/arch/alpha/oprofile/common.c
+++ b/arch/alpha/oprofile/common.c
@@ -106,7 +106,7 @@ op_axp_stop(void)
 }
 
 static int
-op_axp_create_files(struct super_block *sb, struct dentry *root)
+op_axp_create_files(struct dentry *root)
 {
 	int i;
 
@@ -115,23 +115,23 @@ op_axp_create_files(struct super_block *sb, struct dentry *root)
 		char buf[4];
 
 		snprintf(buf, sizeof buf, "%d", i);
-		dir = oprofilefs_mkdir(sb, root, buf);
+		dir = oprofilefs_mkdir(root, buf);
 
-		oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled);
-		oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event);
-		oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count);
+		oprofilefs_create_ulong(dir, "enabled", &ctr[i].enabled);
+		oprofilefs_create_ulong(dir, "event", &ctr[i].event);
+		oprofilefs_create_ulong(dir, "count", &ctr[i].count);
 		/* Dummies. */
-		oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel);
-		oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user);
-		oprofilefs_create_ulong(sb, dir, "unit_mask", &ctr[i].unit_mask);
+		oprofilefs_create_ulong(dir, "kernel", &ctr[i].kernel);
+		oprofilefs_create_ulong(dir, "user", &ctr[i].user);
+		oprofilefs_create_ulong(dir, "unit_mask", &ctr[i].unit_mask);
 	}
 
 	if (model->can_set_proc_mode) {
-		oprofilefs_create_ulong(sb, root, "enable_pal",
+		oprofilefs_create_ulong(root, "enable_pal",
 					&sys.enable_pal);
-		oprofilefs_create_ulong(sb, root, "enable_kernel",
+		oprofilefs_create_ulong(root, "enable_kernel",
 					&sys.enable_kernel);
-		oprofilefs_create_ulong(sb, root, "enable_user",
+		oprofilefs_create_ulong(root, "enable_user",
 					&sys.enable_user);
 	}
 
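
The oprofilefs hunks above are part of a tree-wide cleanup that drops the
struct super_block argument from the oprofilefs helpers, so entries hang
directly off the root dentry. A minimal sketch of the resulting calling
convention, using only the two helpers visible in this diff (the driver
name and counter variable below are hypothetical, not from this patch):

/* hypothetical oprofile model driver using the post-patch API */
#include <linux/dcache.h>
#include <linux/oprofile.h>

static unsigned long my_ctr_enabled;	/* assumed driver state */

static int my_create_files(struct dentry *root)
{
	/* oprofilefs_mkdir() now takes just the parent dentry and a name */
	struct dentry *dir = oprofilefs_mkdir(root, "0");

	/* oprofilefs_create_ulong() takes the directory, the file name
	 * and the variable that backs the file */
	oprofilefs_create_ulong(dir, "enabled", &my_ctr_enabled);
	return 0;
}
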
diff --git a/arch/arc/lib/strchr-700.S b/arch/arc/lib/strchr-700.S
index 99c10475d477..9c548c7cf001 100644
--- a/arch/arc/lib/strchr-700.S
+++ b/arch/arc/lib/strchr-700.S
@@ -39,9 +39,18 @@ ARC_ENTRY strchr
 	ld.a	r2,[r0,4]
 	sub	r12,r6,r7
 	bic	r12,r12,r6
+#ifdef __LITTLE_ENDIAN__
 	and	r7,r12,r4
 	breq	r7,0,.Loop ; For speed, we want this branch to be unaligned.
 	b	.Lfound_char ; Likewise this one.
+#else
+	and	r12,r12,r4
+	breq	r12,0,.Loop ; For speed, we want this branch to be unaligned.
+	lsr_s	r12,r12,7
+	bic	r2,r7,r6
+	b.d	.Lfound_char_b
+	and_s	r2,r2,r12
+#endif
; /* We require this code address to be unaligned for speed... */
 .Laligned:
 	ld_s	r2,[r0]
@@ -95,6 +104,7 @@ ARC_ENTRY strchr
 	lsr	r7,r7,7
 
 	bic	r2,r7,r6
+.Lfound_char_b:
 	norm	r2,r2
 	sub_s	r0,r0,4
 	asr_s	r2,r2,3
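
The #ifdef added above fixes strchr() for big-endian ARC700 configurations:
the word-at-a-time scan leaves a flag in the 0x80 bit of every matching
byte, and the index of the first match must then be taken from the opposite
end of the word on big-endian. A hedged C analogue of the trick the assembly
implements (an illustration only, not the kernel's code; __LITTLE_ENDIAN__
is the same macro the assembly tests):

#include <stdint.h>

/* Return the index of the first byte of word equal to c, or -1.
 * SWAR detection: bytes equal to c become zero after the XOR, and
 * (x - 0x01010101) & ~x & 0x80808080 flags exactly the zero bytes. */
static int first_match_index(uint32_t word, uint8_t c)
{
	uint32_t x = word ^ (0x01010101u * c);
	uint32_t flags = (x - 0x01010101u) & ~x & 0x80808080u;

	if (!flags)
		return -1;
#ifdef __LITTLE_ENDIAN__
	return __builtin_ctz(flags) >> 3;	/* first byte is lowest flagged */
#else
	return __builtin_clz(flags) >> 3;	/* first byte is highest flagged */
#endif
}
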
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 39119d64287c..bf7976439c39 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -52,6 +52,7 @@ config ARM
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_SYSCALL_TRACEPOINTS
 	select HAVE_UID16
+	select IRQ_FORCED_THREADING
 	select KTIME_SCALAR
 	select PERF_USE_VMALLOC
 	select RTC_LIB
@@ -1372,6 +1373,15 @@ config ARM_ERRATA_798181
 	  which sends an IPI to the CPUs that are running the same ASID
 	  as the one being invalidated.
 
+config ARM_ERRATA_773022
+	bool "ARM errata: incorrect instructions may be executed from loop buffer"
+	depends on CPU_V7
+	help
+	  This option enables the workaround for the 773022 Cortex-A15
+	  (up to r0p4) erratum. In certain rare sequences of code, the
+	  loop buffer may deliver incorrect instructions. This
+	  workaround disables the loop buffer to avoid the erratum.
+
 endmenu
 
 source "arch/arm/common/Kconfig"
@@ -1603,13 +1613,49 @@ config ARCH_NR_GPIO
 
 source kernel/Kconfig.preempt
 
-config HZ
+config HZ_FIXED
 	int
 	default 200 if ARCH_EBSA110 || ARCH_S3C24XX || ARCH_S5P64X0 || \
 		ARCH_S5PV210 || ARCH_EXYNOS4
 	default AT91_TIMER_HZ if ARCH_AT91
 	default SHMOBILE_TIMER_HZ if ARCH_SHMOBILE
-	default 100
+
+choice
+	depends on !HZ_FIXED
+	prompt "Timer frequency"
+
+config HZ_100
+	bool "100 Hz"
+
+config HZ_200
+	bool "200 Hz"
+
+config HZ_250
+	bool "250 Hz"
+
+config HZ_300
+	bool "300 Hz"
+
+config HZ_500
+	bool "500 Hz"
+
+config HZ_1000
+	bool "1000 Hz"
+
+endchoice
+
+config HZ
+	int
+	default HZ_FIXED if HZ_FIXED
+	default 100 if HZ_100
+	default 200 if HZ_200
+	default 250 if HZ_250
+	default 300 if HZ_300
+	default 500 if HZ_500
+	default 1000
+
+config SCHED_HRTICK
+	def_bool HIGH_RES_TIMERS
 
 config SCHED_HRTICK
 	def_bool HIGH_RES_TIMERS
@@ -1746,6 +1792,9 @@ config HAVE_ARCH_TRANSPARENT_HUGEPAGE
 	def_bool y
 	depends on ARM_LPAE
 
+config ARCH_WANT_GENERAL_HUGETLB
+	def_bool y
+
 source "mm/Kconfig"
 
 config FORCE_MAX_ZONEORDER
@@ -2054,8 +2103,7 @@ config KEXEC
 
 	  It is an ongoing process to be certain the hardware in a machine
 	  is properly shutdown, so do not be surprised if this code does not
-	  initially work for you. It may help to enable device hotplugging
-	  support.
+	  initially work for you.
 
 config ATAGS_PROC
 	bool "Export atags in procfs"
@@ -2165,6 +2213,13 @@ config NEON
 	  Say Y to include support code for NEON, the ARMv7 Advanced SIMD
 	  Extension.
 
+config KERNEL_MODE_NEON
+	bool "Support for NEON in kernel mode"
+	default n
+	depends on NEON
+	help
+	  Say Y to include support for NEON in kernel mode.
+
 endmenu
 
 menu "Userspace binary formats"
@@ -2189,7 +2244,7 @@ source "kernel/power/Kconfig"
 
 config ARCH_SUSPEND_POSSIBLE
 	depends on !ARCH_S5PC100
-	depends on CPU_ARM920T || CPU_ARM926T || CPU_SA1100 || \
+	depends on CPU_ARM920T || CPU_ARM926T || CPU_FEROCEON || CPU_SA1100 || \
 		CPU_V6 || CPU_V6K || CPU_V7 || CPU_XSC3 || CPU_XSCALE || CPU_MOHAWK
 	def_bool y
 
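
With the Kconfig rework above, CONFIG_HZ on ARM is no longer pinned at 100:
platforms that need a fixed rate set HZ_FIXED, and everything else gets a
100-1000 Hz choice. Code should therefore derive tick counts from the
configured rate instead of assuming one; a generic sketch (not from this
patch):

#include <linux/jiffies.h>

/* msecs_to_jiffies() scales by the configured HZ, so this is a 10 ms
 * deadline whether HZ is 100 or 1000; a hard-coded "jiffies + 1" would
 * mean 10 ms at HZ=100 but only 1 ms at HZ=1000. */
static unsigned long deadline_in_10ms(void)
{
	return jiffies + msecs_to_jiffies(10);
}
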
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 15337833d05d..9762c84b4198 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -92,6 +92,7 @@ choice
 	config DEBUG_BCM2835
 		bool "Kernel low-level debugging on BCM2835 PL011 UART"
 		depends on ARCH_BCM2835
+		select DEBUG_UART_PL01X
 
 	config DEBUG_CLPS711X_UART1
 		bool "Kernel low-level debugging messages via UART1"
@@ -110,6 +111,7 @@ choice
 	config DEBUG_CNS3XXX
 		bool "Kernel Kernel low-level debugging on Cavium Networks CNS3xxx"
 		depends on ARCH_CNS3XXX
+		select DEBUG_UART_PL01X
 		help
 		  Say Y here if you want the debug print routines to direct
 		  their output to the CNS3xxx UART0.
@@ -117,6 +119,7 @@ choice
 	config DEBUG_DAVINCI_DA8XX_UART1
 		bool "Kernel low-level debugging on DaVinci DA8XX using UART1"
 		depends on ARCH_DAVINCI_DA8XX
+		select DEBUG_UART_8250
 		help
 		  Say Y here if you want the debug print routines to direct
 		  their output to UART1 serial port on DaVinci DA8XX devices.
@@ -124,6 +127,7 @@ choice
 	config DEBUG_DAVINCI_DA8XX_UART2
 		bool "Kernel low-level debugging on DaVinci DA8XX using UART2"
 		depends on ARCH_DAVINCI_DA8XX
+		select DEBUG_UART_8250
 		help
 		  Say Y here if you want the debug print routines to direct
 		  their output to UART2 serial port on DaVinci DA8XX devices.
@@ -131,6 +135,7 @@ choice
 	config DEBUG_DAVINCI_DMx_UART0
 		bool "Kernel low-level debugging on DaVinci DMx using UART0"
 		depends on ARCH_DAVINCI_DMx
+		select DEBUG_UART_8250
 		help
 		  Say Y here if you want the debug print routines to direct
 		  their output to UART0 serial port on DaVinci DMx devices.
@@ -138,6 +143,7 @@ choice
 	config DEBUG_DAVINCI_TNETV107X_UART1
 		bool "Kernel low-level debugging on DaVinci TNETV107x using UART1"
 		depends on ARCH_DAVINCI_TNETV107X
+		select DEBUG_UART_8250
 		help
 		  Say Y here if you want the debug print routines to direct
 		  their output to UART1 serial port on DaVinci TNETV107X
@@ -174,9 +180,26 @@ choice
 		  Say Y here if you want the debug print routines to direct
 		  their output to the 8250 at PCI COM1.
 
+	config DEBUG_HI3620_UART
+		bool "Hisilicon HI3620 Debug UART"
+		depends on ARCH_HI3xxx
+		select DEBUG_UART_PL01X
+		help
+		  Say Y here if you want kernel low-level debugging support
+		  on HI3620 UART.
+
+	config DEBUG_HI3716_UART
+		bool "Hisilicon Hi3716 Debug UART"
+		depends on ARCH_HI3xxx
+		select DEBUG_UART_PL01X
+		help
+		  Say Y here if you want kernel low-level debugging support
+		  on HI3716 UART.
+
 	config DEBUG_HIGHBANK_UART
 		bool "Kernel low-level debugging messages via Highbank UART"
 		depends on ARCH_HIGHBANK
+		select DEBUG_UART_PL01X
 		help
 		  Say Y here if you want the debug print routines to direct
 		  their output to the UART on Highbank based devices.
@@ -191,6 +214,7 @@ choice
 	config DEBUG_IMX23_UART
 		bool "i.MX23 Debug UART"
 		depends on SOC_IMX23
+		select DEBUG_UART_PL01X
 		help
 		  Say Y here if you want kernel low-level debugging support
 		  on i.MX23.
@@ -212,6 +236,7 @@ choice
 	config DEBUG_IMX28_UART
 		bool "i.MX28 Debug UART"
 		depends on SOC_IMX28
+		select DEBUG_UART_PL01X
 		help
 		  Say Y here if you want kernel low-level debugging support
 		  on i.MX28.
@@ -261,6 +286,7 @@ choice
 	config DEBUG_KEYSTONE_UART0
 		bool "Kernel low-level debugging on KEYSTONE2 using UART0"
 		depends on ARCH_KEYSTONE
+		select DEBUG_UART_8250
 		help
 		  Say Y here if you want the debug print routines to direct
 		  their output to UART0 serial port on KEYSTONE2 devices.
@@ -268,6 +294,7 @@ choice
 	config DEBUG_KEYSTONE_UART1
 		bool "Kernel low-level debugging on KEYSTONE2 using UART1"
 		depends on ARCH_KEYSTONE
+		select DEBUG_UART_8250
 		help
 		  Say Y here if you want the debug print routines to direct
 		  their output to UART1 serial port on KEYSTONE2 devices.
@@ -275,6 +302,7 @@ choice
 	config DEBUG_MMP_UART2
 		bool "Kernel low-level debugging message via MMP UART2"
 		depends on ARCH_MMP
+		select DEBUG_UART_8250
 		help
 		  Say Y here if you want kernel low-level debugging support
 		  on MMP UART2.
@@ -282,6 +310,7 @@ choice
 	config DEBUG_MMP_UART3
 		bool "Kernel low-level debugging message via MMP UART3"
 		depends on ARCH_MMP
+		select DEBUG_UART_8250
 		help
 		  Say Y here if you want kernel low-level debugging support
 		  on MMP UART3.
@@ -326,6 +355,7 @@ choice
 	config DEBUG_MVEBU_UART
 		bool "Kernel low-level debugging messages via MVEBU UART (old bootloaders)"
 		depends on ARCH_MVEBU
+		select DEBUG_UART_8250
 		help
 		  Say Y here if you want kernel low-level debugging support
 		  on MVEBU based platforms.
@@ -344,6 +374,7 @@ choice
 	config DEBUG_MVEBU_UART_ALTERNATE
 		bool "Kernel low-level debugging messages via MVEBU UART (new bootloaders)"
 		depends on ARCH_MVEBU
+		select DEBUG_UART_8250
 		help
 		  Say Y here if you want kernel low-level debugging support
 		  on MVEBU based platforms.
@@ -358,6 +389,7 @@ choice
 	config DEBUG_NOMADIK_UART
 		bool "Kernel low-level debugging messages via NOMADIK UART"
 		depends on ARCH_NOMADIK
+		select DEBUG_UART_PL01X
 		help
 		  Say Y here if you want kernel low-level debugging support
 		  on NOMADIK based platforms.
@@ -365,6 +397,7 @@ choice
365 config DEBUG_NSPIRE_CLASSIC_UART 397 config DEBUG_NSPIRE_CLASSIC_UART
366 bool "Kernel low-level debugging via TI-NSPIRE 8250 UART" 398 bool "Kernel low-level debugging via TI-NSPIRE 8250 UART"
367 depends on ARCH_NSPIRE 399 depends on ARCH_NSPIRE
400 select DEBUG_UART_8250
368 help 401 help
369 Say Y here if you want kernel low-level debugging support 402 Say Y here if you want kernel low-level debugging support
370 on TI-NSPIRE classic models. 403 on TI-NSPIRE classic models.
@@ -372,20 +405,82 @@ choice
372 config DEBUG_NSPIRE_CX_UART 405 config DEBUG_NSPIRE_CX_UART
373 bool "Kernel low-level debugging via TI-NSPIRE PL011 UART" 406 bool "Kernel low-level debugging via TI-NSPIRE PL011 UART"
374 depends on ARCH_NSPIRE 407 depends on ARCH_NSPIRE
408 select DEBUG_UART_PL01X
375 help 409 help
376 Say Y here if you want kernel low-level debugging support 410 Say Y here if you want kernel low-level debugging support
377 on TI-NSPIRE CX models. 411 on TI-NSPIRE CX models.
378 412
379 config DEBUG_OMAP2PLUS_UART 413 config DEBUG_OMAP2UART1
380 bool "Kernel low-level debugging messages via OMAP2PLUS UART" 414 bool "OMAP2/3/4 UART1 (omap2/3 sdp boards and some omap3 boards)"
381 depends on ARCH_OMAP2PLUS 415 depends on ARCH_OMAP2PLUS
416 select DEBUG_OMAP2PLUS_UART
382 help 417 help
383 Say Y here if you want kernel low-level debugging support 418 This covers at least h4, 2430sdp, 3430sdp, 3630sdp,
384 on OMAP2PLUS based platforms. 419 omap3 torpedo and 3530 lv som.
420
421 config DEBUG_OMAP2UART2
422 bool "Kernel low-level debugging messages via OMAP2/3/4 UART2"
423 depends on ARCH_OMAP2PLUS
424 select DEBUG_OMAP2PLUS_UART
425
426 config DEBUG_OMAP2UART3
427 bool "Kernel low-level debugging messages via OMAP2 UART3 (n8x0)"
428 depends on ARCH_OMAP2PLUS
429 select DEBUG_OMAP2PLUS_UART
430
431 config DEBUG_OMAP3UART3
432 bool "Kernel low-level debugging messages via OMAP3 UART3 (most omap3 boards)"
433 depends on ARCH_OMAP2PLUS
434 select DEBUG_OMAP2PLUS_UART
435 help
436 This covers at least cm_t3x, beagle, crane, devkit8000,
437 igep00x0, ldp, n900, n9(50), pandora, overo, touchbook,
438 and 3517evm.
439
440 config DEBUG_OMAP4UART3
441 bool "Kernel low-level debugging messages via OMAP4/5 UART3 (omap4 blaze, panda, omap5 sevm)"
442 depends on ARCH_OMAP2PLUS
443 select DEBUG_OMAP2PLUS_UART
444
445 config DEBUG_OMAP3UART4
446 bool "Kernel low-level debugging messages via OMAP36XX UART4"
447 depends on ARCH_OMAP2PLUS
448 select DEBUG_OMAP2PLUS_UART
449
450 config DEBUG_OMAP4UART4
451 bool "Kernel low-level debugging messages via OMAP4/5 UART4"
452 depends on ARCH_OMAP2PLUS
453 select DEBUG_OMAP2PLUS_UART
454
455 config DEBUG_TI81XXUART1
456 bool "Kernel low-level debugging messages via TI81XX UART1 (ti8148evm)"
457 depends on ARCH_OMAP2PLUS
458 select DEBUG_OMAP2PLUS_UART
459
460 config DEBUG_TI81XXUART2
461 bool "Kernel low-level debugging messages via TI81XX UART2"
462 depends on ARCH_OMAP2PLUS
463 select DEBUG_OMAP2PLUS_UART
464
465 config DEBUG_TI81XXUART3
466 bool "Kernel low-level debugging messages via TI81XX UART3 (ti8168evm)"
467 depends on ARCH_OMAP2PLUS
468 select DEBUG_OMAP2PLUS_UART
469
470 config DEBUG_AM33XXUART1
471 bool "Kernel low-level debugging messages via AM33XX UART1"
472 depends on ARCH_OMAP2PLUS
473 select DEBUG_OMAP2PLUS_UART
474
475 config DEBUG_ZOOM_UART
476 bool "Kernel low-level debugging messages via Zoom2/3 UART"
477 depends on ARCH_OMAP2PLUS
478 select DEBUG_OMAP2PLUS_UART
385 479
386 config DEBUG_PICOXCELL_UART 480 config DEBUG_PICOXCELL_UART
387 depends on ARCH_PICOXCELL 481 depends on ARCH_PICOXCELL
388 bool "Use PicoXcell UART for low-level debug" 482 bool "Use PicoXcell UART for low-level debug"
483 select DEBUG_UART_8250
389 help 484 help
390 Say Y here if you want kernel low-level debugging support 485 Say Y here if you want kernel low-level debugging support
391 on PicoXcell based platforms. 486 on PicoXcell based platforms.
@@ -393,6 +488,7 @@ choice
393 config DEBUG_PXA_UART1 488 config DEBUG_PXA_UART1
394 depends on ARCH_PXA 489 depends on ARCH_PXA
395 bool "Use PXA UART1 for low-level debug" 490 bool "Use PXA UART1 for low-level debug"
491 select DEBUG_UART_8250
396 help 492 help
397 Say Y here if you want kernel low-level debugging support 493 Say Y here if you want kernel low-level debugging support
398 on PXA UART1. 494 on PXA UART1.
@@ -400,6 +496,7 @@ choice
400 config DEBUG_REALVIEW_STD_PORT 496 config DEBUG_REALVIEW_STD_PORT
401 bool "RealView Default UART" 497 bool "RealView Default UART"
402 depends on ARCH_REALVIEW 498 depends on ARCH_REALVIEW
499 select DEBUG_UART_PL01X
403 help 500 help
404 Say Y here if you want the debug print routines to direct 501 Say Y here if you want the debug print routines to direct
405 their output to the serial port on RealView EB, PB11MP, PBA8 502 their output to the serial port on RealView EB, PB11MP, PBA8
@@ -408,14 +505,64 @@ choice
408 config DEBUG_REALVIEW_PB1176_PORT 505 config DEBUG_REALVIEW_PB1176_PORT
409 bool "RealView PB1176 UART" 506 bool "RealView PB1176 UART"
410 depends on MACH_REALVIEW_PB1176 507 depends on MACH_REALVIEW_PB1176
508 select DEBUG_UART_PL01X
411 help 509 help
412 Say Y here if you want the debug print routines to direct 510 Say Y here if you want the debug print routines to direct
413 their output to the standard serial port on the RealView 511 their output to the standard serial port on the RealView
414 PB1176 platform. 512 PB1176 platform.
415 513
416 config DEBUG_ROCKCHIP_UART 514 config DEBUG_RK29_UART0
417 bool "Kernel low-level debugging messages via Rockchip UART" 515 bool "Kernel low-level debugging messages via Rockchip RK29 UART0"
516 depends on ARCH_ROCKCHIP
517 select DEBUG_UART_8250
518 help
519 Say Y here if you want kernel low-level debugging support
520 on Rockchip based platforms.
521
522 config DEBUG_RK29_UART1
523 bool "Kernel low-level debugging messages via Rockchip RK29 UART1"
524 depends on ARCH_ROCKCHIP
525 select DEBUG_UART_8250
526 help
527 Say Y here if you want kernel low-level debugging support
528 on Rockchip based platforms.
529
530 config DEBUG_RK29_UART2
531 bool "Kernel low-level debugging messages via Rockchip RK29 UART2"
532 depends on ARCH_ROCKCHIP
533 select DEBUG_UART_8250
534 help
535 Say Y here if you want kernel low-level debugging support
536 on Rockchip based platforms.
537
538 config DEBUG_RK3X_UART0
539 bool "Kernel low-level debugging messages via Rockchip RK3X UART0"
540 depends on ARCH_ROCKCHIP
541 select DEBUG_UART_8250
542 help
543 Say Y here if you want kernel low-level debugging support
544 on Rockchip based platforms.
545
546 config DEBUG_RK3X_UART1
547 bool "Kernel low-level debugging messages via Rockchip RK3X UART1"
548 depends on ARCH_ROCKCHIP
549 select DEBUG_UART_8250
550 help
551 Say Y here if you want kernel low-level debugging support
552 on Rockchip based platforms.
553
554 config DEBUG_RK3X_UART2
555 bool "Kernel low-level debugging messages via Rockchip RK3X UART2"
556 depends on ARCH_ROCKCHIP
557 select DEBUG_UART_8250
558 help
559 Say Y here if you want kernel low-level debugging support
560 on Rockchip based platforms.
561
562 config DEBUG_RK3X_UART3
563 bool "Kernel low-level debugging messages via Rockchip RK3X UART3"
418 depends on ARCH_ROCKCHIP 564 depends on ARCH_ROCKCHIP
565 select DEBUG_UART_8250
419 help 566 help
420 Say Y here if you want kernel low-level debugging support 567 Say Y here if you want kernel low-level debugging support
421 on Rockchip based platforms. 568 on Rockchip based platforms.
@@ -471,6 +618,7 @@ choice
471 config DEBUG_SOCFPGA_UART 618 config DEBUG_SOCFPGA_UART
472 depends on ARCH_SOCFPGA 619 depends on ARCH_SOCFPGA
473 bool "Use SOCFPGA UART for low-level debug" 620 bool "Use SOCFPGA UART for low-level debug"
621 select DEBUG_UART_8250
474 help 622 help
475 Say Y here if you want kernel low-level debugging support 623 Say Y here if you want kernel low-level debugging support
476 on SOCFPGA based platforms. 624 on SOCFPGA based platforms.
@@ -478,6 +626,7 @@ choice
478 config DEBUG_SUNXI_UART0 626 config DEBUG_SUNXI_UART0
479 bool "Kernel low-level debugging messages via sunXi UART0" 627 bool "Kernel low-level debugging messages via sunXi UART0"
480 depends on ARCH_SUNXI 628 depends on ARCH_SUNXI
629 select DEBUG_UART_8250
481 help 630 help
482 Say Y here if you want kernel low-level debugging support 631 Say Y here if you want kernel low-level debugging support
483 on Allwinner A1X based platforms on the UART0. 632 on Allwinner A1X based platforms on the UART0.
@@ -485,13 +634,59 @@ choice
485 config DEBUG_SUNXI_UART1 634 config DEBUG_SUNXI_UART1
486 bool "Kernel low-level debugging messages via sunXi UART1" 635 bool "Kernel low-level debugging messages via sunXi UART1"
487 depends on ARCH_SUNXI 636 depends on ARCH_SUNXI
637 select DEBUG_UART_8250
488 help 638 help
489 Say Y here if you want kernel low-level debugging support 639 Say Y here if you want kernel low-level debugging support
490 on Allwinner A1X based platforms on the UART1. 640 on Allwinner A1X based platforms on the UART1.
491 641
492 config DEBUG_TEGRA_UART 642 config TEGRA_DEBUG_UART_AUTO_ODMDATA
643 bool "Kernel low-level debugging messages via Tegra UART via ODMDATA"
644 depends on ARCH_TEGRA
645 select DEBUG_TEGRA_UART
646 help
647 Automatically determines which UART to use for low-level
648 debug based on the ODMDATA value. This value is part of
649 the BCT, and is written to the boot memory device using
 650 nvflash or another flashing tool. When bits 19:18 are 3,
 651 bits 17:15 indicate which UART to use; 0/1/2/3/4
652 are UART A/B/C/D/E.
653
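The bitfield logic above is easy to get wrong when reading a raw ODMDATA word, so here is a minimal C sketch of the decode it describes; odmdata_decode_uart() and the sample value are illustrative only, not kernel code.

/* Minimal sketch of the ODMDATA decode described above;
 * odmdata_decode_uart() and the sample value are illustrative only. */
#include <stdio.h>

static int odmdata_decode_uart(unsigned int odmdata)
{
	if (((odmdata >> 18) & 0x3) != 3)	/* bits 19:18 must be 3 */
		return -1;
	return (odmdata >> 15) & 0x7;		/* bits 17:15: 0..4 = UART A..E */
}

int main(void)
{
	unsigned int odmdata = 0x000d8000;	/* bits 19:18 = 3, bits 17:15 = 3 */
	int uart = odmdata_decode_uart(odmdata);

	if (uart >= 0 && uart <= 4)
		printf("low-level debug on UART %c\n", 'A' + uart);
	else
		printf("ODMDATA does not select a debug UART\n");
	return 0;
}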
654 config TEGRA_DEBUG_UARTA
655 bool "Kernel low-level debugging messages via Tegra UART A"
656 depends on ARCH_TEGRA
657 select DEBUG_TEGRA_UART
658 help
659 Say Y here if you want kernel low-level debugging support
660 on Tegra based platforms.
661
662 config TEGRA_DEBUG_UARTB
663 bool "Kernel low-level debugging messages via Tegra UART B"
664 depends on ARCH_TEGRA
665 select DEBUG_TEGRA_UART
666 help
667 Say Y here if you want kernel low-level debugging support
668 on Tegra based platforms.
669
670 config TEGRA_DEBUG_UARTC
671 bool "Kernel low-level debugging messages via Tegra UART C"
672 depends on ARCH_TEGRA
673 select DEBUG_TEGRA_UART
674 help
675 Say Y here if you want kernel low-level debugging support
676 on Tegra based platforms.
677
678 config TEGRA_DEBUG_UARTD
679 bool "Kernel low-level debugging messages via Tegra UART D"
680 depends on ARCH_TEGRA
681 select DEBUG_TEGRA_UART
682 help
683 Say Y here if you want kernel low-level debugging support
684 on Tegra based platforms.
685
686 config TEGRA_DEBUG_UARTE
687 bool "Kernel low-level debugging messages via Tegra UART E"
493 depends on ARCH_TEGRA 688 depends on ARCH_TEGRA
494 bool "Use Tegra UART for low-level debug" 689 select DEBUG_TEGRA_UART
495 help 690 help
496 Say Y here if you want kernel low-level debugging support 691 Say Y here if you want kernel low-level debugging support
497 on Tegra based platforms. 692 on Tegra based platforms.
@@ -510,19 +705,32 @@ choice
510 Say Y here if you want the debug print routines to direct 705 Say Y here if you want the debug print routines to direct
511 their output to the uart1 port on SiRFmarco devices. 706 their output to the uart1 port on SiRFmarco devices.
512 707
513 config DEBUG_STI_UART 708 config STIH41X_DEBUG_ASC2
709 bool "Use StiH415/416 ASC2 UART for low-level debug"
710 depends on ARCH_STI
711 select DEBUG_STI_UART
712 help
713 Say Y here if you want kernel low-level debugging support
 714 on STiH415/416 based platforms like b2000, where the
 715 default UART is wired up to ASC2.
716
717 If unsure, say N.
718
719 config STIH41X_DEBUG_SBC_ASC1
720 bool "Use StiH415/416 SBC ASC1 UART for low-level debug"
514 depends on ARCH_STI 721 depends on ARCH_STI
515 bool "Use StiH415/416 ASC for low-level debug" 722 select DEBUG_STI_UART
516 help 723 help
517 Say Y here if you want kernel low-level debugging support 724 Say Y here if you want kernel low-level debugging support
 518 on StiH415/416 based platforms like B2000, B2020. 725 on STiH415/416 based platforms like b2020, where the
 519 It support UART2 and SBC_UART1. 726 default UART is wired up to SBC ASC1.
520 727
521 If unsure, say N. 728 If unsure, say N.
522 729
523 config DEBUG_U300_UART 730 config DEBUG_U300_UART
524 bool "Kernel low-level debugging messages via U300 UART0" 731 bool "Kernel low-level debugging messages via U300 UART0"
525 depends on ARCH_U300 732 depends on ARCH_U300
733 select DEBUG_UART_PL01X
526 help 734 help
527 Say Y here if you want the debug print routines to direct 735 Say Y here if you want the debug print routines to direct
528 their output to the uart port on U300 devices. 736 their output to the uart port on U300 devices.
@@ -548,6 +756,7 @@ choice
548 config DEBUG_VEXPRESS_UART0_CA9 756 config DEBUG_VEXPRESS_UART0_CA9
549 bool "Use PL011 UART0 at 0x10009000 (V2P-CA9 core tile)" 757 bool "Use PL011 UART0 at 0x10009000 (V2P-CA9 core tile)"
550 depends on ARCH_VEXPRESS 758 depends on ARCH_VEXPRESS
759 select DEBUG_UART_PL01X
551 help 760 help
552 This option selects UART0 at 0x10009000. Except for custom models, 761 This option selects UART0 at 0x10009000. Except for custom models,
553 this applies only to the V2P-CA9 tile. 762 this applies only to the V2P-CA9 tile.
@@ -555,6 +764,7 @@ choice
555 config DEBUG_VEXPRESS_UART0_RS1 764 config DEBUG_VEXPRESS_UART0_RS1
 556 bool "Use PL011 UART0 at 0x1c090000 (RS1 compliant tiles)" 765
557 depends on ARCH_VEXPRESS 766 depends on ARCH_VEXPRESS
767 select DEBUG_UART_PL01X
558 help 768 help
559 This option selects UART0 at 0x1c090000. This applies to most 769 This option selects UART0 at 0x1c090000. This applies to most
560 of the tiles using the RS1 memory map, including all new A-class 770 of the tiles using the RS1 memory map, including all new A-class
@@ -563,6 +773,7 @@ choice
563 config DEBUG_VEXPRESS_UART0_CRX 773 config DEBUG_VEXPRESS_UART0_CRX
564 bool "Use PL011 UART0 at 0xb0090000 (Cortex-R compliant tiles)" 774 bool "Use PL011 UART0 at 0xb0090000 (Cortex-R compliant tiles)"
565 depends on ARCH_VEXPRESS && !MMU 775 depends on ARCH_VEXPRESS && !MMU
776 select DEBUG_UART_PL01X
566 help 777 help
567 This option selects UART0 at 0xb0090000. This is appropriate for 778 This option selects UART0 at 0xb0090000. This is appropriate for
568 Cortex-R series tiles and SMMs, such as Cortex-R5 and Cortex-R7 779 Cortex-R series tiles and SMMs, such as Cortex-R5 and Cortex-R7
@@ -579,7 +790,7 @@ choice
579 depends on !ARCH_MULTIPLATFORM 790 depends on !ARCH_MULTIPLATFORM
580 help 791 help
581 Say Y here if your platform doesn't provide a UART option 792 Say Y here if your platform doesn't provide a UART option
582 below. This relies on your platform choosing the right UART 793 above. This relies on your platform choosing the right UART
583 definition internally in order for low-level debugging to 794 definition internally in order for low-level debugging to
584 work. 795 work.
585 796
@@ -610,11 +821,41 @@ choice
610 For more details about semihosting, please see 821 For more details about semihosting, please see
611 chapter 8 of DUI0203I_rvct_developer_guide.pdf from ARM Ltd. 822 chapter 8 of DUI0203I_rvct_developer_guide.pdf from ARM Ltd.
612 823
824 config DEBUG_LL_UART_8250
825 bool "Kernel low-level debugging via 8250 UART"
826 help
 827 Say Y here if you wish the debug print routines to direct
 828 their output to an 8250 UART. You can use this option
 829 to provide the parameters for the 8250 UART rather than
 830 selecting one of the platform-specific options above if
831 you know the parameters for the port.
832
 833 This option is preferred over the platform-specific
 834 options; the platform-specific options are deprecated
 835 and will soon be removed.
836
837 config DEBUG_LL_UART_PL01X
838 bool "Kernel low-level debugging via ARM Ltd PL01x Primecell UART"
839 help
 840 Say Y here if you wish the debug print routines to direct
 841 their output to a PL01x Primecell UART. You can use
 842 this option to provide the parameters for the UART
 843 rather than selecting one of the platform-specific
844 options above if you know the parameters for the port.
845
 846 This option is preferred over the platform-specific
 847 options; the platform-specific options are deprecated
 848 and will soon be removed.
849
613endchoice 850endchoice
614 851
615config DEBUG_EXYNOS_UART 852config DEBUG_EXYNOS_UART
616 bool 853 bool
617 854
855config DEBUG_OMAP2PLUS_UART
856 bool
857 depends on ARCH_OMAP2PLUS
858
618config DEBUG_IMX_UART_PORT 859config DEBUG_IMX_UART_PORT
619 int "i.MX Debug UART Port Selection" if DEBUG_IMX1_UART || \ 860 int "i.MX Debug UART Port Selection" if DEBUG_IMX1_UART || \
620 DEBUG_IMX25_UART || \ 861 DEBUG_IMX25_UART || \
@@ -631,140 +872,19 @@ config DEBUG_IMX_UART_PORT
631 Choose UART port on which kernel low-level debug messages 872 Choose UART port on which kernel low-level debug messages
632 should be output. 873 should be output.
633 874
634choice 875config DEBUG_TEGRA_UART
635 prompt "Low-level debug console UART" 876 bool
636 depends on DEBUG_OMAP2PLUS_UART 877 depends on ARCH_TEGRA
637
638 config DEBUG_OMAP2UART1
639 bool "OMAP2/3/4 UART1 (omap2/3 sdp boards and some omap3 boards)"
640 help
641 This covers at least h4, 2430sdp, 3430sdp, 3630sdp,
642 omap3 torpedo and 3530 lv som.
643
644 config DEBUG_OMAP2UART2
645 bool "OMAP2/3/4 UART2"
646
647 config DEBUG_OMAP2UART3
648 bool "OMAP2 UART3 (n8x0)"
649
650 config DEBUG_OMAP3UART3
651 bool "OMAP3 UART3 (most omap3 boards)"
652 help
653 This covers at least cm_t3x, beagle, crane, devkit8000,
654 igep00x0, ldp, n900, n9(50), pandora, overo, touchbook,
655 and 3517evm.
656
657 config DEBUG_OMAP4UART3
658 bool "OMAP4/5 UART3 (omap4 blaze, panda, omap5 sevm)"
659
660 config DEBUG_OMAP3UART4
661 bool "OMAP36XX UART4"
662
663 config DEBUG_OMAP4UART4
664 bool "OMAP4/5 UART4"
665
666 config DEBUG_TI81XXUART1
667 bool "TI81XX UART1 (ti8148evm)"
668
669 config DEBUG_TI81XXUART2
670 bool "TI81XX UART2"
671
672 config DEBUG_TI81XXUART3
673 bool "TI81XX UART3 (ti8168evm)"
674
675 config DEBUG_AM33XXUART1
676 bool "AM33XX UART1"
677
678 config DEBUG_ZOOM_UART
679 bool "Zoom2/3 UART"
680endchoice
681
682choice
683 prompt "Low-level debug console UART"
684 depends on DEBUG_ROCKCHIP_UART
685
686 config DEBUG_RK29_UART0
687 bool "RK29 UART0"
688
689 config DEBUG_RK29_UART1
690 bool "RK29 UART1"
691
692 config DEBUG_RK29_UART2
693 bool "RK29 UART2"
694
695 config DEBUG_RK3X_UART0
696 bool "RK3X UART0"
697
698 config DEBUG_RK3X_UART1
699 bool "RK3X UART1"
700
701 config DEBUG_RK3X_UART2
702 bool "RK3X UART2"
703
704 config DEBUG_RK3X_UART3
705 bool "RK3X UART3"
706endchoice
707
708choice
709 prompt "Low-level debug console UART"
710 depends on DEBUG_LL && DEBUG_TEGRA_UART
711
712 config TEGRA_DEBUG_UART_AUTO_ODMDATA
713 bool "Via ODMDATA"
714 help
715 Automatically determines which UART to use for low-level debug based
716 on the ODMDATA value. This value is part of the BCT, and is written
717 to the boot memory device using nvflash, or other flashing tool.
718 When bits 19:18 are 3, then bits 17:15 indicate which UART to use;
719 0/1/2/3/4 are UART A/B/C/D/E.
720
721 config TEGRA_DEBUG_UARTA
722 bool "UART A"
723
724 config TEGRA_DEBUG_UARTB
725 bool "UART B"
726
727 config TEGRA_DEBUG_UARTC
728 bool "UART C"
729
730 config TEGRA_DEBUG_UARTD
731 bool "UART D"
732
733 config TEGRA_DEBUG_UARTE
734 bool "UART E"
735
736endchoice
737
738choice
739 prompt "Low-level debug console UART"
740 depends on DEBUG_LL && DEBUG_STI_UART
741
742 config STIH41X_DEBUG_ASC2
743 bool "ASC2 UART"
744 help
745 Say Y here if you want kernel low-level debugging support
746 on STiH415/416 based platforms like b2000, which has
747 default UART wired up to ASC2.
748
749 If unsure, say N.
750
751 config STIH41X_DEBUG_SBC_ASC1
752 bool "SBC ASC1 UART"
753 help
754 Say Y here if you want kernel low-level debugging support
755 on STiH415/416 based platforms like b2020. which has
756 default UART wired up to SBC ASC1.
757
758 If unsure, say N.
759 878
760endchoice 879config DEBUG_STI_UART
880 bool
881 depends on ARCH_STI
761 882
762config DEBUG_LL_INCLUDE 883config DEBUG_LL_INCLUDE
763 string 884 string
764 default "debug/bcm2835.S" if DEBUG_BCM2835 885 default "debug/8250.S" if DEBUG_LL_UART_8250 || DEBUG_UART_8250
765 default "debug/cns3xxx.S" if DEBUG_CNS3XXX 886 default "debug/pl01x.S" if DEBUG_LL_UART_PL01X || DEBUG_UART_PL01X
766 default "debug/exynos.S" if DEBUG_EXYNOS_UART 887 default "debug/exynos.S" if DEBUG_EXYNOS_UART
767 default "debug/highbank.S" if DEBUG_HIGHBANK_UART
768 default "debug/icedcc.S" if DEBUG_ICEDCC 888 default "debug/icedcc.S" if DEBUG_ICEDCC
769 default "debug/imx.S" if DEBUG_IMX1_UART || \ 889 default "debug/imx.S" if DEBUG_IMX1_UART || \
770 DEBUG_IMX25_UART || \ 890 DEBUG_IMX25_UART || \
@@ -775,43 +895,175 @@ config DEBUG_LL_INCLUDE
775 DEBUG_IMX53_UART ||\ 895 DEBUG_IMX53_UART ||\
776 DEBUG_IMX6Q_UART || \ 896 DEBUG_IMX6Q_UART || \
777 DEBUG_IMX6SL_UART 897 DEBUG_IMX6SL_UART
778 default "debug/keystone.S" if DEBUG_KEYSTONE_UART0 || \
779 DEBUG_KEYSTONE_UART1
780 default "debug/msm.S" if DEBUG_MSM_UART1 || \ 898 default "debug/msm.S" if DEBUG_MSM_UART1 || \
781 DEBUG_MSM_UART2 || \ 899 DEBUG_MSM_UART2 || \
782 DEBUG_MSM_UART3 || \ 900 DEBUG_MSM_UART3 || \
783 DEBUG_MSM8660_UART || \ 901 DEBUG_MSM8660_UART || \
784 DEBUG_MSM8960_UART 902 DEBUG_MSM8960_UART
785 default "debug/mvebu.S" if DEBUG_MVEBU_UART || \
786 DEBUG_MVEBU_UART_ALTERNATE
787 default "debug/mxs.S" if DEBUG_IMX23_UART || DEBUG_IMX28_UART
788 default "debug/nomadik.S" if DEBUG_NOMADIK_UART
789 default "debug/nspire.S" if DEBUG_NSPIRE_CX_UART || \
790 DEBUG_NSPIRE_CLASSIC_UART
791 default "debug/omap2plus.S" if DEBUG_OMAP2PLUS_UART 903 default "debug/omap2plus.S" if DEBUG_OMAP2PLUS_UART
792 default "debug/picoxcell.S" if DEBUG_PICOXCELL_UART
793 default "debug/pxa.S" if DEBUG_PXA_UART1 || DEBUG_MMP_UART2 || \
794 DEBUG_MMP_UART3
795 default "debug/rockchip.S" if DEBUG_ROCKCHIP_UART
796 default "debug/sirf.S" if DEBUG_SIRFPRIMA2_UART1 || DEBUG_SIRFMARCO_UART1 904 default "debug/sirf.S" if DEBUG_SIRFPRIMA2_UART1 || DEBUG_SIRFMARCO_UART1
797 default "debug/socfpga.S" if DEBUG_SOCFPGA_UART
798 default "debug/sti.S" if DEBUG_STI_UART 905 default "debug/sti.S" if DEBUG_STI_UART
799 default "debug/sunxi.S" if DEBUG_SUNXI_UART0 || DEBUG_SUNXI_UART1
800 default "debug/tegra.S" if DEBUG_TEGRA_UART 906 default "debug/tegra.S" if DEBUG_TEGRA_UART
801 default "debug/u300.S" if DEBUG_U300_UART
802 default "debug/ux500.S" if DEBUG_UX500_UART 907 default "debug/ux500.S" if DEBUG_UX500_UART
803 default "debug/vexpress.S" if DEBUG_VEXPRESS_UART0_DETECT || \ 908 default "debug/vexpress.S" if DEBUG_VEXPRESS_UART0_DETECT
804 DEBUG_VEXPRESS_UART0_CA9 || DEBUG_VEXPRESS_UART0_RS1 || \
805 DEBUG_VEXPRESS_UART0_CRX
806 default "debug/vt8500.S" if DEBUG_VT8500_UART0 909 default "debug/vt8500.S" if DEBUG_VT8500_UART0
807 default "debug/zynq.S" if DEBUG_ZYNQ_UART0 || DEBUG_ZYNQ_UART1 910 default "debug/zynq.S" if DEBUG_ZYNQ_UART0 || DEBUG_ZYNQ_UART1
808 default "mach/debug-macro.S" 911 default "mach/debug-macro.S"
809 912
913# Compatibility options for PL01x
914config DEBUG_UART_PL01X
915 def_bool ARCH_EP93XX || \
916 ARCH_INTEGRATOR || \
917 ARCH_SPEAR3XX || \
918 ARCH_SPEAR6XX || \
919 ARCH_SPEAR13XX || \
920 ARCH_VERSATILE
921
922# Compatibility options for 8250
923config DEBUG_UART_8250
924 def_bool ARCH_DOVE || ARCH_EBSA110 || \
925 (FOOTBRIDGE && !DEBUG_DC21285_PORT) || \
926 ARCH_GEMINI || ARCH_IOP13XX || ARCH_IOP32X || \
927 ARCH_IOP33X || ARCH_IXP4XX || ARCH_KIRKWOOD || \
928 ARCH_LPC32XX || ARCH_MV78XX0 || ARCH_ORION5X || ARCH_RPC
929
930config DEBUG_UART_PHYS
931 hex "Physical base address of debug UART"
932 default 0x01c20000 if DEBUG_DAVINCI_DMx_UART0
933 default 0x01c28000 if DEBUG_SUNXI_UART0
934 default 0x01c28400 if DEBUG_SUNXI_UART1
935 default 0x01d0c000 if DEBUG_DAVINCI_DA8XX_UART1
936 default 0x01d0d000 if DEBUG_DAVINCI_DA8XX_UART2
937 default 0x02530c00 if DEBUG_KEYSTONE_UART0
938 default 0x02531000 if DEBUG_KEYSTONE_UART1
939 default 0x03010fe0 if ARCH_RPC
940 default 0x08108300 if DEBUG_DAVINCI_TNETV107X_UART1
941 default 0x10009000 if DEBUG_REALVIEW_STD_PORT || DEBUG_CNS3XXX || \
942 DEBUG_VEXPRESS_UART0_CA9
943 default 0x1010c000 if DEBUG_REALVIEW_PB1176_PORT
944 default 0x10124000 if DEBUG_RK3X_UART0
945 default 0x10126000 if DEBUG_RK3X_UART1
946 default 0x101f1000 if ARCH_VERSATILE
947 default 0x101fb000 if DEBUG_NOMADIK_UART
948 default 0x16000000 if ARCH_INTEGRATOR
949 default 0x1c090000 if DEBUG_VEXPRESS_UART0_RS1
950 default 0x20060000 if DEBUG_RK29_UART0
951 default 0x20064000 if DEBUG_RK29_UART1 || DEBUG_RK3X_UART2
952 default 0x20068000 if DEBUG_RK29_UART2 || DEBUG_RK3X_UART3
953 default 0x20201000 if DEBUG_BCM2835
954 default 0x40090000 if ARCH_LPC32XX
955 default 0x40100000 if DEBUG_PXA_UART1
956 default 0x42000000 if ARCH_GEMINI
957 default 0x7c0003f8 if FOOTBRIDGE
958 default 0x80230000 if DEBUG_PICOXCELL_UART
959 default 0x80070000 if DEBUG_IMX23_UART
960 default 0x80074000 if DEBUG_IMX28_UART
961 default 0x808c0000 if ARCH_EP93XX
962 default 0x90020000 if DEBUG_NSPIRE_CLASSIC_UART || DEBUG_NSPIRE_CX_UART
963 default 0xb0090000 if DEBUG_VEXPRESS_UART0_CRX
964 default 0xc0013000 if DEBUG_U300_UART
965 default 0xc8000000 if ARCH_IXP4XX && !CPU_BIG_ENDIAN
966 default 0xc8000003 if ARCH_IXP4XX && CPU_BIG_ENDIAN
967 default 0xd0000000 if ARCH_SPEAR3XX || ARCH_SPEAR6XX
968 default 0xd0012000 if DEBUG_MVEBU_UART
969 default 0xd4017000 if DEBUG_MMP_UART2
970 default 0xd4018000 if DEBUG_MMP_UART3
971 default 0xe0000000 if ARCH_SPEAR13XX
972 default 0xf0000be0 if ARCH_EBSA110
973 default 0xf1012000 if DEBUG_MVEBU_UART_ALTERNATE
974 default 0xf1012000 if ARCH_DOVE || ARCH_KIRKWOOD || ARCH_MV78XX0 || \
975 ARCH_ORION5X
976 default 0xf8b00000 if DEBUG_HI3716_UART
977 default 0xfcb00000 if DEBUG_HI3620_UART
978 default 0xfe800000 if ARCH_IOP32X
979 default 0xffc02000 if DEBUG_SOCFPGA_UART
980 default 0xffd82340 if ARCH_IOP13XX
981 default 0xfff36000 if DEBUG_HIGHBANK_UART
982 default 0xfffff700 if ARCH_IOP33X
983 depends on DEBUG_LL_UART_8250 || DEBUG_LL_UART_PL01X || \
984 DEBUG_UART_8250 || DEBUG_UART_PL01X
985
986config DEBUG_UART_VIRT
987 hex "Virtual base address of debug UART"
988 default 0xe0010fe0 if ARCH_RPC
989 default 0xf0000be0 if ARCH_EBSA110
990 default 0xf0009000 if DEBUG_CNS3XXX
991 default 0xf01fb000 if DEBUG_NOMADIK_UART
992 default 0xf0201000 if DEBUG_BCM2835
993 default 0xf11f1000 if ARCH_VERSATILE
994 default 0xf1600000 if ARCH_INTEGRATOR
995 default 0xf1c28000 if DEBUG_SUNXI_UART0
996 default 0xf1c28400 if DEBUG_SUNXI_UART1
997 default 0xf2100000 if DEBUG_PXA_UART1
998 default 0xf4090000 if ARCH_LPC32XX
999 default 0xf4200000 if ARCH_GEMINI
1000 default 0xf8009000 if DEBUG_VEXPRESS_UART0_CA9
1001 default 0xf8090000 if DEBUG_VEXPRESS_UART0_RS1
1002 default 0xfb009000 if DEBUG_REALVIEW_STD_PORT
1003 default 0xfb10c000 if DEBUG_REALVIEW_PB1176_PORT
1004 default 0xfd000000 if ARCH_SPEAR3XX || ARCH_SPEAR6XX
1005 default 0xfd000000 if ARCH_SPEAR13XX
1006 default 0xfd012000 if ARCH_MV78XX0
1007 default 0xfde12000 if ARCH_DOVE
1008 default 0xfe012000 if ARCH_ORION5X
1009 default 0xfe017000 if DEBUG_MMP_UART2
1010 default 0xfe018000 if DEBUG_MMP_UART3
1011 default 0xfe100000 if DEBUG_IMX23_UART || DEBUG_IMX28_UART
1012 default 0xfe230000 if DEBUG_PICOXCELL_UART
1013 default 0xfe800000 if ARCH_IOP32X
1014 default 0xfeb00000 if DEBUG_HI3620_UART || DEBUG_HI3716_UART
1015 default 0xfeb24000 if DEBUG_RK3X_UART0
1016 default 0xfeb26000 if DEBUG_RK3X_UART1
1017 default 0xfeb30c00 if DEBUG_KEYSTONE_UART0
1018 default 0xfeb31000 if DEBUG_KEYSTONE_UART1
1019 default 0xfec12000 if DEBUG_MVEBU_UART || DEBUG_MVEBU_UART_ALTERNATE
1020 default 0xfed60000 if DEBUG_RK29_UART0
1021 default 0xfed64000 if DEBUG_RK29_UART1 || DEBUG_RK3X_UART2
1022 default 0xfed68000 if DEBUG_RK29_UART2 || DEBUG_RK3X_UART3
1023 default 0xfec02000 if DEBUG_SOCFPGA_UART
1024 default 0xfec20000 if DEBUG_DAVINCI_DMx_UART0
1025 default 0xfed0c000 if DEBUG_DAVINCI_DA8XX_UART1
1026 default 0xfed0d000 if DEBUG_DAVINCI_DA8XX_UART2
1027 default 0xfed12000 if ARCH_KIRKWOOD
1028 default 0xfedc0000 if ARCH_EP93XX
1029 default 0xfee003f8 if FOOTBRIDGE
1030 default 0xfee08300 if DEBUG_DAVINCI_TNETV107X_UART1
1031 default 0xfee20000 if DEBUG_NSPIRE_CLASSIC_UART || DEBUG_NSPIRE_CX_UART
1032 default 0xfef36000 if DEBUG_HIGHBANK_UART
1033 default 0xfee82340 if ARCH_IOP13XX
1034 default 0xfef00000 if ARCH_IXP4XX && !CPU_BIG_ENDIAN
1035 default 0xfef00003 if ARCH_IXP4XX && CPU_BIG_ENDIAN
1036 default 0xfefff700 if ARCH_IOP33X
1037 default 0xff003000 if DEBUG_U300_UART
1038 default DEBUG_UART_PHYS if !MMU
1039 depends on DEBUG_LL_UART_8250 || DEBUG_LL_UART_PL01X || \
1040 DEBUG_UART_8250 || DEBUG_UART_PL01X
1041
1042config DEBUG_UART_8250_SHIFT
1043 int "Register offset shift for the 8250 debug UART"
1044 depends on DEBUG_LL_UART_8250 || DEBUG_UART_8250
1045 default 0 if FOOTBRIDGE || ARCH_IOP32X
1046 default 2
1047
1048config DEBUG_UART_8250_WORD
1049 bool "Use 32-bit accesses for 8250 UART"
1050 depends on DEBUG_LL_UART_8250 || DEBUG_UART_8250
1051 depends on DEBUG_UART_8250_SHIFT >= 2
1052 default y if DEBUG_PICOXCELL_UART || DEBUG_SOCFPGA_UART || \
1053 ARCH_KEYSTONE || \
1054 DEBUG_DAVINCI_DMx_UART0 || DEBUG_DAVINCI_DA8XX_UART1 || \
1055 DEBUG_DAVINCI_DA8XX_UART2 || DEBUG_DAVINCI_TNETV107X_UART1
1056
1057config DEBUG_UART_8250_FLOW_CONTROL
1058 bool "Enable flow control for 8250 UART"
1059 depends on DEBUG_LL_UART_8250 || DEBUG_UART_8250
1060 default y if ARCH_EBSA110 || FOOTBRIDGE || ARCH_GEMINI || ARCH_RPC
1061
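Taken together, DEBUG_UART_PHYS/VIRT give the port's base address, DEBUG_UART_8250_SHIFT spaces the registers, DEBUG_UART_8250_WORD selects the access width, and DEBUG_UART_8250_FLOW_CONTROL gates output on CTS. A rough C equivalent of how the debug/8250.S macros consume these values follows; the helper names are illustrative stand-ins, not kernel symbols.

/* Rough C equivalent of the 8250 debug macros' use of the options
 * above; uart_read/uart_write/debug_putc are illustrative stand-ins,
 * not kernel symbols. */
#include <stdint.h>

#define UART_TX		0x00	/* transmit holding register */
#define UART_LSR	0x05	/* line status register */
#define UART_MSR	0x06	/* modem status register */
#define UART_LSR_THRE	0x20	/* transmit holding register empty */
#define UART_MSR_CTS	0x10	/* clear to send */

#define UART_SHIFT	2	/* DEBUG_UART_8250_SHIFT */
#define UART_WORD	1	/* DEBUG_UART_8250_WORD: 32-bit accesses */
#define UART_FLOW	0	/* DEBUG_UART_8250_FLOW_CONTROL */

static uint32_t uart_read(uintptr_t base, unsigned int reg)
{
	uintptr_t addr = base + (reg << UART_SHIFT);	/* registers 1 << SHIFT apart */

	return UART_WORD ? *(volatile uint32_t *)addr
			 : *(volatile uint8_t *)addr;
}

static void uart_write(uintptr_t base, unsigned int reg, uint32_t val)
{
	uintptr_t addr = base + (reg << UART_SHIFT);

	if (UART_WORD)
		*(volatile uint32_t *)addr = val;
	else
		*(volatile uint8_t *)addr = (uint8_t)val;
}

static void debug_putc(uintptr_t base, char c)	/* base = DEBUG_UART_VIRT */
{
	while (!(uart_read(base, UART_LSR) & UART_LSR_THRE))
		;	/* wait for room in the transmitter */
	if (UART_FLOW)
		while (!(uart_read(base, UART_MSR) & UART_MSR_CTS))
			;	/* honour CTS when flow control is enabled */
	uart_write(base, UART_TX, (uint32_t)c);
}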
810config DEBUG_UNCOMPRESS 1062config DEBUG_UNCOMPRESS
811 bool 1063 bool
812 depends on ARCH_MULTIPLATFORM || ARCH_MSM 1064 depends on ARCH_MULTIPLATFORM || ARCH_MSM
813 default y if DEBUG_LL && !DEBUG_OMAP2PLUS_UART && \ 1065 default y if DEBUG_LL && !DEBUG_OMAP2PLUS_UART && \
814 !DEBUG_TEGRA_UART 1066 (!DEBUG_TEGRA_UART || !ZBOOT_ROM)
815 help 1067 help
816 This option influences the normal decompressor output for 1068 This option influences the normal decompressor output for
817 multiplatform kernels. Normally, multiplatform kernels disable 1069 multiplatform kernels. Normally, multiplatform kernels disable
diff --git a/arch/arm/boot/compressed/head-shmobile.S b/arch/arm/boot/compressed/head-shmobile.S
index e2d636336b7c..e7f80928949c 100644
--- a/arch/arm/boot/compressed/head-shmobile.S
+++ b/arch/arm/boot/compressed/head-shmobile.S
@@ -55,12 +55,47 @@ __tmp_stack:
55__continue: 55__continue:
56#endif /* CONFIG_ZBOOT_ROM_MMC || CONFIG_ZBOOT_ROM_SH_MOBILE_SDHI */ 56#endif /* CONFIG_ZBOOT_ROM_MMC || CONFIG_ZBOOT_ROM_SH_MOBILE_SDHI */
57 57
58 /* Set board ID necessary for boot */ 58 adr r0, dtb_info
59 ldr r7, 1f @ Set machine type register 59 ldmia r0, {r1, r3, r4, r5, r7}
60 mov r8, #0 @ pass null pointer as atag 60
61 sub r0, r0, r1 @ calculate the delta offset
62 add r5, r5, r0 @ _edata
63
64 ldr lr, [r5, #0] @ check if valid DTB is present
65 cmp lr, r3
66 bne 0f
67
68 add r9, r7, #31 @ rounded up to a multiple
69 bic r9, r9, #31 @ ... of 32 bytes
70
71 add r6, r9, r5 @ copy from _edata
72 add r9, r9, r4 @ to MEMORY_START
73
741: ldmdb r6!, {r0 - r3, r10 - r12, lr}
75 cmp r6, r5
76 stmdb r9!, {r0 - r3, r10 - r12, lr}
77 bhi 1b
78
79 /* Success: Zero board ID, pointer to start of memory for atag/dtb */
80 mov r7, #0
81 mov r8, r4
61 b 2f 82 b 2f
62 83
631 : .long MACH_TYPE 84 .align 2
85dtb_info:
86 .word dtb_info
87#ifndef __ARMEB__
88 .word 0xedfe0dd0 @ sig is 0xd00dfeed big endian
89#else
90 .word 0xd00dfeed
91#endif
92 .word MEMORY_START
93 .word _edata
94 .word 0x4000 @ maximum DTB size
950:
96 /* Failure: Zero board ID, NULL atag/dtb */
97 mov r7, #0
98 mov r8, #0 @ pass null pointer as atag
642 : 992 :
65 100
66#endif /* CONFIG_ZBOOT_ROM */ 101#endif /* CONFIG_ZBOOT_ROM */
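The new head-shmobile.S path probes for a DTB appended at _edata by checking the flattened-device-tree magic, which is stored big-endian in the blob (hence the byte-swapped constant 0xedfe0dd0 on little-endian CPUs). A small hedged C rendering of that check, where dtb_is_valid() is an illustrative helper rather than a kernel function:

/* A hedged C rendering of the appended-DTB check performed above;
 * dtb_is_valid() is an illustrative helper, not a kernel function. */
#include <stdint.h>
#include <string.h>

#define FDT_MAGIC	0xd00dfeedU	/* stored big-endian in the blob */

static int dtb_is_valid(const void *blob)
{
	uint32_t first;

	memcpy(&first, blob, sizeof(first));	/* first word at _edata */
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	/* Little-endian CPUs read the big-endian magic byte-swapped,
	 * which is why the assembly compares against 0xedfe0dd0. */
	return first == 0xedfe0dd0U;
#else
	return first == FDT_MAGIC;
#endif
}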
diff --git a/arch/arm/boot/dts/am335x-bone.dts b/arch/arm/boot/dts/am335x-bone.dts
index 444b4ede0d60..d318987d44a1 100644
--- a/arch/arm/boot/dts/am335x-bone.dts
+++ b/arch/arm/boot/dts/am335x-bone.dts
@@ -120,6 +120,35 @@
120 status = "okay"; 120 status = "okay";
121 }; 121 };
122 122
123 musb: usb@47400000 {
124 status = "okay";
125
126 control@44e10000 {
127 status = "okay";
128 };
129
130 usb-phy@47401300 {
131 status = "okay";
132 };
133
134 usb-phy@47401b00 {
135 status = "okay";
136 };
137
138 usb@47401000 {
139 status = "okay";
140 };
141
142 usb@47401800 {
143 status = "okay";
144 dr_mode = "host";
145 };
146
147 dma-controller@07402000 {
148 status = "okay";
149 };
150 };
151
123 i2c0: i2c@44e0b000 { 152 i2c0: i2c@44e0b000 {
124 pinctrl-names = "default"; 153 pinctrl-names = "default";
125 pinctrl-0 = <&i2c0_pins>; 154 pinctrl-0 = <&i2c0_pins>;
diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts
index 3aee1a43782d..e8ec8756e498 100644
--- a/arch/arm/boot/dts/am335x-evm.dts
+++ b/arch/arm/boot/dts/am335x-evm.dts
@@ -171,6 +171,35 @@
171 }; 171 };
172 }; 172 };
173 173
174 musb: usb@47400000 {
175 status = "okay";
176
177 control@44e10000 {
178 status = "okay";
179 };
180
181 usb-phy@47401300 {
182 status = "okay";
183 };
184
185 usb-phy@47401b00 {
186 status = "okay";
187 };
188
189 usb@47401000 {
190 status = "okay";
191 };
192
193 usb@47401800 {
194 status = "okay";
195 dr_mode = "host";
196 };
197
198 dma-controller@07402000 {
199 status = "okay";
200 };
201 };
202
174 i2c1: i2c@4802a000 { 203 i2c1: i2c@4802a000 {
175 pinctrl-names = "default"; 204 pinctrl-names = "default";
176 pinctrl-0 = <&i2c1_pins>; 205 pinctrl-0 = <&i2c1_pins>;
diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts
index 0c8ad173d2b0..4f339fa91c57 100644
--- a/arch/arm/boot/dts/am335x-evmsk.dts
+++ b/arch/arm/boot/dts/am335x-evmsk.dts
@@ -14,6 +14,7 @@
14/dts-v1/; 14/dts-v1/;
15 15
16#include "am33xx.dtsi" 16#include "am33xx.dtsi"
17#include <dt-bindings/pwm/pwm.h>
17 18
18/ { 19/ {
19 model = "TI AM335x EVM-SK"; 20 model = "TI AM335x EVM-SK";
@@ -207,6 +208,22 @@
207 }; 208 };
208 }; 209 };
209 210
211 musb: usb@47400000 {
212 status = "okay";
213
214 control@44e10000 {
215 status = "okay";
216 };
217
218 usb-phy@47401300 {
219 status = "okay";
220 };
221
222 usb@47401000 {
223 status = "okay";
224 };
225 };
226
210 epwmss2: epwmss@48304000 { 227 epwmss2: epwmss@48304000 {
211 status = "okay"; 228 status = "okay";
212 229
@@ -298,7 +315,7 @@
298 315
299 backlight { 316 backlight {
300 compatible = "pwm-backlight"; 317 compatible = "pwm-backlight";
301 pwms = <&ecap2 0 50000 1>; 318 pwms = <&ecap2 0 50000 PWM_POLARITY_INVERTED>;
302 brightness-levels = <0 58 61 66 75 90 125 170 255>; 319 brightness-levels = <0 58 61 66 75 90 125 170 255>;
303 default-brightness-level = <8>; 320 default-brightness-level = <8>;
304 }; 321 };
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index 38b446ba1ce1..f9c5da9c7fe1 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -26,6 +26,10 @@
26 serial5 = &uart5; 26 serial5 = &uart5;
27 d_can0 = &dcan0; 27 d_can0 = &dcan0;
28 d_can1 = &dcan1; 28 d_can1 = &dcan1;
29 usb0 = &usb0;
30 usb1 = &usb1;
31 phy0 = &usb0_phy;
32 phy1 = &usb1_phy;
29 }; 33 };
30 34
31 cpus { 35 cpus {
@@ -333,21 +337,132 @@
333 status = "disabled"; 337 status = "disabled";
334 }; 338 };
335 339
336 usb@47400000 { 340 usb: usb@47400000 {
337 compatible = "ti,musb-am33xx"; 341 compatible = "ti,am33xx-usb";
338 reg = <0x47400000 0x1000 /* usbss */ 342 reg = <0x47400000 0x1000>;
339 0x47401000 0x800 /* musb instance 0 */ 343 ranges;
340 0x47401800 0x800>; /* musb instance 1 */ 344 #address-cells = <1>;
341 interrupts = <17 /* usbss */ 345 #size-cells = <1>;
342 18 /* musb instance 0 */
343 19>; /* musb instance 1 */
344 multipoint = <1>;
345 num-eps = <16>;
346 ram-bits = <12>;
347 port0-mode = <3>;
348 port1-mode = <3>;
349 power = <250>;
350 ti,hwmods = "usb_otg_hs"; 346 ti,hwmods = "usb_otg_hs";
347 status = "disabled";
348
349 ctrl_mod: control@44e10000 {
350 compatible = "ti,am335x-usb-ctrl-module";
351 reg = <0x44e10620 0x10
352 0x44e10648 0x4>;
353 reg-names = "phy_ctrl", "wakeup";
354 status = "disabled";
355 };
356
357 usb0_phy: usb-phy@47401300 {
358 compatible = "ti,am335x-usb-phy";
359 reg = <0x47401300 0x100>;
360 reg-names = "phy";
361 status = "disabled";
362 ti,ctrl_mod = <&ctrl_mod>;
363 };
364
365 usb0: usb@47401000 {
366 compatible = "ti,musb-am33xx";
367 status = "disabled";
368 reg = <0x47401400 0x400
369 0x47401000 0x200>;
370 reg-names = "mc", "control";
371
372 interrupts = <18>;
373 interrupt-names = "mc";
374 dr_mode = "otg";
375 mentor,multipoint = <1>;
376 mentor,num-eps = <16>;
377 mentor,ram-bits = <12>;
378 mentor,power = <500>;
379 phys = <&usb0_phy>;
380
381 dmas = <&cppi41dma 0 0 &cppi41dma 1 0
382 &cppi41dma 2 0 &cppi41dma 3 0
383 &cppi41dma 4 0 &cppi41dma 5 0
384 &cppi41dma 6 0 &cppi41dma 7 0
385 &cppi41dma 8 0 &cppi41dma 9 0
386 &cppi41dma 10 0 &cppi41dma 11 0
387 &cppi41dma 12 0 &cppi41dma 13 0
388 &cppi41dma 14 0 &cppi41dma 0 1
389 &cppi41dma 1 1 &cppi41dma 2 1
390 &cppi41dma 3 1 &cppi41dma 4 1
391 &cppi41dma 5 1 &cppi41dma 6 1
392 &cppi41dma 7 1 &cppi41dma 8 1
393 &cppi41dma 9 1 &cppi41dma 10 1
394 &cppi41dma 11 1 &cppi41dma 12 1
395 &cppi41dma 13 1 &cppi41dma 14 1>;
396 dma-names =
397 "rx1", "rx2", "rx3", "rx4", "rx5", "rx6", "rx7",
398 "rx8", "rx9", "rx10", "rx11", "rx12", "rx13",
399 "rx14", "rx15",
400 "tx1", "tx2", "tx3", "tx4", "tx5", "tx6", "tx7",
401 "tx8", "tx9", "tx10", "tx11", "tx12", "tx13",
402 "tx14", "tx15";
403 };
404
405 usb1_phy: usb-phy@47401b00 {
406 compatible = "ti,am335x-usb-phy";
407 reg = <0x47401b00 0x100>;
408 reg-names = "phy";
409 status = "disabled";
410 ti,ctrl_mod = <&ctrl_mod>;
411 };
412
413 usb1: usb@47401800 {
414 compatible = "ti,musb-am33xx";
415 status = "disabled";
416 reg = <0x47401c00 0x400
417 0x47401800 0x200>;
418 reg-names = "mc", "control";
419 interrupts = <19>;
420 interrupt-names = "mc";
421 dr_mode = "otg";
422 mentor,multipoint = <1>;
423 mentor,num-eps = <16>;
424 mentor,ram-bits = <12>;
425 mentor,power = <500>;
426 phys = <&usb1_phy>;
427
428 dmas = <&cppi41dma 15 0 &cppi41dma 16 0
429 &cppi41dma 17 0 &cppi41dma 18 0
430 &cppi41dma 19 0 &cppi41dma 20 0
431 &cppi41dma 21 0 &cppi41dma 22 0
432 &cppi41dma 23 0 &cppi41dma 24 0
433 &cppi41dma 25 0 &cppi41dma 26 0
434 &cppi41dma 27 0 &cppi41dma 28 0
435 &cppi41dma 29 0 &cppi41dma 15 1
436 &cppi41dma 16 1 &cppi41dma 17 1
437 &cppi41dma 18 1 &cppi41dma 19 1
438 &cppi41dma 20 1 &cppi41dma 21 1
439 &cppi41dma 22 1 &cppi41dma 23 1
440 &cppi41dma 24 1 &cppi41dma 25 1
441 &cppi41dma 26 1 &cppi41dma 27 1
442 &cppi41dma 28 1 &cppi41dma 29 1>;
443 dma-names =
444 "rx1", "rx2", "rx3", "rx4", "rx5", "rx6", "rx7",
445 "rx8", "rx9", "rx10", "rx11", "rx12", "rx13",
446 "rx14", "rx15",
447 "tx1", "tx2", "tx3", "tx4", "tx5", "tx6", "tx7",
448 "tx8", "tx9", "tx10", "tx11", "tx12", "tx13",
449 "tx14", "tx15";
450 };
451
452 cppi41dma: dma-controller@07402000 {
453 compatible = "ti,am3359-cppi41";
454 reg = <0x47400000 0x1000
455 0x47402000 0x1000
456 0x47403000 0x1000
457 0x47404000 0x4000>;
458 reg-names = "glue", "controller", "scheduler", "queuemgr";
459 interrupts = <17>;
460 interrupt-names = "glue";
461 #dma-cells = <2>;
462 #dma-channels = <30>;
463 #dma-requests = <256>;
464 status = "disabled";
465 };
351 }; 466 };
352 467
353 epwmss0: epwmss@48300000 { 468 epwmss0: epwmss@48300000 {
diff --git a/arch/arm/boot/dts/at91sam9n12ek.dts b/arch/arm/boot/dts/at91sam9n12ek.dts
index d59b70c6a6a0..3d77dbe406f4 100644
--- a/arch/arm/boot/dts/at91sam9n12ek.dts
+++ b/arch/arm/boot/dts/at91sam9n12ek.dts
@@ -14,11 +14,11 @@
14 compatible = "atmel,at91sam9n12ek", "atmel,at91sam9n12", "atmel,at91sam9"; 14 compatible = "atmel,at91sam9n12ek", "atmel,at91sam9n12", "atmel,at91sam9";
15 15
16 chosen { 16 chosen {
17 bootargs = "mem=128M console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=jffs2"; 17 bootargs = "console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=jffs2";
18 }; 18 };
19 19
20 memory { 20 memory {
21 reg = <0x20000000 0x10000000>; 21 reg = <0x20000000 0x8000000>;
22 }; 22 };
23 23
24 clocks { 24 clocks {
diff --git a/arch/arm/boot/dts/at91sam9x5ek.dtsi b/arch/arm/boot/dts/at91sam9x5ek.dtsi
index b753855b2058..49e3c45818c2 100644
--- a/arch/arm/boot/dts/at91sam9x5ek.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5ek.dtsi
@@ -94,8 +94,9 @@
94 94
95 usb0: ohci@00600000 { 95 usb0: ohci@00600000 {
96 status = "okay"; 96 status = "okay";
97 num-ports = <2>; 97 num-ports = <3>;
98 atmel,vbus-gpio = <&pioD 19 GPIO_ACTIVE_LOW 98 atmel,vbus-gpio = <0 /* &pioD 18 GPIO_ACTIVE_LOW *//* Activate to have access to port A */
99 &pioD 19 GPIO_ACTIVE_LOW
99 &pioD 20 GPIO_ACTIVE_LOW 100 &pioD 20 GPIO_ACTIVE_LOW
100 >; 101 >;
101 }; 102 };
diff --git a/arch/arm/boot/dts/atlas6.dtsi b/arch/arm/boot/dts/atlas6.dtsi
index a0f2721ea583..8678e0c11119 100644
--- a/arch/arm/boot/dts/atlas6.dtsi
+++ b/arch/arm/boot/dts/atlas6.dtsi
@@ -329,6 +329,12 @@
329 sirf,function = "uart0"; 329 sirf,function = "uart0";
330 }; 330 };
331 }; 331 };
332 uart0_noflow_pins_a: uart0@1 {
333 uart {
334 sirf,pins = "uart0_nostreamctrlgrp";
335 sirf,function = "uart0_nostreamctrl";
336 };
337 };
332 uart1_pins_a: uart1@0 { 338 uart1_pins_a: uart1@0 {
333 uart { 339 uart {
334 sirf,pins = "uart1grp"; 340 sirf,pins = "uart1grp";
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index 24c0888dd409..f2dfa6b1f1a1 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -405,7 +405,7 @@
405 }; 405 };
406 406
407 i2s0: i2s@03830000 { 407 i2s0: i2s@03830000 {
408 compatible = "samsung,i2s-v5"; 408 compatible = "samsung,s5pv210-i2s";
409 reg = <0x03830000 0x100>; 409 reg = <0x03830000 0x100>;
410 dmas = <&pdma0 10 410 dmas = <&pdma0 10
411 &pdma0 9 411 &pdma0 9
@@ -415,16 +415,13 @@
415 <&clock_audss EXYNOS_I2S_BUS>, 415 <&clock_audss EXYNOS_I2S_BUS>,
416 <&clock_audss EXYNOS_SCLK_I2S>; 416 <&clock_audss EXYNOS_SCLK_I2S>;
417 clock-names = "iis", "i2s_opclk0", "i2s_opclk1"; 417 clock-names = "iis", "i2s_opclk0", "i2s_opclk1";
418 samsung,supports-6ch;
419 samsung,supports-rstclr;
420 samsung,supports-secdai;
421 samsung,idma-addr = <0x03000000>; 418 samsung,idma-addr = <0x03000000>;
422 pinctrl-names = "default"; 419 pinctrl-names = "default";
423 pinctrl-0 = <&i2s0_bus>; 420 pinctrl-0 = <&i2s0_bus>;
424 }; 421 };
425 422
426 i2s1: i2s@12D60000 { 423 i2s1: i2s@12D60000 {
427 compatible = "samsung,i2s-v5"; 424 compatible = "samsung,s3c6410-i2s";
428 reg = <0x12D60000 0x100>; 425 reg = <0x12D60000 0x100>;
429 dmas = <&pdma1 12 426 dmas = <&pdma1 12
430 &pdma1 11>; 427 &pdma1 11>;
@@ -436,7 +433,7 @@
436 }; 433 };
437 434
438 i2s2: i2s@12D70000 { 435 i2s2: i2s@12D70000 {
439 compatible = "samsung,i2s-v5"; 436 compatible = "samsung,s3c6410-i2s";
440 reg = <0x12D70000 0x100>; 437 reg = <0x12D70000 0x100>;
441 dmas = <&pdma0 12 438 dmas = <&pdma0 12
442 &pdma0 11>; 439 &pdma0 11>;
diff --git a/arch/arm/boot/dts/exynos5440.dtsi b/arch/arm/boot/dts/exynos5440.dtsi
index 0cedba4d8529..1b81f36896bc 100644
--- a/arch/arm/boot/dts/exynos5440.dtsi
+++ b/arch/arm/boot/dts/exynos5440.dtsi
@@ -248,6 +248,7 @@
248 #interrupt-cells = <1>; 248 #interrupt-cells = <1>;
249 interrupt-map-mask = <0 0 0 0>; 249 interrupt-map-mask = <0 0 0 0>;
250 interrupt-map = <0x0 0 &gic 53>; 250 interrupt-map = <0x0 0 &gic 53>;
251 num-lanes = <4>;
251 }; 252 };
252 253
253 pcie@2a0000 { 254 pcie@2a0000 {
@@ -267,5 +268,6 @@
267 #interrupt-cells = <1>; 268 #interrupt-cells = <1>;
268 interrupt-map-mask = <0 0 0 0>; 269 interrupt-map-mask = <0 0 0 0>;
269 interrupt-map = <0x0 0 &gic 56>; 270 interrupt-map = <0x0 0 &gic 56>;
271 num-lanes = <4>;
270 }; 272 };
271}; 273};
diff --git a/arch/arm/boot/dts/imx28-evk.dts b/arch/arm/boot/dts/imx28-evk.dts
index e035f4664b97..15715d921d14 100644
--- a/arch/arm/boot/dts/imx28-evk.dts
+++ b/arch/arm/boot/dts/imx28-evk.dts
@@ -220,6 +220,7 @@
220 auart0: serial@8006a000 { 220 auart0: serial@8006a000 {
221 pinctrl-names = "default"; 221 pinctrl-names = "default";
222 pinctrl-0 = <&auart0_pins_a>; 222 pinctrl-0 = <&auart0_pins_a>;
223 fsl,uart-has-rtscts;
223 status = "okay"; 224 status = "okay";
224 }; 225 };
225 226
diff --git a/arch/arm/boot/dts/keystone.dts b/arch/arm/boot/dts/keystone.dts
index 1334b42c6b77..a68e34bbecb2 100644
--- a/arch/arm/boot/dts/keystone.dts
+++ b/arch/arm/boot/dts/keystone.dts
@@ -7,7 +7,9 @@
7 */ 7 */
8 8
9/dts-v1/; 9/dts-v1/;
10/include/ "skeleton.dtsi" 10#include <dt-bindings/interrupt-controller/arm-gic.h>
11
12#include "skeleton.dtsi"
11 13
12/ { 14/ {
13 model = "Texas Instruments Keystone 2 SoC"; 15 model = "Texas Instruments Keystone 2 SoC";
@@ -67,18 +69,23 @@
67 69
68 timer { 70 timer {
69 compatible = "arm,armv7-timer"; 71 compatible = "arm,armv7-timer";
70 interrupts = <1 13 0xf08>, 72 interrupts =
71 <1 14 0xf08>, 73 <GIC_PPI 13
72 <1 11 0xf08>, 74 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
73 <1 10 0x308>; 75 <GIC_PPI 14
76 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
77 <GIC_PPI 11
78 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
79 <GIC_PPI 10
80 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
74 }; 81 };
75 82
76 pmu { 83 pmu {
77 compatible = "arm,cortex-a15-pmu"; 84 compatible = "arm,cortex-a15-pmu";
78 interrupts = <0 20 0xf01>, 85 interrupts = <GIC_SPI 20 IRQ_TYPE_EDGE_RISING>,
79 <0 21 0xf01>, 86 <GIC_SPI 21 IRQ_TYPE_EDGE_RISING>,
80 <0 22 0xf01>, 87 <GIC_SPI 22 IRQ_TYPE_EDGE_RISING>,
81 <0 23 0xf01>; 88 <GIC_SPI 23 IRQ_TYPE_EDGE_RISING>;
82 }; 89 };
83 90
84 soc { 91 soc {
@@ -100,7 +107,7 @@
100 reg-io-width = <4>; 107 reg-io-width = <4>;
101 reg = <0x02530c00 0x100>; 108 reg = <0x02530c00 0x100>;
102 clock-frequency = <133120000>; 109 clock-frequency = <133120000>;
103 interrupts = <0 277 0xf01>; 110 interrupts = <GIC_SPI 277 IRQ_TYPE_EDGE_RISING>;
104 }; 111 };
105 112
106 uart1: serial@02531000 { 113 uart1: serial@02531000 {
@@ -110,7 +117,7 @@
110 reg-io-width = <4>; 117 reg-io-width = <4>;
111 reg = <0x02531000 0x100>; 118 reg = <0x02531000 0x100>;
112 clock-frequency = <133120000>; 119 clock-frequency = <133120000>;
113 interrupts = <0 280 0xf01>; 120 interrupts = <GIC_SPI 280 IRQ_TYPE_EDGE_RISING>;
114 }; 121 };
115 122
116 }; 123 };
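The keystone.dts conversion swaps raw GIC interrupt specifiers for the dt-bindings macros, and the arithmetic checks out, as the sketch below shows; the macro definitions mirror the arm-gic/irq binding headers and the assertions are purely illustrative.

/* Quick check that the macro-based specifiers reproduce the old raw
 * values; the definitions mirror the dt-bindings arm-gic.h/irq.h
 * headers, and the assertions are purely illustrative. */
#include <assert.h>

#define IRQ_TYPE_EDGE_RISING		1
#define IRQ_TYPE_LEVEL_LOW		8
#define GIC_CPU_MASK_RAW(x)		((x) << 8)
#define GIC_CPU_MASK_SIMPLE(num)	GIC_CPU_MASK_RAW((1 << (num)) - 1)

int main(void)
{
	/* Timer PPIs: the old third cell 0xf08 is a 4-CPU mask plus level-low. */
	assert((GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW) == 0xf08);
	/* PMU SPIs: the old 0xf01 carried the same mask plus edge-rising; the
	 * mask is only meaningful for PPIs, so the new file keeps just the type. */
	assert((GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_EDGE_RISING) == 0xf01);
	return 0;
}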
diff --git a/arch/arm/boot/dts/msm8660-surf.dts b/arch/arm/boot/dts/msm8660-surf.dts
index cdc010e0f93e..386d42870215 100644
--- a/arch/arm/boot/dts/msm8660-surf.dts
+++ b/arch/arm/boot/dts/msm8660-surf.dts
@@ -38,7 +38,7 @@
38 }; 38 };
39 39
40 serial@19c40000 { 40 serial@19c40000 {
41 compatible = "qcom,msm-hsuart", "qcom,msm-uart"; 41 compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm";
42 reg = <0x19c40000 0x1000>, 42 reg = <0x19c40000 0x1000>,
43 <0x19c00000 0x1000>; 43 <0x19c00000 0x1000>;
44 interrupts = <0 195 0x0>; 44 interrupts = <0 195 0x0>;
diff --git a/arch/arm/boot/dts/msm8960-cdp.dts b/arch/arm/boot/dts/msm8960-cdp.dts
index db2060c46540..93e9f7e0b7ad 100644
--- a/arch/arm/boot/dts/msm8960-cdp.dts
+++ b/arch/arm/boot/dts/msm8960-cdp.dts
@@ -26,7 +26,7 @@
26 cpu-offset = <0x80000>; 26 cpu-offset = <0x80000>;
27 }; 27 };
28 28
29 msmgpio: gpio@fd510000 { 29 msmgpio: gpio@800000 {
30 compatible = "qcom,msm-gpio"; 30 compatible = "qcom,msm-gpio";
31 gpio-controller; 31 gpio-controller;
32 #gpio-cells = <2>; 32 #gpio-cells = <2>;
@@ -34,11 +34,11 @@
34 interrupts = <0 32 0x4>; 34 interrupts = <0 32 0x4>;
35 interrupt-controller; 35 interrupt-controller;
36 #interrupt-cells = <2>; 36 #interrupt-cells = <2>;
37 reg = <0xfd510000 0x4000>; 37 reg = <0x800000 0x4000>;
38 }; 38 };
39 39
40 serial@16440000 { 40 serial@16440000 {
41 compatible = "qcom,msm-hsuart", "qcom,msm-uart"; 41 compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm";
42 reg = <0x16440000 0x1000>, 42 reg = <0x16440000 0x1000>,
43 <0x16400000 0x1000>; 43 <0x16400000 0x1000>;
44 interrupts = <0 154 0x0>; 44 interrupts = <0 154 0x0>;
diff --git a/arch/arm/boot/dts/omap5-uevm.dts b/arch/arm/boot/dts/omap5-uevm.dts
index 08b72678abff..65d7b601651c 100644
--- a/arch/arm/boot/dts/omap5-uevm.dts
+++ b/arch/arm/boot/dts/omap5-uevm.dts
@@ -235,7 +235,7 @@
235}; 235};
236 236
237&mmc1 { 237&mmc1 {
238 vmmc-supply = <&vmmcsd_fixed>; 238 vmmc-supply = <&ldo9_reg>;
239 bus-width = <4>; 239 bus-width = <4>;
240}; 240};
241 241
@@ -282,6 +282,7 @@
282 282
283 regulators { 283 regulators {
284 smps123_reg: smps123 { 284 smps123_reg: smps123 {
285 /* VDD_OPP_MPU */
285 regulator-name = "smps123"; 286 regulator-name = "smps123";
286 regulator-min-microvolt = < 600000>; 287 regulator-min-microvolt = < 600000>;
287 regulator-max-microvolt = <1500000>; 288 regulator-max-microvolt = <1500000>;
@@ -290,6 +291,7 @@
290 }; 291 };
291 292
292 smps45_reg: smps45 { 293 smps45_reg: smps45 {
294 /* VDD_OPP_MM */
293 regulator-name = "smps45"; 295 regulator-name = "smps45";
294 regulator-min-microvolt = < 600000>; 296 regulator-min-microvolt = < 600000>;
295 regulator-max-microvolt = <1310000>; 297 regulator-max-microvolt = <1310000>;
@@ -298,6 +300,7 @@
298 }; 300 };
299 301
300 smps6_reg: smps6 { 302 smps6_reg: smps6 {
303 /* VDD_DDR3 - over VDD_SMPS6 */
301 regulator-name = "smps6"; 304 regulator-name = "smps6";
302 regulator-min-microvolt = <1200000>; 305 regulator-min-microvolt = <1200000>;
303 regulator-max-microvolt = <1200000>; 306 regulator-max-microvolt = <1200000>;
@@ -306,6 +309,7 @@
306 }; 309 };
307 310
308 smps7_reg: smps7 { 311 smps7_reg: smps7 {
312 /* VDDS_1v8_OMAP over VDDS_1v8_MAIN */
309 regulator-name = "smps7"; 313 regulator-name = "smps7";
310 regulator-min-microvolt = <1800000>; 314 regulator-min-microvolt = <1800000>;
311 regulator-max-microvolt = <1800000>; 315 regulator-max-microvolt = <1800000>;
@@ -314,6 +318,7 @@
314 }; 318 };
315 319
316 smps8_reg: smps8 { 320 smps8_reg: smps8 {
321 /* VDD_OPP_CORE */
317 regulator-name = "smps8"; 322 regulator-name = "smps8";
318 regulator-min-microvolt = < 600000>; 323 regulator-min-microvolt = < 600000>;
319 regulator-max-microvolt = <1310000>; 324 regulator-max-microvolt = <1310000>;
@@ -322,15 +327,15 @@
322 }; 327 };
323 328
324 smps9_reg: smps9 { 329 smps9_reg: smps9 {
330 /* VDDA_2v1_AUD over VDD_2v1 */
325 regulator-name = "smps9"; 331 regulator-name = "smps9";
326 regulator-min-microvolt = <2100000>; 332 regulator-min-microvolt = <2100000>;
327 regulator-max-microvolt = <2100000>; 333 regulator-max-microvolt = <2100000>;
328 regulator-always-on;
329 regulator-boot-on;
330 ti,smps-range = <0x80>; 334 ti,smps-range = <0x80>;
331 }; 335 };
332 336
333 smps10_reg: smps10 { 337 smps10_reg: smps10 {
338 /* VBUS_5V_OTG */
334 regulator-name = "smps10"; 339 regulator-name = "smps10";
335 regulator-min-microvolt = <5000000>; 340 regulator-min-microvolt = <5000000>;
336 regulator-max-microvolt = <5000000>; 341 regulator-max-microvolt = <5000000>;
@@ -339,38 +344,40 @@
339 }; 344 };
340 345
341 ldo1_reg: ldo1 { 346 ldo1_reg: ldo1 {
347 /* VDDAPHY_CAM: vdda_csiport */
342 regulator-name = "ldo1"; 348 regulator-name = "ldo1";
343 regulator-min-microvolt = <2800000>; 349 regulator-min-microvolt = <1500000>;
344 regulator-max-microvolt = <2800000>; 350 regulator-max-microvolt = <1800000>;
345 regulator-always-on;
346 regulator-boot-on;
347 }; 351 };
348 352
349 ldo2_reg: ldo2 { 353 ldo2_reg: ldo2 {
354 /* VCC_2V8_DISP: Does not go anywhere */
350 regulator-name = "ldo2"; 355 regulator-name = "ldo2";
351 regulator-min-microvolt = <2900000>; 356 regulator-min-microvolt = <2800000>;
352 regulator-max-microvolt = <2900000>; 357 regulator-max-microvolt = <2800000>;
353 regulator-always-on; 358 /* Unused */
354 regulator-boot-on; 359 status = "disabled";
355 }; 360 };
356 361
357 ldo3_reg: ldo3 { 362 ldo3_reg: ldo3 {
363 /* VDDAPHY_MDM: vdda_lli */
358 regulator-name = "ldo3"; 364 regulator-name = "ldo3";
359 regulator-min-microvolt = <3000000>; 365 regulator-min-microvolt = <1500000>;
360 regulator-max-microvolt = <3000000>; 366 regulator-max-microvolt = <1500000>;
361 regulator-always-on;
362 regulator-boot-on; 367 regulator-boot-on;
368 /* Only if Modem is used */
369 status = "disabled";
363 }; 370 };
364 371
365 ldo4_reg: ldo4 { 372 ldo4_reg: ldo4 {
373 /* VDDAPHY_DISP: vdda_dsiport/hdmi */
366 regulator-name = "ldo4"; 374 regulator-name = "ldo4";
367 regulator-min-microvolt = <2200000>; 375 regulator-min-microvolt = <1500000>;
368 regulator-max-microvolt = <2200000>; 376 regulator-max-microvolt = <1800000>;
369 regulator-always-on;
370 regulator-boot-on;
371 }; 377 };
372 378
373 ldo5_reg: ldo5 { 379 ldo5_reg: ldo5 {
380 /* VDDA_1V8_PHY: usb/sata/hdmi.. */
374 regulator-name = "ldo5"; 381 regulator-name = "ldo5";
375 regulator-min-microvolt = <1800000>; 382 regulator-min-microvolt = <1800000>;
376 regulator-max-microvolt = <1800000>; 383 regulator-max-microvolt = <1800000>;
@@ -379,38 +386,43 @@
379 }; 386 };
380 387
381 ldo6_reg: ldo6 { 388 ldo6_reg: ldo6 {
389 /* VDDS_1V2_WKUP: hsic/ldo_emu_wkup */
382 regulator-name = "ldo6"; 390 regulator-name = "ldo6";
383 regulator-min-microvolt = <1500000>; 391 regulator-min-microvolt = <1200000>;
384 regulator-max-microvolt = <1500000>; 392 regulator-max-microvolt = <1200000>;
385 regulator-always-on; 393 regulator-always-on;
386 regulator-boot-on; 394 regulator-boot-on;
387 }; 395 };
388 396
389 ldo7_reg: ldo7 { 397 ldo7_reg: ldo7 {
398 /* VDD_VPP: vpp1 */
390 regulator-name = "ldo7"; 399 regulator-name = "ldo7";
391 regulator-min-microvolt = <1500000>; 400 regulator-min-microvolt = <2000000>;
392 regulator-max-microvolt = <1500000>; 401 regulator-max-microvolt = <2000000>;
393 regulator-always-on; 402 /* Only for efuse reprograming! */
394 regulator-boot-on; 403 status = "disabled";
395 }; 404 };
396 405
397 ldo8_reg: ldo8 { 406 ldo8_reg: ldo8 {
407 /* VDD_3v0: Does not go anywhere */
398 regulator-name = "ldo8"; 408 regulator-name = "ldo8";
399 regulator-min-microvolt = <1500000>; 409 regulator-min-microvolt = <3000000>;
400 regulator-max-microvolt = <1500000>; 410 regulator-max-microvolt = <3000000>;
401 regulator-always-on;
402 regulator-boot-on; 411 regulator-boot-on;
412 /* Unused */
413 status = "disabled";
403 }; 414 };
404 415
405 ldo9_reg: ldo9 { 416 ldo9_reg: ldo9 {
417 /* VCC_DV_SDIO: vdds_sdcard */
406 regulator-name = "ldo9"; 418 regulator-name = "ldo9";
407 regulator-min-microvolt = <1800000>; 419 regulator-min-microvolt = <1800000>;
408 regulator-max-microvolt = <3300000>; 420 regulator-max-microvolt = <3000000>;
409 regulator-always-on;
410 regulator-boot-on; 421 regulator-boot-on;
411 }; 422 };
412 423
413 ldoln_reg: ldoln { 424 ldoln_reg: ldoln {
425 /* VDDA_1v8_REF: vdds_osc/mm_l4per.. */
414 regulator-name = "ldoln"; 426 regulator-name = "ldoln";
415 regulator-min-microvolt = <1800000>; 427 regulator-min-microvolt = <1800000>;
416 regulator-max-microvolt = <1800000>; 428 regulator-max-microvolt = <1800000>;
@@ -419,12 +431,20 @@
419 }; 431 };
420 432
421 ldousb_reg: ldousb { 433 ldousb_reg: ldousb {
434 /* VDDA_3V_USB: VDDA_USBHS33 */
422 regulator-name = "ldousb"; 435 regulator-name = "ldousb";
423 regulator-min-microvolt = <3250000>; 436 regulator-min-microvolt = <3250000>;
424 regulator-max-microvolt = <3250000>; 437 regulator-max-microvolt = <3250000>;
425 regulator-always-on; 438 regulator-always-on;
426 regulator-boot-on; 439 regulator-boot-on;
427 }; 440 };
441
442 regen3_reg: regen3 {
443 /* REGEN3 controls LDO9 supply to card */
444 regulator-name = "regen3";
445 regulator-always-on;
446 regulator-boot-on;
447 };
428 }; 448 };
429 }; 449 };
430 }; 450 };
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index e643620417a9..07be2cd7b318 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -644,7 +644,7 @@
644 utmi-mode = <2>; 644 utmi-mode = <2>;
645 ranges; 645 ranges;
646 dwc3@4a030000 { 646 dwc3@4a030000 {
647 compatible = "synopsys,dwc3"; 647 compatible = "snps,dwc3";
648 reg = <0x4a030000 0x1000>; 648 reg = <0x4a030000 0x1000>;
649 interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>; 649 interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>;
650 usb-phy = <&usb2_phy>, <&usb3_phy>; 650 usb-phy = <&usb2_phy>, <&usb3_phy>;
diff --git a/arch/arm/boot/dts/sama5d3xmb.dtsi b/arch/arm/boot/dts/sama5d3xmb.dtsi
index 8a9e05d8a4b8..dba739b6ef36 100644
--- a/arch/arm/boot/dts/sama5d3xmb.dtsi
+++ b/arch/arm/boot/dts/sama5d3xmb.dtsi
@@ -81,6 +81,14 @@
81 81
82 macb1: ethernet@f802c000 { 82 macb1: ethernet@f802c000 {
83 phy-mode = "rmii"; 83 phy-mode = "rmii";
84
85 #address-cells = <1>;
86 #size-cells = <0>;
87 phy0: ethernet-phy@1 {
88 interrupt-parent = <&pioE>;
89 interrupts = <30 IRQ_TYPE_EDGE_FALLING>;
90 reg = <1>;
91 };
84 }; 92 };
85 93
86 pinctrl@fffff200 { 94 pinctrl@fffff200 {
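
The macb1 node gains an MDIO child: with #address-cells = <1> and #size-cells = <0>, the phy0 node's reg = <1> is its MDIO bus address, and the interrupt properties route the PHY IRQ through pioE. A MAC driver typically hands this node to the OF MDIO helpers so the PHY is probed automatically; a hedged sketch ("bp" is an illustrative driver-private pointer):

    #include <linux/of_mdio.h>

    /* Sketch: registering the MDIO bus from the DT node; phy0 at
     * address 1 is then created and bound without board code. */
    int err = of_mdiobus_register(bp->mii_bus, pdev->dev.of_node);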
diff --git a/arch/arm/boot/dts/skeleton64.dtsi b/arch/arm/boot/dts/skeleton64.dtsi
index 15994158a998..b5d7f36f33de 100644
--- a/arch/arm/boot/dts/skeleton64.dtsi
+++ b/arch/arm/boot/dts/skeleton64.dtsi
@@ -9,5 +9,5 @@
9 #size-cells = <2>; 9 #size-cells = <2>;
10 chosen { }; 10 chosen { };
11 aliases { }; 11 aliases { };
12 memory { device_type = "memory"; reg = <0 0>; }; 12 memory { device_type = "memory"; reg = <0 0 0 0>; };
13}; 13};
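
With #address-cells = <2> and #size-cells = <2>, each (address, size) pair in a reg property occupies four 32-bit cells, so the empty memory placeholder must be <0 0 0 0>; the old <0 0> described only a 64-bit address with no size at all. A tiny sketch of how a 64-bit value maps onto two cells:

    /* Sketch: splitting a 64-bit address into the two 32-bit cells
     * that a 2-cell encoding expects (most significant cell first). */
    u64 addr = 0x880000000ULL;
    u32 cells[2] = { upper_32_bits(addr), lower_32_bits(addr) };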
diff --git a/arch/arm/boot/dts/stih41x.dtsi b/arch/arm/boot/dts/stih41x.dtsi
index 7321403cab8a..f5b9898d9c6e 100644
--- a/arch/arm/boot/dts/stih41x.dtsi
+++ b/arch/arm/boot/dts/stih41x.dtsi
@@ -6,10 +6,12 @@
6 #address-cells = <1>; 6 #address-cells = <1>;
7 #size-cells = <0>; 7 #size-cells = <0>;
8 cpu@0 { 8 cpu@0 {
9 device_type = "cpu";
9 compatible = "arm,cortex-a9"; 10 compatible = "arm,cortex-a9";
10 reg = <0>; 11 reg = <0>;
11 }; 12 };
12 cpu@1 { 13 cpu@1 {
14 device_type = "cpu";
13 compatible = "arm,cortex-a9"; 15 compatible = "arm,cortex-a9";
14 reg = <1>; 16 reg = <1>;
15 }; 17 };
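
Without device_type = "cpu", generic DT code does not recognise these nodes as CPUs when it builds the logical CPU map, so secondary cores never come up. A hedged sketch of the kind of walk involved (the real logic lives in arm_dt_init_cpu_maps()):

    /* Sketch: enumerating CPU nodes by device_type. */
    struct device_node *cpu = NULL;
    u32 hwid;

    while ((cpu = of_find_node_by_type(cpu, "cpu")) != NULL) {
            if (!of_property_read_u32(cpu, "reg", &hwid))
                    pr_debug("possible cpu hwid %u\n", hwid);
    }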
diff --git a/arch/arm/boot/dts/tegra20-colibri-512.dtsi b/arch/arm/boot/dts/tegra20-colibri-512.dtsi
index 2fcb3f2ca160..5592be6f2f7a 100644
--- a/arch/arm/boot/dts/tegra20-colibri-512.dtsi
+++ b/arch/arm/boot/dts/tegra20-colibri-512.dtsi
@@ -457,6 +457,7 @@
457 }; 457 };
458 458
459 usb-phy@c5004000 { 459 usb-phy@c5004000 {
460 status = "okay";
460 nvidia,phy-reset-gpio = <&gpio TEGRA_GPIO(V, 1) 461 nvidia,phy-reset-gpio = <&gpio TEGRA_GPIO(V, 1)
461 GPIO_ACTIVE_LOW>; 462 GPIO_ACTIVE_LOW>;
462 }; 463 };
diff --git a/arch/arm/boot/dts/tegra20-seaboard.dts b/arch/arm/boot/dts/tegra20-seaboard.dts
index 365760b33a26..c8242533268f 100644
--- a/arch/arm/boot/dts/tegra20-seaboard.dts
+++ b/arch/arm/boot/dts/tegra20-seaboard.dts
@@ -566,7 +566,6 @@
566 566
567 usb@c5000000 { 567 usb@c5000000 {
568 status = "okay"; 568 status = "okay";
569 nvidia,vbus-gpio = <&gpio TEGRA_GPIO(D, 0) GPIO_ACTIVE_HIGH>;
570 dr_mode = "otg"; 569 dr_mode = "otg";
571 }; 570 };
572 571
@@ -830,6 +829,8 @@
830 regulator-max-microvolt = <5000000>; 829 regulator-max-microvolt = <5000000>;
831 enable-active-high; 830 enable-active-high;
832 gpio = <&gpio 24 0>; /* PD0 */ 831 gpio = <&gpio 24 0>; /* PD0 */
832 regulator-always-on;
833 regulator-boot-on;
833 }; 834 };
834 }; 835 };
835 836
diff --git a/arch/arm/boot/dts/tegra20-trimslice.dts b/arch/arm/boot/dts/tegra20-trimslice.dts
index ed4b901b0227..1e9d33adb925 100644
--- a/arch/arm/boot/dts/tegra20-trimslice.dts
+++ b/arch/arm/boot/dts/tegra20-trimslice.dts
@@ -312,7 +312,6 @@
312 312
313 usb@c5000000 { 313 usb@c5000000 {
314 status = "okay"; 314 status = "okay";
315 nvidia,vbus-gpio = <&gpio TEGRA_GPIO(V, 2) GPIO_ACTIVE_HIGH>;
316 }; 315 };
317 316
318 usb-phy@c5000000 { 317 usb-phy@c5000000 {
@@ -412,6 +411,8 @@
412 regulator-max-microvolt = <5000000>; 411 regulator-max-microvolt = <5000000>;
413 enable-active-high; 412 enable-active-high;
414 gpio = <&gpio 170 0>; /* PV2 */ 413 gpio = <&gpio 170 0>; /* PV2 */
414 regulator-always-on;
415 regulator-boot-on;
415 }; 416 };
416 }; 417 };
417 418
diff --git a/arch/arm/boot/dts/tegra20-whistler.dts b/arch/arm/boot/dts/tegra20-whistler.dts
index ab67c94db280..c703197dca6e 100644
--- a/arch/arm/boot/dts/tegra20-whistler.dts
+++ b/arch/arm/boot/dts/tegra20-whistler.dts
@@ -509,7 +509,6 @@
509 509
510 usb@c5000000 { 510 usb@c5000000 {
511 status = "okay"; 511 status = "okay";
512 nvidia,vbus-gpio = <&tca6416 0 GPIO_ACTIVE_HIGH>;
513 }; 512 };
514 513
515 usb-phy@c5000000 { 514 usb-phy@c5000000 {
@@ -519,7 +518,6 @@
519 518
520 usb@c5008000 { 519 usb@c5008000 {
521 status = "okay"; 520 status = "okay";
522 nvidia,vbus-gpio = <&tca6416 1 GPIO_ACTIVE_HIGH>;
523 }; 521 };
524 522
525 usb-phy@c5008000 { 523 usb-phy@c5008000 {
@@ -588,6 +586,8 @@
588 regulator-max-microvolt = <5000000>; 586 regulator-max-microvolt = <5000000>;
589 enable-active-high; 587 enable-active-high;
590 gpio = <&tca6416 0 0>; /* GPIO_PMU0 */ 588 gpio = <&tca6416 0 0>; /* GPIO_PMU0 */
589 regulator-always-on;
590 regulator-boot-on;
591 }; 591 };
592 592
593 vbus3_reg: regulator@3 { 593 vbus3_reg: regulator@3 {
@@ -598,6 +598,8 @@
598 regulator-max-microvolt = <5000000>; 598 regulator-max-microvolt = <5000000>;
599 enable-active-high; 599 enable-active-high;
600 gpio = <&tca6416 1 0>; /* GPIO_PMU1 */ 600 gpio = <&tca6416 1 0>; /* GPIO_PMU1 */
601 regulator-always-on;
602 regulator-boot-on;
601 }; 603 };
602 }; 604 };
603 605
diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi
index 9653fd8288d2..e4570834512e 100644
--- a/arch/arm/boot/dts/tegra20.dtsi
+++ b/arch/arm/boot/dts/tegra20.dtsi
@@ -477,13 +477,13 @@
477 <&tegra_car TEGRA20_CLK_USBD>; 477 <&tegra_car TEGRA20_CLK_USBD>;
478 clock-names = "reg", "pll_u", "timer", "utmi-pads"; 478 clock-names = "reg", "pll_u", "timer", "utmi-pads";
479 nvidia,has-legacy-mode; 479 nvidia,has-legacy-mode;
480 hssync_start_delay = <9>; 480 nvidia,hssync-start-delay = <9>;
481 idle_wait_delay = <17>; 481 nvidia,idle-wait-delay = <17>;
482 elastic_limit = <16>; 482 nvidia,elastic-limit = <16>;
483 term_range_adj = <6>; 483 nvidia,term-range-adj = <6>;
484 xcvr_setup = <9>; 484 nvidia,xcvr-setup = <9>;
485 xcvr_lsfslew = <1>; 485 nvidia,xcvr-lsfslew = <1>;
486 xcvr_lsrslew = <1>; 486 nvidia,xcvr-lsrslew = <1>;
487 status = "disabled"; 487 status = "disabled";
488 }; 488 };
489 489
@@ -527,13 +527,13 @@
527 <&tegra_car TEGRA20_CLK_CLK_M>, 527 <&tegra_car TEGRA20_CLK_CLK_M>,
528 <&tegra_car TEGRA20_CLK_USBD>; 528 <&tegra_car TEGRA20_CLK_USBD>;
529 clock-names = "reg", "pll_u", "timer", "utmi-pads"; 529 clock-names = "reg", "pll_u", "timer", "utmi-pads";
530 hssync_start_delay = <9>; 530 nvidia,hssync-start-delay = <9>;
531 idle_wait_delay = <17>; 531 nvidia,idle-wait-delay = <17>;
532 elastic_limit = <16>; 532 nvidia,elastic-limit = <16>;
533 term_range_adj = <6>; 533 nvidia,term-range-adj = <6>;
534 xcvr_setup = <9>; 534 nvidia,xcvr-setup = <9>;
535 xcvr_lsfslew = <2>; 535 nvidia,xcvr-lsfslew = <2>;
536 xcvr_lsrslew = <2>; 536 nvidia,xcvr-lsrslew = <2>;
537 status = "disabled"; 537 status = "disabled";
538 }; 538 };
539 539
diff --git a/arch/arm/boot/dts/wm8850-w70v2.dts b/arch/arm/boot/dts/wm8850-w70v2.dts
index 90e913fb64be..7a563d2523b0 100644
--- a/arch/arm/boot/dts/wm8850-w70v2.dts
+++ b/arch/arm/boot/dts/wm8850-w70v2.dts
@@ -11,13 +11,14 @@
11 11
12/dts-v1/; 12/dts-v1/;
13/include/ "wm8850.dtsi" 13/include/ "wm8850.dtsi"
14#include <dt-bindings/pwm/pwm.h>
14 15
15/ { 16/ {
16 model = "Wondermedia WM8850-W70v2 Tablet"; 17 model = "Wondermedia WM8850-W70v2 Tablet";
17 18
18 backlight { 19 backlight {
19 compatible = "pwm-backlight"; 20 compatible = "pwm-backlight";
20 pwms = <&pwm 0 50000 1>; /* duty inverted */ 21 pwms = <&pwm 0 50000 PWM_POLARITY_INVERTED>;
21 22
22 brightness-levels = <0 40 60 80 100 130 190 255>; 23 brightness-levels = <0 40 60 80 100 130 190 255>;
23 default-brightness-level = <5>; 24 default-brightness-level = <5>;
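
Replacing the bare third PWM cell with PWM_POLARITY_INVERTED makes the flag self-documenting; the header pulled in above defines it as a flag bit consumed by the PWM core's of_xlate path. For reference:

    /* From include/dt-bindings/pwm/pwm.h */
    #define PWM_POLARITY_INVERTED   (1 << 0)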
diff --git a/arch/arm/common/mcpm_head.S b/arch/arm/common/mcpm_head.S
index 80f033614a1f..39c96df3477a 100644
--- a/arch/arm/common/mcpm_head.S
+++ b/arch/arm/common/mcpm_head.S
@@ -151,7 +151,7 @@ mcpm_setup_leave:
151 151
152 mov r0, #INBOUND_NOT_COMING_UP 152 mov r0, #INBOUND_NOT_COMING_UP
153 strb r0, [r8, #MCPM_SYNC_CLUSTER_INBOUND] 153 strb r0, [r8, #MCPM_SYNC_CLUSTER_INBOUND]
154 dsb 154 dsb st
155 sev 155 sev
156 156
157 mov r0, r11 157 mov r0, r11
diff --git a/arch/arm/common/vlock.S b/arch/arm/common/vlock.S
index ff198583f683..8b7df283fedf 100644
--- a/arch/arm/common/vlock.S
+++ b/arch/arm/common/vlock.S
@@ -42,7 +42,7 @@
42 dmb 42 dmb
43 mov \rscratch, #0 43 mov \rscratch, #0
44 strb \rscratch, [\rbase, \rcpu] 44 strb \rscratch, [\rbase, \rcpu]
45 dsb 45 dsb st
46 sev 46 sev
47.endm 47.endm
48 48
@@ -102,7 +102,7 @@ ENTRY(vlock_unlock)
102 dmb 102 dmb
103 mov r1, #VLOCK_OWNER_NONE 103 mov r1, #VLOCK_OWNER_NONE
104 strb r1, [r0, #VLOCK_OWNER_OFFSET] 104 strb r1, [r0, #VLOCK_OWNER_OFFSET]
105 dsb 105 dsb st
106 sev 106 sev
107 bx lr 107 bx lr
108ENDPROC(vlock_unlock) 108ENDPROC(vlock_unlock)
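
Both hunks relax the full dsb to dsb st: before a sev that wakes other cores, it is enough to wait for the outstanding store to the lock/state byte to complete, and the store-only variant avoids also draining reads. A C-level sketch of the sequence:

    /* Sketch: store-only barrier followed by the event wakeup, as
     * used after clearing the vlock owner / mcpm inbound bytes. */
    static inline void store_barrier_and_wake(void)
    {
            asm volatile("dsb st\n\tsev" : : : "memory");
    }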
diff --git a/arch/arm/configs/bockw_defconfig b/arch/arm/configs/bockw_defconfig
index 845f5cdf62b5..e7e94948d194 100644
--- a/arch/arm/configs/bockw_defconfig
+++ b/arch/arm/configs/bockw_defconfig
@@ -82,6 +82,13 @@ CONFIG_SERIAL_SH_SCI_CONSOLE=y
82# CONFIG_HWMON is not set 82# CONFIG_HWMON is not set
83CONFIG_I2C=y 83CONFIG_I2C=y
84CONFIG_I2C_RCAR=y 84CONFIG_I2C_RCAR=y
85CONFIG_MEDIA_SUPPORT=y
86CONFIG_MEDIA_CAMERA_SUPPORT=y
87CONFIG_V4L_PLATFORM_DRIVERS=y
88CONFIG_SOC_CAMERA=y
89CONFIG_VIDEO_RCAR_VIN=y
90# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set
91CONFIG_VIDEO_ML86V7667=y
85CONFIG_SPI=y 92CONFIG_SPI=y
86CONFIG_SPI_SH_HSPI=y 93CONFIG_SPI_SH_HSPI=y
87CONFIG_USB=y 94CONFIG_USB=y
diff --git a/arch/arm/configs/keystone_defconfig b/arch/arm/configs/keystone_defconfig
index 62e968cac9dc..1f36b823905f 100644
--- a/arch/arm/configs/keystone_defconfig
+++ b/arch/arm/configs/keystone_defconfig
@@ -104,6 +104,7 @@ CONFIG_IP_SCTP=y
104CONFIG_VLAN_8021Q=y 104CONFIG_VLAN_8021Q=y
105CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 105CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
106CONFIG_CMA=y 106CONFIG_CMA=y
107CONFIG_DMA_CMA=y
107CONFIG_MTD=y 108CONFIG_MTD=y
108CONFIG_MTD_CMDLINE_PARTS=y 109CONFIG_MTD_CMDLINE_PARTS=y
109CONFIG_MTD_BLOCK=y 110CONFIG_MTD_BLOCK=y
diff --git a/arch/arm/configs/marzen_defconfig b/arch/arm/configs/marzen_defconfig
index 494e70aeb9e1..c50e52be4463 100644
--- a/arch/arm/configs/marzen_defconfig
+++ b/arch/arm/configs/marzen_defconfig
@@ -84,6 +84,13 @@ CONFIG_GPIO_RCAR=y
84CONFIG_THERMAL=y 84CONFIG_THERMAL=y
85CONFIG_RCAR_THERMAL=y 85CONFIG_RCAR_THERMAL=y
86CONFIG_SSB=y 86CONFIG_SSB=y
87CONFIG_MEDIA_SUPPORT=y
88CONFIG_MEDIA_CAMERA_SUPPORT=y
89CONFIG_V4L_PLATFORM_DRIVERS=y
90CONFIG_SOC_CAMERA=y
91CONFIG_VIDEO_RCAR_VIN=y
92# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set
93CONFIG_VIDEO_ADV7180=y
87CONFIG_USB=y 94CONFIG_USB=y
88CONFIG_USB_RCAR_PHY=y 95CONFIG_USB_RCAR_PHY=y
89CONFIG_MMC=y 96CONFIG_MMC=y
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 5339e6a4d639..056b27aafbe6 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -78,6 +78,7 @@ CONFIG_MAC80211_RC_PID=y
78CONFIG_MAC80211_RC_DEFAULT_PID=y 78CONFIG_MAC80211_RC_DEFAULT_PID=y
79CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 79CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
80CONFIG_CMA=y 80CONFIG_CMA=y
81CONFIG_DMA_CMA=y
81CONFIG_CONNECTOR=y 82CONFIG_CONNECTOR=y
82CONFIG_DEVTMPFS=y 83CONFIG_DEVTMPFS=y
83CONFIG_DEVTMPFS_MOUNT=y 84CONFIG_DEVTMPFS_MOUNT=y
@@ -185,13 +186,11 @@ CONFIG_OMAP2_DSS_RFBI=y
185CONFIG_OMAP2_DSS_SDI=y 186CONFIG_OMAP2_DSS_SDI=y
186CONFIG_OMAP2_DSS_DSI=y 187CONFIG_OMAP2_DSS_DSI=y
187CONFIG_FB_OMAP2=m 188CONFIG_FB_OMAP2=m
188CONFIG_PANEL_GENERIC_DPI=m 189CONFIG_DISPLAY_ENCODER_TFP410=m
189CONFIG_PANEL_TFP410=m 190CONFIG_DISPLAY_ENCODER_TPD12S015=m
190CONFIG_PANEL_SHARP_LS037V7DW01=m 191CONFIG_DISPLAY_CONNECTOR_DVI=m
191CONFIG_PANEL_NEC_NL8048HL11_01B=m 192CONFIG_DISPLAY_CONNECTOR_HDMI=m
192CONFIG_PANEL_TAAL=m 193CONFIG_DISPLAY_PANEL_DPI=m
193CONFIG_PANEL_TPO_TD043MTEA1=m
194CONFIG_PANEL_ACX565AKM=m
195CONFIG_BACKLIGHT_LCD_SUPPORT=y 194CONFIG_BACKLIGHT_LCD_SUPPORT=y
196CONFIG_LCD_CLASS_DEVICE=y 195CONFIG_LCD_CLASS_DEVICE=y
197CONFIG_LCD_PLATFORM=y 196CONFIG_LCD_PLATFORM=y
diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig
index 1effb43dab80..92d0a149aeb5 100644
--- a/arch/arm/configs/tegra_defconfig
+++ b/arch/arm/configs/tegra_defconfig
@@ -79,6 +79,7 @@ CONFIG_DEVTMPFS=y
79CONFIG_DEVTMPFS_MOUNT=y 79CONFIG_DEVTMPFS_MOUNT=y
80# CONFIG_FIRMWARE_IN_KERNEL is not set 80# CONFIG_FIRMWARE_IN_KERNEL is not set
81CONFIG_CMA=y 81CONFIG_CMA=y
82CONFIG_DMA_CMA=y
82CONFIG_MTD=y 83CONFIG_MTD=y
83CONFIG_MTD_M25P80=y 84CONFIG_MTD_M25P80=y
84CONFIG_PROC_DEVICETREE=y 85CONFIG_PROC_DEVICETREE=y
diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h
index e406d575c94f..5665134bfa3e 100644
--- a/arch/arm/include/asm/arch_timer.h
+++ b/arch/arm/include/asm/arch_timer.h
@@ -17,7 +17,8 @@ int arch_timer_arch_init(void);
17 * nicely work out which register we want, and chuck away the rest of 17 * nicely work out which register we want, and chuck away the rest of
18 * the code. At least it does so with a recent GCC (4.6.3). 18 * the code. At least it does so with a recent GCC (4.6.3).
19 */ 19 */
20static inline void arch_timer_reg_write(const int access, const int reg, u32 val) 20static __always_inline
21void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u32 val)
21{ 22{
22 if (access == ARCH_TIMER_PHYS_ACCESS) { 23 if (access == ARCH_TIMER_PHYS_ACCESS) {
23 switch (reg) { 24 switch (reg) {
@@ -28,9 +29,7 @@ static inline void arch_timer_reg_write(const int access, const int reg, u32 val
28 asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val)); 29 asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val));
29 break; 30 break;
30 } 31 }
31 } 32 } else if (access == ARCH_TIMER_VIRT_ACCESS) {
32
33 if (access == ARCH_TIMER_VIRT_ACCESS) {
34 switch (reg) { 33 switch (reg) {
35 case ARCH_TIMER_REG_CTRL: 34 case ARCH_TIMER_REG_CTRL:
36 asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" (val)); 35 asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" (val));
@@ -44,7 +43,8 @@ static inline void arch_timer_reg_write(const int access, const int reg, u32 val
44 isb(); 43 isb();
45} 44}
46 45
47static inline u32 arch_timer_reg_read(const int access, const int reg) 46static __always_inline
47u32 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg)
48{ 48{
49 u32 val = 0; 49 u32 val = 0;
50 50
@@ -57,9 +57,7 @@ static inline u32 arch_timer_reg_read(const int access, const int reg)
57 asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val)); 57 asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val));
58 break; 58 break;
59 } 59 }
60 } 60 } else if (access == ARCH_TIMER_VIRT_ACCESS) {
61
62 if (access == ARCH_TIMER_VIRT_ACCESS) {
63 switch (reg) { 61 switch (reg) {
64 case ARCH_TIMER_REG_CTRL: 62 case ARCH_TIMER_REG_CTRL:
65 asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val)); 63 asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val));
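
The accessors are renamed *_cp15 and forced __always_inline because every caller passes compile-time constants; once inlined, the if/else and switch fold away, leaving exactly one mcr/mrc. A sketch of the folding:

    /* Sketch: with constant arguments the dispatch collapses. */
    static __always_inline u32 read_phys_ctrl(void)
    {
            /* compiles down to a single mrc after constant folding */
            return arch_timer_reg_read_cp15(ARCH_TIMER_PHYS_ACCESS,
                                            ARCH_TIMER_REG_CTRL);
    }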
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index a5fef710af32..fcc1b5bf6979 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -220,9 +220,9 @@
220#ifdef CONFIG_SMP 220#ifdef CONFIG_SMP
221#if __LINUX_ARM_ARCH__ >= 7 221#if __LINUX_ARM_ARCH__ >= 7
222 .ifeqs "\mode","arm" 222 .ifeqs "\mode","arm"
223 ALT_SMP(dmb) 223 ALT_SMP(dmb ish)
224 .else 224 .else
225 ALT_SMP(W(dmb)) 225 ALT_SMP(W(dmb) ish)
226 .endif 226 .endif
227#elif __LINUX_ARM_ARCH__ == 6 227#elif __LINUX_ARM_ARCH__ == 6
228 ALT_SMP(mcr p15, 0, r0, c7, c10, 5) @ dmb 228 ALT_SMP(mcr p15, 0, r0, c7, c10, 5) @ dmb
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index 8dcd9c702d90..60f15e274e6d 100644
--- a/arch/arm/include/asm/barrier.h
+++ b/arch/arm/include/asm/barrier.h
@@ -14,27 +14,27 @@
14#endif 14#endif
15 15
16#if __LINUX_ARM_ARCH__ >= 7 16#if __LINUX_ARM_ARCH__ >= 7
17#define isb() __asm__ __volatile__ ("isb" : : : "memory") 17#define isb(option) __asm__ __volatile__ ("isb " #option : : : "memory")
18#define dsb() __asm__ __volatile__ ("dsb" : : : "memory") 18#define dsb(option) __asm__ __volatile__ ("dsb " #option : : : "memory")
19#define dmb() __asm__ __volatile__ ("dmb" : : : "memory") 19#define dmb(option) __asm__ __volatile__ ("dmb " #option : : : "memory")
20#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6 20#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6
21#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \ 21#define isb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
22 : : "r" (0) : "memory") 22 : : "r" (0) : "memory")
23#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \ 23#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
24 : : "r" (0) : "memory") 24 : : "r" (0) : "memory")
25#define dmb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \ 25#define dmb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
26 : : "r" (0) : "memory") 26 : : "r" (0) : "memory")
27#elif defined(CONFIG_CPU_FA526) 27#elif defined(CONFIG_CPU_FA526)
28#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \ 28#define isb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
29 : : "r" (0) : "memory") 29 : : "r" (0) : "memory")
30#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \ 30#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
31 : : "r" (0) : "memory") 31 : : "r" (0) : "memory")
32#define dmb() __asm__ __volatile__ ("" : : : "memory") 32#define dmb(x) __asm__ __volatile__ ("" : : : "memory")
33#else 33#else
34#define isb() __asm__ __volatile__ ("" : : : "memory") 34#define isb(x) __asm__ __volatile__ ("" : : : "memory")
35#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \ 35#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
36 : : "r" (0) : "memory") 36 : : "r" (0) : "memory")
37#define dmb() __asm__ __volatile__ ("" : : : "memory") 37#define dmb(x) __asm__ __volatile__ ("" : : : "memory")
38#endif 38#endif
39 39
40#ifdef CONFIG_ARCH_HAS_BARRIERS 40#ifdef CONFIG_ARCH_HAS_BARRIERS
@@ -42,7 +42,7 @@
42#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP) 42#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
43#define mb() do { dsb(); outer_sync(); } while (0) 43#define mb() do { dsb(); outer_sync(); } while (0)
44#define rmb() dsb() 44#define rmb() dsb()
45#define wmb() mb() 45#define wmb() do { dsb(st); outer_sync(); } while (0)
46#else 46#else
47#define mb() barrier() 47#define mb() barrier()
48#define rmb() barrier() 48#define rmb() barrier()
@@ -54,9 +54,9 @@
54#define smp_rmb() barrier() 54#define smp_rmb() barrier()
55#define smp_wmb() barrier() 55#define smp_wmb() barrier()
56#else 56#else
57#define smp_mb() dmb() 57#define smp_mb() dmb(ish)
58#define smp_rmb() dmb() 58#define smp_rmb() smp_mb()
59#define smp_wmb() dmb() 59#define smp_wmb() dmb(ishst)
60#endif 60#endif
61 61
62#define read_barrier_depends() do { } while(0) 62#define read_barrier_depends() do { } while(0)
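
The barrier macros now take an option token that the preprocessor stringizes straight into the instruction, so the shareability domain and access type can be narrowed per call site; an empty argument still produces a full-system barrier. What the common wrappers expand to on ARMv7, for reference:

    /* Illustration of the expansions introduced above. */
    smp_mb();   /* dmb(ish)   -> "dmb ish"   : inner-shareable, all accesses */
    smp_wmb();  /* dmb(ishst) -> "dmb ishst" : inner-shareable, stores only  */
    wmb();      /* dsb(st) + outer_sync()    : ordered against device memory */
    dsb();      /* -> "dsb "                 : full system, as before        */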
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 17d0ae8672fa..15f2d5bf8875 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -268,8 +268,7 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr
268 * Harvard caches are synchronised for the user space address range. 268 * Harvard caches are synchronised for the user space address range.
269 * This is used for the ARM private sys_cacheflush system call. 269 * This is used for the ARM private sys_cacheflush system call.
270 */ 270 */
271#define flush_cache_user_range(start,end) \ 271#define flush_cache_user_range(s,e) __cpuc_coherent_user_range(s,e)
272 __cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
273 272
274/* 273/*
275 * Perform necessary cache operations to ensure that data previously 274 * Perform necessary cache operations to ensure that data previously
@@ -352,7 +351,7 @@ static inline void flush_cache_vmap(unsigned long start, unsigned long end)
352 * set_pte_at() called from vmap_pte_range() does not 351 * set_pte_at() called from vmap_pte_range() does not
353 * have a DSB after cleaning the cache line. 352 * have a DSB after cleaning the cache line.
354 */ 353 */
355 dsb(); 354 dsb(ishst);
356} 355}
357 356
358static inline void flush_cache_vunmap(unsigned long start, unsigned long end) 357static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
diff --git a/arch/arm/include/asm/dma-contiguous.h b/arch/arm/include/asm/dma-contiguous.h
index 3ed37b4d93da..e072bb2ba1b1 100644
--- a/arch/arm/include/asm/dma-contiguous.h
+++ b/arch/arm/include/asm/dma-contiguous.h
@@ -2,7 +2,7 @@
2#define ASMARM_DMA_CONTIGUOUS_H 2#define ASMARM_DMA_CONTIGUOUS_H
3 3
4#ifdef __KERNEL__ 4#ifdef __KERNEL__
5#ifdef CONFIG_CMA 5#ifdef CONFIG_DMA_CMA
6 6
7#include <linux/types.h> 7#include <linux/types.h>
8#include <asm-generic/dma-contiguous.h> 8#include <asm-generic/dma-contiguous.h>
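
CONFIG_CMA is now only the core contiguous allocator; the DMA-mapping integration has its own symbol, CONFIG_DMA_CMA, which is why the defconfig hunks earlier in this diff add it alongside CMA and why this header keys off the new option. A hedged guard sketch:

    /* Sketch: code wanting the per-device CMA area must test the
     * DMA-specific option, not the core allocator option. */
    #ifdef CONFIG_DMA_CMA
            struct cma *area = dev_get_cma_area(dev);
    #endif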
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index 56211f2084ef..f4b46d39b9cf 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -19,8 +19,6 @@ typedef elf_greg_t elf_gregset_t[ELF_NGREG];
19 19
20typedef struct user_fp elf_fpregset_t; 20typedef struct user_fp elf_fpregset_t;
21 21
22#define EM_ARM 40
23
24#define EF_ARM_EABI_MASK 0xff000000 22#define EF_ARM_EABI_MASK 0xff000000
25#define EF_ARM_EABI_UNKNOWN 0x00000000 23#define EF_ARM_EABI_UNKNOWN 0x00000000
26#define EF_ARM_EABI_VER1 0x01000000 24#define EF_ARM_EABI_VER1 0x01000000
diff --git a/arch/arm/include/asm/hardware/debug-8250.S b/arch/arm/include/asm/hardware/debug-8250.S
deleted file mode 100644
index 22c689255e6e..000000000000
--- a/arch/arm/include/asm/hardware/debug-8250.S
+++ /dev/null
@@ -1,29 +0,0 @@
1/*
2 * arch/arm/include/asm/hardware/debug-8250.S
3 *
4 * Copyright (C) 1994-1999 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/serial_reg.h>
11
12 .macro senduart,rd,rx
13 strb \rd, [\rx, #UART_TX << UART_SHIFT]
14 .endm
15
16 .macro busyuart,rd,rx
171002: ldrb \rd, [\rx, #UART_LSR << UART_SHIFT]
18 and \rd, \rd, #UART_LSR_TEMT | UART_LSR_THRE
19 teq \rd, #UART_LSR_TEMT | UART_LSR_THRE
20 bne 1002b
21 .endm
22
23 .macro waituart,rd,rx
24#ifdef FLOW_CONTROL
251001: ldrb \rd, [\rx, #UART_MSR << UART_SHIFT]
26 tst \rd, #UART_MSR_CTS
27 beq 1001b
28#endif
29 .endm
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 472ac7091003..9b28c41f4ba9 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -64,7 +64,7 @@ void kvm_clear_hyp_idmap(void);
64 64
65static inline void kvm_set_pte(pte_t *pte, pte_t new_pte) 65static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
66{ 66{
67 pte_val(*pte) = new_pte; 67 *pte = new_pte;
68 /* 68 /*
69 * flush_pmd_entry just takes a void pointer and cleans the necessary 69 * flush_pmd_entry just takes a void pointer and cleans the necessary
70 * cache entries, so we can reuse the function for ptes. 70 * cache entries, so we can reuse the function for ptes.
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 441efc491b50..69b879ac0289 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -65,12 +65,12 @@ struct machine_desc {
65/* 65/*
66 * Current machine - only accessible during boot. 66 * Current machine - only accessible during boot.
67 */ 67 */
68extern struct machine_desc *machine_desc; 68extern const struct machine_desc *machine_desc;
69 69
70/* 70/*
71 * Machine type table - also only accessible during boot 71 * Machine type table - also only accessible during boot
72 */ 72 */
73extern struct machine_desc __arch_info_begin[], __arch_info_end[]; 73extern const struct machine_desc __arch_info_begin[], __arch_info_end[];
74#define for_each_machine_desc(p) \ 74#define for_each_machine_desc(p) \
75 for (p = __arch_info_begin; p < __arch_info_end; p++) 75 for (p = __arch_info_begin; p < __arch_info_end; p++)
76 76
diff --git a/arch/arm/include/asm/memblock.h b/arch/arm/include/asm/memblock.h
index 00ca5f92648e..c2f5102ae659 100644
--- a/arch/arm/include/asm/memblock.h
+++ b/arch/arm/include/asm/memblock.h
@@ -4,8 +4,7 @@
4struct meminfo; 4struct meminfo;
5struct machine_desc; 5struct machine_desc;
6 6
7extern void arm_memblock_init(struct meminfo *, struct machine_desc *); 7void arm_memblock_init(struct meminfo *, const struct machine_desc *);
8
9phys_addr_t arm_memblock_steal(phys_addr_t size, phys_addr_t align); 8phys_addr_t arm_memblock_steal(phys_addr_t size, phys_addr_t align);
10 9
11#endif 10#endif
diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h
index 0d3a28dbc8e5..ed690c49ef93 100644
--- a/arch/arm/include/asm/module.h
+++ b/arch/arm/include/asm/module.h
@@ -12,6 +12,8 @@ enum {
12 ARM_SEC_CORE, 12 ARM_SEC_CORE,
13 ARM_SEC_EXIT, 13 ARM_SEC_EXIT,
14 ARM_SEC_DEVEXIT, 14 ARM_SEC_DEVEXIT,
15 ARM_SEC_HOT,
16 ARM_SEC_UNLIKELY,
15 ARM_SEC_MAX, 17 ARM_SEC_MAX,
16}; 18};
17 19
diff --git a/arch/arm/include/asm/neon.h b/arch/arm/include/asm/neon.h
new file mode 100644
index 000000000000..8f730fe70093
--- /dev/null
+++ b/arch/arm/include/asm/neon.h
@@ -0,0 +1,36 @@
1/*
2 * linux/arch/arm/include/asm/neon.h
3 *
4 * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <asm/hwcap.h>
12
13#define cpu_has_neon() (!!(elf_hwcap & HWCAP_NEON))
14
15#ifdef __ARM_NEON__
16
17/*
18 * If you are affected by the BUILD_BUG below, it probably means that you are
19 * using NEON code /and/ calling the kernel_neon_begin() function from the same
20 * compilation unit. To prevent issues that may arise from GCC reordering or
21 * generating(1) NEON instructions outside of these begin/end functions, the
22 * only supported way of using NEON code in the kernel is by isolating it in a
23 * separate compilation unit, and calling it from another unit from inside a
24 * kernel_neon_begin/kernel_neon_end pair.
25 *
26 * (1) Current GCC (4.7) might generate NEON instructions at O3 level if
27 * -mfpu=neon is set.
28 */
29
30#define kernel_neon_begin() \
31 BUILD_BUG_ON_MSG(1, "kernel_neon_begin() called from NEON code")
32
33#else
34void kernel_neon_begin(void);
35#endif
36void kernel_neon_end(void);
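
The header enforces the pattern its comment describes: NEON code must live in its own translation unit (built with the NEON flags), and callers elsewhere wrap the call in kernel_neon_begin()/kernel_neon_end(); including the header from a NEON unit turns kernel_neon_begin() into a build error. A hedged usage sketch, all file and function names illustrative:

    /* mylib_neon.c: built with -mfpu=neon, contains only NEON code */
    void mylib_core_neon(u32 *dst, const u32 *src, int words);

    /* mylib_glue.c: built without NEON flags, safe to use the header */
    #include <asm/neon.h>

    void mylib_transform(u32 *dst, const u32 *src, int words)
    {
            kernel_neon_begin();    /* saves user VFP/NEON state */
            mylib_core_neon(dst, src, words);
            kernel_neon_end();
    }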
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 04aeb02d2e11..be956dbf6bae 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -100,7 +100,7 @@ extern pgprot_t pgprot_s2_device;
100#define PAGE_HYP _MOD_PROT(pgprot_kernel, L_PTE_HYP) 100#define PAGE_HYP _MOD_PROT(pgprot_kernel, L_PTE_HYP)
101#define PAGE_HYP_DEVICE _MOD_PROT(pgprot_hyp_device, L_PTE_HYP) 101#define PAGE_HYP_DEVICE _MOD_PROT(pgprot_hyp_device, L_PTE_HYP)
102#define PAGE_S2 _MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY) 102#define PAGE_S2 _MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY)
103#define PAGE_S2_DEVICE _MOD_PROT(pgprot_s2_device, L_PTE_USER | L_PTE_S2_RDONLY) 103#define PAGE_S2_DEVICE _MOD_PROT(pgprot_s2_device, L_PTE_S2_RDWR)
104 104
105#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE) 105#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
106#define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN) 106#define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
diff --git a/arch/arm/include/asm/prom.h b/arch/arm/include/asm/prom.h
index a219227c3e43..4a2985e21969 100644
--- a/arch/arm/include/asm/prom.h
+++ b/arch/arm/include/asm/prom.h
@@ -15,13 +15,13 @@
15 15
16#ifdef CONFIG_OF 16#ifdef CONFIG_OF
17 17
18extern struct machine_desc *setup_machine_fdt(unsigned int dt_phys); 18extern const struct machine_desc *setup_machine_fdt(unsigned int dt_phys);
19extern void arm_dt_memblock_reserve(void); 19extern void arm_dt_memblock_reserve(void);
20extern void __init arm_dt_init_cpu_maps(void); 20extern void __init arm_dt_init_cpu_maps(void);
21 21
22#else /* CONFIG_OF */ 22#else /* CONFIG_OF */
23 23
24static inline struct machine_desc *setup_machine_fdt(unsigned int dt_phys) 24static inline const struct machine_desc *setup_machine_fdt(unsigned int dt_phys)
25{ 25{
26 return NULL; 26 return NULL;
27} 27}
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index 6462a721ebd4..a252c0bfacf5 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -88,4 +88,7 @@ static inline u32 mpidr_hash_size(void)
88{ 88{
89 return 1 << mpidr_hash.bits; 89 return 1 << mpidr_hash.bits;
90} 90}
91
92extern int platform_can_cpu_hotplug(void);
93
91#endif 94#endif
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index f8b8965666e9..4f2c28060c9a 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -46,7 +46,7 @@ static inline void dsb_sev(void)
46{ 46{
47#if __LINUX_ARM_ARCH__ >= 7 47#if __LINUX_ARM_ARCH__ >= 7
48 __asm__ __volatile__ ( 48 __asm__ __volatile__ (
49 "dsb\n" 49 "dsb ishst\n"
50 SEV 50 SEV
51 ); 51 );
52#else 52#else
@@ -107,7 +107,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
107 " subs %1, %0, %0, ror #16\n" 107 " subs %1, %0, %0, ror #16\n"
108 " addeq %0, %0, %4\n" 108 " addeq %0, %0, %4\n"
109 " strexeq %2, %0, [%3]" 109 " strexeq %2, %0, [%3]"
110 : "=&r" (slock), "=&r" (contended), "=r" (res) 110 : "=&r" (slock), "=&r" (contended), "=&r" (res)
111 : "r" (&lock->slock), "I" (1 << TICKET_SHIFT) 111 : "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
112 : "cc"); 112 : "cc");
113 } while (res); 113 } while (res);
@@ -168,17 +168,20 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
168 168
169static inline int arch_write_trylock(arch_rwlock_t *rw) 169static inline int arch_write_trylock(arch_rwlock_t *rw)
170{ 170{
171 unsigned long tmp; 171 unsigned long contended, res;
172 172
173 __asm__ __volatile__( 173 do {
174" ldrex %0, [%1]\n" 174 __asm__ __volatile__(
175" teq %0, #0\n" 175 " ldrex %0, [%2]\n"
176" strexeq %0, %2, [%1]" 176 " mov %1, #0\n"
177 : "=&r" (tmp) 177 " teq %0, #0\n"
178 : "r" (&rw->lock), "r" (0x80000000) 178 " strexeq %1, %3, [%2]"
179 : "cc"); 179 : "=&r" (contended), "=&r" (res)
180 : "r" (&rw->lock), "r" (0x80000000)
181 : "cc");
182 } while (res);
180 183
181 if (tmp == 0) { 184 if (!contended) {
182 smp_mb(); 185 smp_mb();
183 return 1; 186 return 1;
184 } else { 187 } else {
@@ -254,18 +257,26 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
254 257
255static inline int arch_read_trylock(arch_rwlock_t *rw) 258static inline int arch_read_trylock(arch_rwlock_t *rw)
256{ 259{
257 unsigned long tmp, tmp2 = 1; 260 unsigned long contended, res;
258 261
259 __asm__ __volatile__( 262 do {
260" ldrex %0, [%2]\n" 263 __asm__ __volatile__(
261" adds %0, %0, #1\n" 264 " ldrex %0, [%2]\n"
262" strexpl %1, %0, [%2]\n" 265 " mov %1, #0\n"
263 : "=&r" (tmp), "+r" (tmp2) 266 " adds %0, %0, #1\n"
264 : "r" (&rw->lock) 267 " strexpl %1, %0, [%2]"
265 : "cc"); 268 : "=&r" (contended), "=&r" (res)
269 : "r" (&rw->lock)
270 : "cc");
271 } while (res);
266 272
267 smp_mb(); 273 /* If the lock is negative, then it is already held for write. */
268 return tmp2 == 0; 274 if (contended < 0x80000000) {
275 smp_mb();
276 return 1;
277 } else {
278 return 0;
279 }
269} 280}
270 281
271/* read_can_lock - would read_trylock() succeed? */ 282/* read_can_lock - would read_trylock() succeed? */
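
The rewritten trylocks separate two outcomes that the old code conflated: the lock genuinely being held (give up, return 0) and the strex losing its exclusive reservation (retry the ldrex/strex pair). Previously a lost reservation was reported as contention, so trylock could fail spuriously. A sketch of the idiom, with ldrex()/strex() as hypothetical stand-ins for the inline-asm exclusives:

    /* Hedged sketch; ldrex()/strex() are illustrative helpers, and
     * strex() returns nonzero when the reservation was lost. */
    static int trylock_sketch(unsigned long *lock)
    {
            unsigned long contended;
            int res;

            do {
                    contended = ldrex(lock);
                    res = 0;
                    if (!contended)
                            res = strex(lock, 1);
            } while (res);          /* retry only on strex failure */

            return !contended;      /* held elsewhere: a real failure */
    }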
diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h
index fa09e6b49bf1..c99e259469f7 100644
--- a/arch/arm/include/asm/switch_to.h
+++ b/arch/arm/include/asm/switch_to.h
@@ -4,6 +4,16 @@
4#include <linux/thread_info.h> 4#include <linux/thread_info.h>
5 5
6/* 6/*
7 * For v7 SMP cores running a preemptible kernel we may be pre-empted
8 * during a TLB maintenance operation, so execute an inner-shareable dsb
9 * to ensure that the maintenance completes in case we migrate to another
10 * CPU.
11 */
12#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7)
13#define finish_arch_switch(prev) dsb(ish)
14#endif
15
16/*
7 * switch_to(prev, next) should switch from task `prev' to `next' 17 * switch_to(prev, next) should switch from task `prev' to `next'
8 * `prev' will never be the same as `next'. schedule() itself 18 * `prev' will never be the same as `next'. schedule() itself
9 * contains the memory barrier to tell GCC not to cache `current'. 19 * contains the memory barrier to tell GCC not to cache `current'.
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 2b8114fcba09..df5e13d64f2c 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -43,6 +43,16 @@ struct cpu_context_save {
43 __u32 extra[2]; /* Xscale 'acc' register, etc */ 43 __u32 extra[2]; /* Xscale 'acc' register, etc */
44}; 44};
45 45
46struct arm_restart_block {
47 union {
48 /* For user cache flushing */
49 struct {
50 unsigned long start;
51 unsigned long end;
52 } cache;
53 };
54};
55
46/* 56/*
47 * low level task data that entry.S needs immediate access to. 57 * low level task data that entry.S needs immediate access to.
48 * __switch_to() assumes cpu_context follows immediately after cpu_domain. 58 * __switch_to() assumes cpu_context follows immediately after cpu_domain.
@@ -68,6 +78,7 @@ struct thread_info {
68 unsigned long thumbee_state; /* ThumbEE Handler Base register */ 78 unsigned long thumbee_state; /* ThumbEE Handler Base register */
69#endif 79#endif
70 struct restart_block restart_block; 80 struct restart_block restart_block;
81 struct arm_restart_block arm_restart_block;
71}; 82};
72 83
73#define INIT_THREAD_INFO(tsk) \ 84#define INIT_THREAD_INFO(tsk) \
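
arm_restart_block gives the ARM-private cacheflush syscall somewhere to stash its range so the operation can be restarted after a fault or signal, which pairs with the flush_cache_user_range() change earlier in this diff. A heavily hedged sketch of the bookkeeping only (the surrounding restart plumbing is elided):

    /* Sketch only: recording the user range before a restartable
     * cache flush; the restart path itself is not shown. */
    struct thread_info *ti = current_thread_info();

    ti->arm_restart_block.cache.start = start;
    ti->arm_restart_block.cache.end   = end;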
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 46e7cfb3e721..0baf7f0d9394 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -43,6 +43,7 @@ struct mmu_gather {
43 struct mm_struct *mm; 43 struct mm_struct *mm;
44 unsigned int fullmm; 44 unsigned int fullmm;
45 struct vm_area_struct *vma; 45 struct vm_area_struct *vma;
46 unsigned long start, end;
46 unsigned long range_start; 47 unsigned long range_start;
47 unsigned long range_end; 48 unsigned long range_end;
48 unsigned int nr; 49 unsigned int nr;
@@ -107,10 +108,12 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
107} 108}
108 109
109static inline void 110static inline void
110tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm) 111tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
111{ 112{
112 tlb->mm = mm; 113 tlb->mm = mm;
113 tlb->fullmm = fullmm; 114 tlb->fullmm = !(start | (end+1));
115 tlb->start = start;
116 tlb->end = end;
114 tlb->vma = NULL; 117 tlb->vma = NULL;
115 tlb->max = ARRAY_SIZE(tlb->local); 118 tlb->max = ARRAY_SIZE(tlb->local);
116 tlb->pages = tlb->local; 119 tlb->pages = tlb->local;
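
tlb_gather_mmu() now takes the address range being torn down instead of a boolean; a full-mm teardown is encoded by convention as start = 0, end = ~0UL, which is exactly what the bit trick recovers:

    /* Sketch: !(start | (end + 1)) is true iff start == 0 and
     * end == ~0UL, i.e. the whole address space is going away. */
    bool fullmm = !(start | (end + 1));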
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index f467e9b3f8d5..38960264040c 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -319,67 +319,110 @@ extern struct cpu_tlb_fns cpu_tlb;
319#define tlb_op(f, regs, arg) __tlb_op(f, "p15, 0, %0, " regs, arg) 319#define tlb_op(f, regs, arg) __tlb_op(f, "p15, 0, %0, " regs, arg)
320#define tlb_l2_op(f, regs, arg) __tlb_op(f, "p15, 1, %0, " regs, arg) 320#define tlb_l2_op(f, regs, arg) __tlb_op(f, "p15, 1, %0, " regs, arg)
321 321
322static inline void local_flush_tlb_all(void) 322static inline void __local_flush_tlb_all(void)
323{ 323{
324 const int zero = 0; 324 const int zero = 0;
325 const unsigned int __tlb_flag = __cpu_tlb_flags; 325 const unsigned int __tlb_flag = __cpu_tlb_flags;
326 326
327 if (tlb_flag(TLB_WB))
328 dsb();
329
330 tlb_op(TLB_V4_U_FULL | TLB_V6_U_FULL, "c8, c7, 0", zero); 327 tlb_op(TLB_V4_U_FULL | TLB_V6_U_FULL, "c8, c7, 0", zero);
331 tlb_op(TLB_V4_D_FULL | TLB_V6_D_FULL, "c8, c6, 0", zero); 328 tlb_op(TLB_V4_D_FULL | TLB_V6_D_FULL, "c8, c6, 0", zero);
332 tlb_op(TLB_V4_I_FULL | TLB_V6_I_FULL, "c8, c5, 0", zero); 329 tlb_op(TLB_V4_I_FULL | TLB_V6_I_FULL, "c8, c5, 0", zero);
333 tlb_op(TLB_V7_UIS_FULL, "c8, c3, 0", zero); 330}
331
332static inline void local_flush_tlb_all(void)
333{
334 const int zero = 0;
335 const unsigned int __tlb_flag = __cpu_tlb_flags;
336
337 if (tlb_flag(TLB_WB))
338 dsb(nshst);
339
340 __local_flush_tlb_all();
341 tlb_op(TLB_V7_UIS_FULL, "c8, c7, 0", zero);
334 342
335 if (tlb_flag(TLB_BARRIER)) { 343 if (tlb_flag(TLB_BARRIER)) {
336 dsb(); 344 dsb(nsh);
337 isb(); 345 isb();
338 } 346 }
339} 347}
340 348
341static inline void local_flush_tlb_mm(struct mm_struct *mm) 349static inline void __flush_tlb_all(void)
342{ 350{
343 const int zero = 0; 351 const int zero = 0;
344 const int asid = ASID(mm);
345 const unsigned int __tlb_flag = __cpu_tlb_flags; 352 const unsigned int __tlb_flag = __cpu_tlb_flags;
346 353
347 if (tlb_flag(TLB_WB)) 354 if (tlb_flag(TLB_WB))
348 dsb(); 355 dsb(ishst);
356
357 __local_flush_tlb_all();
358 tlb_op(TLB_V7_UIS_FULL, "c8, c3, 0", zero);
359
360 if (tlb_flag(TLB_BARRIER)) {
361 dsb(ish);
362 isb();
363 }
364}
365
366static inline void __local_flush_tlb_mm(struct mm_struct *mm)
367{
368 const int zero = 0;
369 const int asid = ASID(mm);
370 const unsigned int __tlb_flag = __cpu_tlb_flags;
349 371
350 if (possible_tlb_flags & (TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) { 372 if (possible_tlb_flags & (TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) {
351 if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) { 373 if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
352 tlb_op(TLB_V4_U_FULL, "c8, c7, 0", zero); 374 tlb_op(TLB_V4_U_FULL, "c8, c7, 0", zero);
353 tlb_op(TLB_V4_D_FULL, "c8, c6, 0", zero); 375 tlb_op(TLB_V4_D_FULL, "c8, c6, 0", zero);
354 tlb_op(TLB_V4_I_FULL, "c8, c5, 0", zero); 376 tlb_op(TLB_V4_I_FULL, "c8, c5, 0", zero);
355 } 377 }
356 put_cpu();
357 } 378 }
358 379
359 tlb_op(TLB_V6_U_ASID, "c8, c7, 2", asid); 380 tlb_op(TLB_V6_U_ASID, "c8, c7, 2", asid);
360 tlb_op(TLB_V6_D_ASID, "c8, c6, 2", asid); 381 tlb_op(TLB_V6_D_ASID, "c8, c6, 2", asid);
361 tlb_op(TLB_V6_I_ASID, "c8, c5, 2", asid); 382 tlb_op(TLB_V6_I_ASID, "c8, c5, 2", asid);
383}
384
385static inline void local_flush_tlb_mm(struct mm_struct *mm)
386{
387 const int asid = ASID(mm);
388 const unsigned int __tlb_flag = __cpu_tlb_flags;
389
390 if (tlb_flag(TLB_WB))
391 dsb(nshst);
392
393 __local_flush_tlb_mm(mm);
394 tlb_op(TLB_V7_UIS_ASID, "c8, c7, 2", asid);
395
396 if (tlb_flag(TLB_BARRIER))
397 dsb(nsh);
398}
399
400static inline void __flush_tlb_mm(struct mm_struct *mm)
401{
402 const unsigned int __tlb_flag = __cpu_tlb_flags;
403
404 if (tlb_flag(TLB_WB))
405 dsb(ishst);
406
407 __local_flush_tlb_mm(mm);
362#ifdef CONFIG_ARM_ERRATA_720789 408#ifdef CONFIG_ARM_ERRATA_720789
363 tlb_op(TLB_V7_UIS_ASID, "c8, c3, 0", zero); 409 tlb_op(TLB_V7_UIS_ASID, "c8, c3, 0", 0);
364#else 410#else
365 tlb_op(TLB_V7_UIS_ASID, "c8, c3, 2", asid); 411 tlb_op(TLB_V7_UIS_ASID, "c8, c3, 2", ASID(mm));
366#endif 412#endif
367 413
368 if (tlb_flag(TLB_BARRIER)) 414 if (tlb_flag(TLB_BARRIER))
369 dsb(); 415 dsb(ish);
370} 416}
371 417
372static inline void 418static inline void
373local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) 419__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
374{ 420{
375 const int zero = 0; 421 const int zero = 0;
376 const unsigned int __tlb_flag = __cpu_tlb_flags; 422 const unsigned int __tlb_flag = __cpu_tlb_flags;
377 423
378 uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm); 424 uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);
379 425
380 if (tlb_flag(TLB_WB))
381 dsb();
382
383 if (possible_tlb_flags & (TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) && 426 if (possible_tlb_flags & (TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) &&
384 cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { 427 cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
385 tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", uaddr); 428 tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", uaddr);
@@ -392,6 +435,36 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
392 tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", uaddr); 435 tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", uaddr);
393 tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", uaddr); 436 tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", uaddr);
394 tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", uaddr); 437 tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", uaddr);
438}
439
440static inline void
441local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
442{
443 const unsigned int __tlb_flag = __cpu_tlb_flags;
444
445 uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);
446
447 if (tlb_flag(TLB_WB))
448 dsb(nshst);
449
450 __local_flush_tlb_page(vma, uaddr);
451 tlb_op(TLB_V7_UIS_PAGE, "c8, c7, 1", uaddr);
452
453 if (tlb_flag(TLB_BARRIER))
454 dsb(nsh);
455}
456
457static inline void
458__flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
459{
460 const unsigned int __tlb_flag = __cpu_tlb_flags;
461
462 uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);
463
464 if (tlb_flag(TLB_WB))
465 dsb(ishst);
466
467 __local_flush_tlb_page(vma, uaddr);
395#ifdef CONFIG_ARM_ERRATA_720789 468#ifdef CONFIG_ARM_ERRATA_720789
396 tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 3", uaddr & PAGE_MASK); 469 tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 3", uaddr & PAGE_MASK);
397#else 470#else
@@ -399,19 +472,14 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
399#endif 472#endif
400 473
401 if (tlb_flag(TLB_BARRIER)) 474 if (tlb_flag(TLB_BARRIER))
402 dsb(); 475 dsb(ish);
403} 476}
404 477
405static inline void local_flush_tlb_kernel_page(unsigned long kaddr) 478static inline void __local_flush_tlb_kernel_page(unsigned long kaddr)
406{ 479{
407 const int zero = 0; 480 const int zero = 0;
408 const unsigned int __tlb_flag = __cpu_tlb_flags; 481 const unsigned int __tlb_flag = __cpu_tlb_flags;
409 482
410 kaddr &= PAGE_MASK;
411
412 if (tlb_flag(TLB_WB))
413 dsb();
414
415 tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", kaddr); 483 tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", kaddr);
416 tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", kaddr); 484 tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", kaddr);
417 tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", kaddr); 485 tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", kaddr);
@@ -421,26 +489,75 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
421 tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", kaddr); 489 tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", kaddr);
422 tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", kaddr); 490 tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", kaddr);
423 tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", kaddr); 491 tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", kaddr);
492}
493
494static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
495{
496 const unsigned int __tlb_flag = __cpu_tlb_flags;
497
498 kaddr &= PAGE_MASK;
499
500 if (tlb_flag(TLB_WB))
501 dsb(nshst);
502
503 __local_flush_tlb_kernel_page(kaddr);
504 tlb_op(TLB_V7_UIS_PAGE, "c8, c7, 1", kaddr);
505
506 if (tlb_flag(TLB_BARRIER)) {
507 dsb(nsh);
508 isb();
509 }
510}
511
512static inline void __flush_tlb_kernel_page(unsigned long kaddr)
513{
514 const unsigned int __tlb_flag = __cpu_tlb_flags;
515
516 kaddr &= PAGE_MASK;
517
518 if (tlb_flag(TLB_WB))
519 dsb(ishst);
520
521 __local_flush_tlb_kernel_page(kaddr);
424 tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", kaddr); 522 tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", kaddr);
425 523
426 if (tlb_flag(TLB_BARRIER)) { 524 if (tlb_flag(TLB_BARRIER)) {
427 dsb(); 525 dsb(ish);
428 isb(); 526 isb();
429 } 527 }
430} 528}
431 529
530/*
531 * Branch predictor maintenance is paired with full TLB invalidation, so
532 * there is no need for any barriers here.
533 */
534static inline void __local_flush_bp_all(void)
535{
536 const int zero = 0;
537 const unsigned int __tlb_flag = __cpu_tlb_flags;
538
539 if (tlb_flag(TLB_V6_BP))
540 asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero));
541}
542
432static inline void local_flush_bp_all(void) 543static inline void local_flush_bp_all(void)
433{ 544{
434 const int zero = 0; 545 const int zero = 0;
435 const unsigned int __tlb_flag = __cpu_tlb_flags; 546 const unsigned int __tlb_flag = __cpu_tlb_flags;
436 547
548 __local_flush_bp_all();
437 if (tlb_flag(TLB_V7_UIS_BP)) 549 if (tlb_flag(TLB_V7_UIS_BP))
438 asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero));
439 else if (tlb_flag(TLB_V6_BP))
440 asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero)); 550 asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero));
551}
441 552
442 if (tlb_flag(TLB_BARRIER)) 553static inline void __flush_bp_all(void)
443 isb(); 554{
555 const int zero = 0;
556 const unsigned int __tlb_flag = __cpu_tlb_flags;
557
558 __local_flush_bp_all();
559 if (tlb_flag(TLB_V7_UIS_BP))
560 asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero));
444} 561}
445 562
446#include <asm/cputype.h> 563#include <asm/cputype.h>
@@ -461,7 +578,7 @@ static inline void dummy_flush_tlb_a15_erratum(void)
461 * Dummy TLBIMVAIS. Using the unmapped address 0 and ASID 0. 578 * Dummy TLBIMVAIS. Using the unmapped address 0 and ASID 0.
462 */ 579 */
463 asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0)); 580 asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
464 dsb(); 581 dsb(ish);
465} 582}
466#else 583#else
467static inline int erratum_a15_798181(void) 584static inline int erratum_a15_798181(void)
@@ -495,7 +612,7 @@ static inline void flush_pmd_entry(void *pmd)
495 tlb_l2_op(TLB_L2CLEAN_FR, "c15, c9, 1 @ L2 flush_pmd", pmd); 612 tlb_l2_op(TLB_L2CLEAN_FR, "c15, c9, 1 @ L2 flush_pmd", pmd);
496 613
497 if (tlb_flag(TLB_WB)) 614 if (tlb_flag(TLB_WB))
498 dsb(); 615 dsb(ishst);
499} 616}
500 617
501static inline void clean_pmd_entry(void *pmd) 618static inline void clean_pmd_entry(void *pmd)
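
The pattern of this file's rework: common TLB ops move into __local_* helpers, the local_* variants add non-shareable (nsh/nshst) barriers plus the per-CPU c8,c7 ops, and the new __flush_* variants use inner-shareable (ish/ishst) barriers plus the broadcasting *IS ops (c8,c3). Callers choose based on whether other CPUs may hold stale entries; a hedged sketch of that choice (ipi_flush_tlb_all stands in for the existing software-broadcast path):

    /* Sketch: hardware broadcast where available, IPIs otherwise. */
    if (tlb_ops_need_broadcast())
            on_each_cpu(ipi_flush_tlb_all, NULL, 1);
    else
            __flush_tlb_all();      /* inner-shareable hw broadcast */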
diff --git a/arch/arm/include/asm/types.h b/arch/arm/include/asm/types.h
new file mode 100644
index 000000000000..a53cdb8f068c
--- /dev/null
+++ b/arch/arm/include/asm/types.h
@@ -0,0 +1,40 @@
1#ifndef _ASM_TYPES_H
2#define _ASM_TYPES_H
3
4#include <asm-generic/int-ll64.h>
5
6/*
7 * The C99 types uintXX_t that are usually defined in 'stdint.h' are not as
8 * unambiguous on ARM as you would expect. For the types below, there is a
9 * difference on ARM between GCC built for bare metal ARM, GCC built for glibc
10 * and the kernel itself, which results in build errors if you try to build with
11 * -ffreestanding and include 'stdint.h' (such as when you include 'arm_neon.h'
12 * in order to use NEON intrinsics)
13 *
14 * As the typedefs for these types in 'stdint.h' are based on builtin defines
15 * supplied by GCC, we can tweak these to align with the kernel's idea of those
16 * types, so 'linux/types.h' and 'stdint.h' can be safely included from the same
17 * source file (provided that -ffreestanding is used).
18 *
19 *                   int32_t     uint32_t         uintptr_t
20 * bare metal GCC    long        unsigned long    unsigned int
21 * glibc GCC         int         unsigned int     unsigned int
22 * kernel            int         unsigned int     unsigned long
23 */
24
25#ifdef __INT32_TYPE__
26#undef __INT32_TYPE__
27#define __INT32_TYPE__ int
28#endif
29
30#ifdef __UINT32_TYPE__
31#undef __UINT32_TYPE__
32#define __UINT32_TYPE__ unsigned int
33#endif
34
35#ifdef __UINTPTR_TYPE__
36#undef __UINTPTR_TYPE__
37#define __UINTPTR_TYPE__ unsigned long
38#endif
39
40#endif /* _ASM_TYPES_H */
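
With the three __*_TYPE__ overrides in place, the C99 spellings and the kernel spellings of these types agree, so a freestanding build can mix the headers as the comment promises. A tiny sketch:

    /* Sketch: compiles with -ffreestanding once the overrides apply. */
    #include <linux/types.h>
    #include <stdint.h>

    u32      kernel_word;
    uint32_t c99_word;      /* now 'unsigned int' on ARM, matching u32 */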
diff --git a/arch/arm/include/asm/v7m.h b/arch/arm/include/asm/v7m.h
index fa88d09fa3d9..615781c61627 100644
--- a/arch/arm/include/asm/v7m.h
+++ b/arch/arm/include/asm/v7m.h
@@ -15,6 +15,10 @@
15 15
16#define V7M_SCB_VTOR 0x08 16#define V7M_SCB_VTOR 0x08
17 17
18#define V7M_SCB_AIRCR 0x0c
19#define V7M_SCB_AIRCR_VECTKEY (0x05fa << 16)
20#define V7M_SCB_AIRCR_SYSRESETREQ (1 << 2)
21
18#define V7M_SCB_SCR 0x10 22#define V7M_SCB_SCR 0x10
19#define V7M_SCB_SCR_SLEEPDEEP (1 << 2) 23#define V7M_SCB_SCR_SLEEPDEEP (1 << 2)
20 24
@@ -42,3 +46,11 @@
42 */ 46 */
43#define EXC_RET_STACK_MASK 0x00000004 47#define EXC_RET_STACK_MASK 0x00000004
44#define EXC_RET_THREADMODE_PROCESSSTACK 0xfffffffd 48#define EXC_RET_THREADMODE_PROCESSSTACK 0xfffffffd
49
50#ifndef __ASSEMBLY__
51
52enum reboot_mode;
53
54void armv7m_restart(enum reboot_mode mode, const char *cmd);
55
56#endif /* __ASSEMBLY__ */
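
The new AIRCR constants support a v7-M software reset: writing SYSRESETREQ together with the VECTKEY unlock value to the register requests a system reset, which is presumably what armv7m_restart() does. A hedged sketch ("scb" is an assumed ioremap of the System Control Block):

    /* Hedged sketch of a SYSRESETREQ reset; 'scb' is illustrative. */
    static void v7m_reset_sketch(void __iomem *scb)
    {
            writel(V7M_SCB_AIRCR_VECTKEY | V7M_SCB_AIRCR_SYSRESETREQ,
                   scb + V7M_SCB_AIRCR);
            dsb();
            while (1)
                    ;       /* wait for the reset to take effect */
    }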
diff --git a/arch/arm/include/asm/xor.h b/arch/arm/include/asm/xor.h
index 7604673dc427..4ffb26d4cad8 100644
--- a/arch/arm/include/asm/xor.h
+++ b/arch/arm/include/asm/xor.h
@@ -7,7 +7,10 @@
7 * it under the terms of the GNU General Public License version 2 as 7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10#include <linux/hardirq.h>
10#include <asm-generic/xor.h> 11#include <asm-generic/xor.h>
12#include <asm/hwcap.h>
13#include <asm/neon.h>
11 14
12#define __XOR(a1, a2) a1 ^= a2 15#define __XOR(a1, a2) a1 ^= a2
13 16
@@ -138,4 +141,74 @@ static struct xor_block_template xor_block_arm4regs = {
138 xor_speed(&xor_block_arm4regs); \ 141 xor_speed(&xor_block_arm4regs); \
139 xor_speed(&xor_block_8regs); \ 142 xor_speed(&xor_block_8regs); \
140 xor_speed(&xor_block_32regs); \ 143 xor_speed(&xor_block_32regs); \
144 NEON_TEMPLATES; \
141 } while (0) 145 } while (0)
146
147#ifdef CONFIG_KERNEL_MODE_NEON
148
149extern struct xor_block_template const xor_block_neon_inner;
150
151static void
152xor_neon_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
153{
154 if (in_interrupt()) {
155 xor_arm4regs_2(bytes, p1, p2);
156 } else {
157 kernel_neon_begin();
158 xor_block_neon_inner.do_2(bytes, p1, p2);
159 kernel_neon_end();
160 }
161}
162
163static void
164xor_neon_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
165 unsigned long *p3)
166{
167 if (in_interrupt()) {
168 xor_arm4regs_3(bytes, p1, p2, p3);
169 } else {
170 kernel_neon_begin();
171 xor_block_neon_inner.do_3(bytes, p1, p2, p3);
172 kernel_neon_end();
173 }
174}
175
176static void
177xor_neon_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
178 unsigned long *p3, unsigned long *p4)
179{
180 if (in_interrupt()) {
181 xor_arm4regs_4(bytes, p1, p2, p3, p4);
182 } else {
183 kernel_neon_begin();
184 xor_block_neon_inner.do_4(bytes, p1, p2, p3, p4);
185 kernel_neon_end();
186 }
187}
188
189static void
190xor_neon_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
191 unsigned long *p3, unsigned long *p4, unsigned long *p5)
192{
193 if (in_interrupt()) {
194 xor_arm4regs_5(bytes, p1, p2, p3, p4, p5);
195 } else {
196 kernel_neon_begin();
197 xor_block_neon_inner.do_5(bytes, p1, p2, p3, p4, p5);
198 kernel_neon_end();
199 }
200}
201
202static struct xor_block_template xor_block_neon = {
203 .name = "neon",
204 .do_2 = xor_neon_2,
205 .do_3 = xor_neon_3,
206 .do_4 = xor_neon_4,
207 .do_5 = xor_neon_5
208};
209
210#define NEON_TEMPLATES \
211 do { if (cpu_has_neon()) xor_speed(&xor_block_neon); } while (0)
212#else
213#define NEON_TEMPLATES
214#endif
diff --git a/arch/arm/include/debug/8250.S b/arch/arm/include/debug/8250.S
new file mode 100644
index 000000000000..7a2baf913aa0
--- /dev/null
+++ b/arch/arm/include/debug/8250.S
@@ -0,0 +1,54 @@
1/*
2 * arch/arm/include/debug/8250.S
3 *
4 * Copyright (C) 1994-2013 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/serial_reg.h>
11
12 .macro addruart, rp, rv, tmp
13 ldr \rp, =CONFIG_DEBUG_UART_PHYS
14 ldr \rv, =CONFIG_DEBUG_UART_VIRT
15 .endm
16
17#ifdef CONFIG_DEBUG_UART_8250_WORD
18 .macro store, rd, rx:vararg
19 str \rd, \rx
20 .endm
21
22 .macro load, rd, rx:vararg
23 ldr \rd, \rx
24 .endm
25#else
26 .macro store, rd, rx:vararg
27 strb \rd, \rx
28 .endm
29
30 .macro load, rd, rx:vararg
31 ldrb \rd, \rx
32 .endm
33#endif
34
35#define UART_SHIFT CONFIG_DEBUG_UART_8250_SHIFT
36
37 .macro senduart,rd,rx
38 store \rd, [\rx, #UART_TX << UART_SHIFT]
39 .endm
40
41 .macro busyuart,rd,rx
421002: load \rd, [\rx, #UART_LSR << UART_SHIFT]
43 and \rd, \rd, #UART_LSR_TEMT | UART_LSR_THRE
44 teq \rd, #UART_LSR_TEMT | UART_LSR_THRE
45 bne 1002b
46 .endm
47
48 .macro waituart,rd,rx
49#ifdef CONFIG_DEBUG_UART_8250_FLOW_CONTROL
501001: load \rd, [\rx, #UART_MSR << UART_SHIFT]
51 tst \rd, #UART_MSR_CTS
52 beq 1001b
53#endif
54 .endm
diff --git a/arch/arm/include/debug/8250_32.S b/arch/arm/include/debug/8250_32.S
deleted file mode 100644
index 8db01eeabbb4..000000000000
--- a/arch/arm/include/debug/8250_32.S
+++ /dev/null
@@ -1,27 +0,0 @@
1/*
2 * Copyright (c) 2011 Picochip Ltd., Jamie Iles
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * Derived from arch/arm/mach-davinci/include/mach/debug-macro.S to use 32-bit
9 * accesses to the 8250.
10 */
11
12#include <linux/serial_reg.h>
13
14 .macro senduart,rd,rx
15 str \rd, [\rx, #UART_TX << UART_SHIFT]
16 .endm
17
18 .macro busyuart,rd,rx
191002: ldr \rd, [\rx, #UART_LSR << UART_SHIFT]
20 and \rd, \rd, #UART_LSR_TEMT | UART_LSR_THRE
21 teq \rd, #UART_LSR_TEMT | UART_LSR_THRE
22 bne 1002b
23 .endm
24
25 /* The UARTs don't have any flow control IOs wired up. */
26 .macro waituart,rd,rx
27 .endm
diff --git a/arch/arm/include/debug/bcm2835.S b/arch/arm/include/debug/bcm2835.S
deleted file mode 100644
index aed9199bd847..000000000000
--- a/arch/arm/include/debug/bcm2835.S
+++ /dev/null
@@ -1,22 +0,0 @@
1/*
2 * Debugging macro include header
3 *
4 * Copyright (C) 2010 Broadcom
5 * Copyright (C) 1994-1999 Russell King
6 * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 */
13
14#define BCM2835_DEBUG_PHYS 0x20201000
15#define BCM2835_DEBUG_VIRT 0xf0201000
16
17 .macro addruart, rp, rv, tmp
18 ldr \rp, =BCM2835_DEBUG_PHYS
19 ldr \rv, =BCM2835_DEBUG_VIRT
20 .endm
21
22#include <asm/hardware/debug-pl01x.S>
diff --git a/arch/arm/include/debug/cns3xxx.S b/arch/arm/include/debug/cns3xxx.S
deleted file mode 100644
index d04c150baa1c..000000000000
--- a/arch/arm/include/debug/cns3xxx.S
+++ /dev/null
@@ -1,19 +0,0 @@
1/*
2 * Debugging macro include header
3 *
4 * Copyright 1994-1999 Russell King
5 * Copyright 2008 Cavium Networks
6 * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
7 *
8 * This file is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License, Version 2, as
10 * published by the Free Software Foundation.
11 */
12
13 .macro addruart,rp,rv,tmp
14 mov \rp, #0x00009000
15 orr \rv, \rp, #0xf0000000 @ virtual base
16 orr \rp, \rp, #0x10000000
17 .endm
18
19#include <asm/hardware/debug-pl01x.S>
diff --git a/arch/arm/include/debug/highbank.S b/arch/arm/include/debug/highbank.S
deleted file mode 100644
index 8cad4322a5a2..000000000000
--- a/arch/arm/include/debug/highbank.S
+++ /dev/null
@@ -1,17 +0,0 @@
1/*
2 * Debugging macro include header
3 *
4 * Copyright (C) 1994-1999 Russell King
5 * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12 .macro addruart,rp,rv,tmp
13 ldr \rv, =0xfee36000
14 ldr \rp, =0xfff36000
15 .endm
16
17#include <asm/hardware/debug-pl01x.S>
diff --git a/arch/arm/include/debug/keystone.S b/arch/arm/include/debug/keystone.S
deleted file mode 100644
index 9aef9ba3f4f0..000000000000
--- a/arch/arm/include/debug/keystone.S
+++ /dev/null
@@ -1,43 +0,0 @@
1/*
2 * Early serial debug output macro for Keystone SOCs
3 *
4 * Copyright 2013 Texas Instruments, Inc.
5 * Santosh Shilimkar <santosh.shilimkar@ti.com>
6 *
7 * Based on RMKs low level debug code.
8 * Copyright (C) 1994-1999 Russell King
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15#include <linux/serial_reg.h>
16
17#define UART_SHIFT 2
18#if defined(CONFIG_DEBUG_KEYSTONE_UART0)
19#define UART_PHYS 0x02530c00
20#define UART_VIRT 0xfeb30c00
21#elif defined(CONFIG_DEBUG_KEYSTONE_UART1)
22#define UART_PHYS 0x02531000
23#define UART_VIRT 0xfeb31000
24#endif
25
26 .macro addruart, rp, rv, tmp
27 ldr \rv, =UART_VIRT @ virtual base address
28 ldr \rp, =UART_PHYS @ physical base address
29 .endm
30
31 .macro senduart,rd,rx
32 str \rd, [\rx, #UART_TX << UART_SHIFT]
33 .endm
34
35 .macro busyuart,rd,rx
361002: ldr \rd, [\rx, #UART_LSR << UART_SHIFT]
37 and \rd, \rd, #UART_LSR_TEMT | UART_LSR_THRE
38 teq \rd, #UART_LSR_TEMT | UART_LSR_THRE
39 bne 1002b
40 .endm
41
42 .macro waituart,rd,rx
43 .endm
diff --git a/arch/arm/include/debug/mvebu.S b/arch/arm/include/debug/mvebu.S
deleted file mode 100644
index 6517311a1c91..000000000000
--- a/arch/arm/include/debug/mvebu.S
+++ /dev/null
@@ -1,30 +0,0 @@
1/*
2 * Early serial output macro for Marvell SoC
3 *
4 * Copyright (C) 2012 Marvell
5 *
6 * Lior Amsalem <alior@marvell.com>
7 * Gregory Clement <gregory.clement@free-electrons.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12*/
13
14#ifdef CONFIG_DEBUG_MVEBU_UART_ALTERNATE
15#define ARMADA_370_XP_REGS_PHYS_BASE 0xf1000000
16#else
17#define ARMADA_370_XP_REGS_PHYS_BASE 0xd0000000
18#endif
19
20#define ARMADA_370_XP_REGS_VIRT_BASE 0xfec00000
21
22 .macro addruart, rp, rv, tmp
23 ldr \rp, =ARMADA_370_XP_REGS_PHYS_BASE
24 ldr \rv, =ARMADA_370_XP_REGS_VIRT_BASE
25 orr \rp, \rp, #0x00012000
26 orr \rv, \rv, #0x00012000
27 .endm
28
29#define UART_SHIFT 2
30#include <asm/hardware/debug-8250.S>
diff --git a/arch/arm/include/debug/mxs.S b/arch/arm/include/debug/mxs.S
deleted file mode 100644
index d86951551ca1..000000000000
--- a/arch/arm/include/debug/mxs.S
+++ /dev/null
@@ -1,27 +0,0 @@
1/* arch/arm/mach-mxs/include/mach/debug-macro.S
2 *
3 * Debugging macro include header
4 *
5 * Copyright (C) 1994-1999 Russell King
6 * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 */
13
14#ifdef CONFIG_DEBUG_IMX23_UART
15#define UART_PADDR 0x80070000
16#elif defined (CONFIG_DEBUG_IMX28_UART)
17#define UART_PADDR 0x80074000
18#endif
19
20#define UART_VADDR 0xfe100000
21
22 .macro addruart, rp, rv, tmp
23 ldr \rp, =UART_PADDR @ physical
24 ldr \rv, =UART_VADDR @ virtual
25 .endm
26
27#include <asm/hardware/debug-pl01x.S>
diff --git a/arch/arm/include/debug/nomadik.S b/arch/arm/include/debug/nomadik.S
deleted file mode 100644
index 735417922ce2..000000000000
--- a/arch/arm/include/debug/nomadik.S
+++ /dev/null
@@ -1,20 +0,0 @@
1/*
2 * Debugging macro include header
3 *
4 * Copyright (C) 1994-1999 Russell King
5 * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11*/
12
13 .macro addruart, rp, rv, tmp
14 mov \rp, #0x00100000
15 add \rp, \rp, #0x000fb000
16 add \rv, \rp, #0xf0000000 @ virtual base
17 add \rp, \rp, #0x10000000 @ physical base address
18 .endm
19
20#include <asm/hardware/debug-pl01x.S>
diff --git a/arch/arm/include/debug/nspire.S b/arch/arm/include/debug/nspire.S
deleted file mode 100644
index 886fd276fcbc..000000000000
--- a/arch/arm/include/debug/nspire.S
+++ /dev/null
@@ -1,28 +0,0 @@
1/*
2 * linux/arch/arm/include/debug/nspire.S
3 *
4 * Copyright (C) 2013 Daniel Tang <tangrs@tangrs.id.au>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2, as
8 * published by the Free Software Foundation.
9 *
10 */
11
12#define NSPIRE_EARLY_UART_PHYS_BASE 0x90020000
13#define NSPIRE_EARLY_UART_VIRT_BASE 0xfee20000
14
15.macro addruart, rp, rv, tmp
16 ldr \rp, =(NSPIRE_EARLY_UART_PHYS_BASE) @ physical base address
17 ldr \rv, =(NSPIRE_EARLY_UART_VIRT_BASE) @ virtual base address
18.endm
19
20
21#ifdef CONFIG_DEBUG_NSPIRE_CX_UART
22#include <asm/hardware/debug-pl01x.S>
23#endif
24
25#ifdef CONFIG_DEBUG_NSPIRE_CLASSIC_UART
26#define UART_SHIFT 2
27#include <asm/hardware/debug-8250.S>
28#endif
diff --git a/arch/arm/include/debug/picoxcell.S b/arch/arm/include/debug/picoxcell.S
deleted file mode 100644
index bc1f07c49cd4..000000000000
--- a/arch/arm/include/debug/picoxcell.S
+++ /dev/null
@@ -1,19 +0,0 @@
1/*
2 * Copyright (c) 2011 Picochip Ltd., Jamie Iles
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 */
9
10#define UART_SHIFT 2
11#define PICOXCELL_UART1_BASE 0x80230000
12#define PHYS_TO_IO(x) (((x) & 0x00ffffff) | 0xfe000000)
13
14 .macro addruart, rp, rv, tmp
15 ldr \rv, =PHYS_TO_IO(PICOXCELL_UART1_BASE)
16 ldr \rp, =PICOXCELL_UART1_BASE
17 .endm
18
19#include "8250_32.S"
diff --git a/arch/arm/include/asm/hardware/debug-pl01x.S b/arch/arm/include/debug/pl01x.S
index f9fd083eff63..37c6895b87e6 100644
--- a/arch/arm/include/asm/hardware/debug-pl01x.S
+++ b/arch/arm/include/debug/pl01x.S
@@ -1,4 +1,4 @@
1/* arch/arm/include/asm/hardware/debug-pl01x.S 1/* arch/arm/include/debug/pl01x.S
2 * 2 *
3 * Debugging macro include header 3 * Debugging macro include header
4 * 4 *
@@ -12,6 +12,13 @@
12*/ 12*/
13#include <linux/amba/serial.h> 13#include <linux/amba/serial.h>
14 14
15#ifdef CONFIG_DEBUG_UART_PHYS
16 .macro addruart, rp, rv, tmp
17 ldr \rp, =CONFIG_DEBUG_UART_PHYS
18 ldr \rv, =CONFIG_DEBUG_UART_VIRT
19 .endm
20#endif
21
15 .macro senduart,rd,rx 22 .macro senduart,rd,rx
16 strb \rd, [\rx, #UART01x_DR] 23 strb \rd, [\rx, #UART01x_DR]
17 .endm 24 .endm
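
The generic addruart above is keyed entirely off Kconfig: a PL011 platform can now drop its private debug header and supply the two addresses through the DEBUG_UART_PHYS/DEBUG_UART_VIRT symbols instead. An illustrative .config fragment (the addresses are example values, not taken from this patch):

	CONFIG_DEBUG_LL=y
	CONFIG_DEBUG_UART_PL01X=y
	CONFIG_DEBUG_UART_PHYS=0x101f1000
	CONFIG_DEBUG_UART_VIRT=0xf11f1000
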
diff --git a/arch/arm/include/debug/pxa.S b/arch/arm/include/debug/pxa.S
deleted file mode 100644
index e1e795aa3d7f..000000000000
--- a/arch/arm/include/debug/pxa.S
+++ /dev/null
@@ -1,33 +0,0 @@
1/*
2 * Early serial output macro for Marvell PXA/MMP SoC
3 *
4 * Copyright (C) 1994-1999 Russell King
5 * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
6 *
7 * Copyright (C) 2013 Haojian Zhuang
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12*/
13
14#if defined(CONFIG_DEBUG_PXA_UART1)
15#define PXA_UART_REG_PHYS_BASE 0x40100000
16#define PXA_UART_REG_VIRT_BASE 0xf2100000
17#elif defined(CONFIG_DEBUG_MMP_UART2)
18#define PXA_UART_REG_PHYS_BASE 0xd4017000
19#define PXA_UART_REG_VIRT_BASE 0xfe017000
20#elif defined(CONFIG_DEBUG_MMP_UART3)
21#define PXA_UART_REG_PHYS_BASE 0xd4018000
22#define PXA_UART_REG_VIRT_BASE 0xfe018000
23#else
24#error "Select uart for DEBUG_LL"
25#endif
26
27 .macro addruart, rp, rv, tmp
28 ldr \rp, =PXA_UART_REG_PHYS_BASE
29 ldr \rv, =PXA_UART_REG_VIRT_BASE
30 .endm
31
32#define UART_SHIFT 2
33#include <asm/hardware/debug-8250.S>
diff --git a/arch/arm/include/debug/rockchip.S b/arch/arm/include/debug/rockchip.S
deleted file mode 100644
index cfd883e69588..000000000000
--- a/arch/arm/include/debug/rockchip.S
+++ /dev/null
@@ -1,42 +0,0 @@
1/*
2 * Early serial output macro for Rockchip SoCs
3 *
4 * Copyright (C) 2012 Maxime Ripard
5 *
6 * Maxime Ripard <maxime.ripard@free-electrons.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11*/
12
13#if defined(CONFIG_DEBUG_RK29_UART0)
14#define ROCKCHIP_UART_DEBUG_PHYS_BASE 0x20060000
15#define ROCKCHIP_UART_DEBUG_VIRT_BASE 0xfed60000
16#elif defined(CONFIG_DEBUG_RK29_UART1)
17#define ROCKCHIP_UART_DEBUG_PHYS_BASE 0x20064000
18#define ROCKCHIP_UART_DEBUG_VIRT_BASE 0xfed64000
19#elif defined(CONFIG_DEBUG_RK29_UART2)
20#define ROCKCHIP_UART_DEBUG_PHYS_BASE 0x20068000
21#define ROCKCHIP_UART_DEBUG_VIRT_BASE 0xfed68000
22#elif defined(CONFIG_DEBUG_RK3X_UART0)
23#define ROCKCHIP_UART_DEBUG_PHYS_BASE 0x10124000
24#define ROCKCHIP_UART_DEBUG_VIRT_BASE 0xfeb24000
25#elif defined(CONFIG_DEBUG_RK3X_UART1)
26#define ROCKCHIP_UART_DEBUG_PHYS_BASE 0x10126000
27#define ROCKCHIP_UART_DEBUG_VIRT_BASE 0xfeb26000
28#elif defined(CONFIG_DEBUG_RK3X_UART2)
29#define ROCKCHIP_UART_DEBUG_PHYS_BASE 0x20064000
30#define ROCKCHIP_UART_DEBUG_VIRT_BASE 0xfed64000
31#elif defined(CONFIG_DEBUG_RK3X_UART3)
32#define ROCKCHIP_UART_DEBUG_PHYS_BASE 0x20068000
33#define ROCKCHIP_UART_DEBUG_VIRT_BASE 0xfed68000
34#endif
35
36 .macro addruart, rp, rv, tmp
37 ldr \rp, =ROCKCHIP_UART_DEBUG_PHYS_BASE
38 ldr \rv, =ROCKCHIP_UART_DEBUG_VIRT_BASE
39 .endm
40
41#define UART_SHIFT 2
42#include <asm/hardware/debug-8250.S>
diff --git a/arch/arm/include/debug/socfpga.S b/arch/arm/include/debug/socfpga.S
deleted file mode 100644
index 966b2f994946..000000000000
--- a/arch/arm/include/debug/socfpga.S
+++ /dev/null
@@ -1,21 +0,0 @@
1/*
2 * Copyright (C) 1994-1999 Russell King
3 * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#define UART_SHIFT 2
11#define DEBUG_LL_UART_OFFSET 0x00002000
12
13 .macro addruart, rp, rv, tmp
14 mov \rp, #DEBUG_LL_UART_OFFSET
15 orr \rp, \rp, #0x00c00000
16 orr \rv, \rp, #0xfe000000 @ virtual base
17 orr \rp, \rp, #0xff000000 @ physical base
18 .endm
19
20#include "8250_32.S"
21
diff --git a/arch/arm/include/debug/sunxi.S b/arch/arm/include/debug/sunxi.S
deleted file mode 100644
index 04eb56d5db2c..000000000000
--- a/arch/arm/include/debug/sunxi.S
+++ /dev/null
@@ -1,27 +0,0 @@
1/*
2 * Early serial output macro for Allwinner A1X SoCs
3 *
4 * Copyright (C) 2012 Maxime Ripard
5 *
6 * Maxime Ripard <maxime.ripard@free-electrons.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11*/
12
13#if defined(CONFIG_DEBUG_SUNXI_UART0)
14#define SUNXI_UART_DEBUG_PHYS_BASE 0x01c28000
15#define SUNXI_UART_DEBUG_VIRT_BASE 0xf1c28000
16#elif defined(CONFIG_DEBUG_SUNXI_UART1)
17#define SUNXI_UART_DEBUG_PHYS_BASE 0x01c28400
18#define SUNXI_UART_DEBUG_VIRT_BASE 0xf1c28400
19#endif
20
21 .macro addruart, rp, rv, tmp
22 ldr \rp, =SUNXI_UART_DEBUG_PHYS_BASE
23 ldr \rv, =SUNXI_UART_DEBUG_VIRT_BASE
24 .endm
25
26#define UART_SHIFT 2
27#include <asm/hardware/debug-8250.S>
diff --git a/arch/arm/include/debug/tegra.S b/arch/arm/include/debug/tegra.S
index 883d7c22fd9d..be6a720dd183 100644
--- a/arch/arm/include/debug/tegra.S
+++ b/arch/arm/include/debug/tegra.S
@@ -221,3 +221,32 @@
2211002: 2211002:
222#endif 222#endif
223 .endm 223 .endm
224
225/*
226 * Storage for the state maintained by the macros above.
227 *
228 * In the kernel proper, this data is located in arch/arm/mach-tegra/common.c.
229 * That's because this header is included from multiple files, and we only
230 * want a single copy of the data. In particular, the UART probing code above
231 * assumes it's running using physical addresses. This is true when this file
232 * is included from head.o, but not when included from debug.o. So we need
233 * to share the probe results between the two copies, rather than having
 234 * to re-run the probe later.
235 *
236 * In the decompressor, we put the symbol/storage right here, since common.c
237 * isn't included in the decompressor build. This symbol gets put in .text
238 * even though it's really data, since .data is discarded from the
239 * decompressor. Luckily, .text is writeable in the decompressor, unless
240 * CONFIG_ZBOOT_ROM. That dependency is handled in arch/arm/Kconfig.debug.
241 */
242#if defined(ZIMAGE)
243tegra_uart_config:
244 /* Debug UART initialization required */
245 .word 1
246 /* Debug UART physical address */
247 .word 0
248 /* Debug UART virtual address */
249 .word 0
250 /* Scratch space for debug macro */
251 .word 0
252#endif
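
As the comment above says, the kernel proper keeps these four words in arch/arm/mach-tegra/common.c so that head.o and debug.o share a single copy. A sketch of what that C-side definition looks like, reconstructed from the .word layout above (treat the details as illustrative):

	u32 tegra_uart_config[4] = {
		1,	/* Debug UART initialization required */
		0,	/* Debug UART physical address */
		0,	/* Debug UART virtual address */
		0,	/* Scratch space for debug macro */
	};
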
diff --git a/arch/arm/include/debug/u300.S b/arch/arm/include/debug/u300.S
deleted file mode 100644
index 6f04f08a203c..000000000000
--- a/arch/arm/include/debug/u300.S
+++ /dev/null
@@ -1,18 +0,0 @@
1/*
2 * Copyright (C) 2006-2013 ST-Ericsson AB
3 * License terms: GNU General Public License (GPL) version 2
4 * Debugging macro include header.
5 * Author: Linus Walleij <linus.walleij@stericsson.com>
6 */
7#define U300_SLOW_PER_PHYS_BASE 0xc0010000
8#define U300_SLOW_PER_VIRT_BASE 0xff000000
9
10 .macro addruart, rp, rv, tmp
11 /* If we move the address using MMU, use this. */
12 ldr \rp, = U300_SLOW_PER_PHYS_BASE @ MMU off, physical address
13 ldr \rv, = U300_SLOW_PER_VIRT_BASE @ MMU on, virtual address
14 orr \rp, \rp, #0x00003000
15 orr \rv, \rv, #0x00003000
16 .endm
17
18#include <asm/hardware/debug-pl01x.S>
diff --git a/arch/arm/include/debug/ux500.S b/arch/arm/include/debug/ux500.S
index fbd24beeb1fa..aa7f63a8b5e0 100644
--- a/arch/arm/include/debug/ux500.S
+++ b/arch/arm/include/debug/ux500.S
@@ -45,4 +45,4 @@
45 ldr \rv, =UART_VIRT_BASE @ yes, virtual address 45 ldr \rv, =UART_VIRT_BASE @ yes, virtual address
46 .endm 46 .endm
47 47
48#include <asm/hardware/debug-pl01x.S> 48#include <debug/pl01x.S>
diff --git a/arch/arm/include/debug/vexpress.S b/arch/arm/include/debug/vexpress.S
index acafb229e2b6..524acd5a223e 100644
--- a/arch/arm/include/debug/vexpress.S
+++ b/arch/arm/include/debug/vexpress.S
@@ -47,51 +47,5 @@
47 47
48 .endm 48 .endm
49 49
50#include <asm/hardware/debug-pl01x.S> 50#include <debug/pl01x.S>
51
52#elif defined(CONFIG_DEBUG_VEXPRESS_UART0_CA9)
53
54 .macro addruart,rp,rv,tmp
55 mov \rp, #DEBUG_LL_UART_OFFSET
56 orr \rv, \rp, #DEBUG_LL_VIRT_BASE
57 orr \rp, \rp, #DEBUG_LL_PHYS_BASE
58 .endm
59
60#include <asm/hardware/debug-pl01x.S>
61
62#elif defined(CONFIG_DEBUG_VEXPRESS_UART0_RS1)
63
64 .macro addruart,rp,rv,tmp
65 mov \rp, #DEBUG_LL_UART_OFFSET_RS1
66 orr \rv, \rp, #DEBUG_LL_VIRT_BASE
67 orr \rp, \rp, #DEBUG_LL_PHYS_BASE_RS1
68 .endm
69
70#include <asm/hardware/debug-pl01x.S>
71
72#elif defined(CONFIG_DEBUG_VEXPRESS_UART0_CRX)
73
74 .macro addruart,rp,tmp,tmp2
75 ldr \rp, =DEBUG_LL_UART_PHYS_CRX
76 .endm
77
78#include <asm/hardware/debug-pl01x.S>
79
80#else /* CONFIG_DEBUG_LL_UART_NONE */
81
82 .macro addruart, rp, rv, tmp
83 /* Safe dummy values */
84 mov \rp, #0
85 mov \rv, #DEBUG_LL_VIRT_BASE
86 .endm
87
88 .macro senduart,rd,rx
89 .endm
90
91 .macro waituart,rd,rx
92 .endm
93
94 .macro busyuart,rd,rx
95 .endm
96
97#endif 51#endif
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 86d10dd47dc4..5140df5f23aa 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -24,7 +24,7 @@ obj-$(CONFIG_ATAGS_PROC) += atags_proc.o
24obj-$(CONFIG_DEPRECATED_PARAM_STRUCT) += atags_compat.o 24obj-$(CONFIG_DEPRECATED_PARAM_STRUCT) += atags_compat.o
25 25
26ifeq ($(CONFIG_CPU_V7M),y) 26ifeq ($(CONFIG_CPU_V7M),y)
27obj-y += entry-v7m.o 27obj-y += entry-v7m.o v7m.o
28else 28else
29obj-y += entry-armv.o 29obj-y += entry-armv.o
30endif 30endif
diff --git a/arch/arm/kernel/atags.h b/arch/arm/kernel/atags.h
index 9edc9692332d..ec4164da6e30 100644
--- a/arch/arm/kernel/atags.h
+++ b/arch/arm/kernel/atags.h
@@ -7,9 +7,10 @@ static inline void save_atags(struct tag *tags) { }
7void convert_to_tag_list(struct tag *tags); 7void convert_to_tag_list(struct tag *tags);
8 8
9#ifdef CONFIG_ATAGS 9#ifdef CONFIG_ATAGS
10struct machine_desc *setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr); 10const struct machine_desc *setup_machine_tags(phys_addr_t __atags_pointer,
11 unsigned int machine_nr);
11#else 12#else
12static inline struct machine_desc * 13static inline const struct machine_desc *
13setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr) 14setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr)
14{ 15{
15 early_print("no ATAGS support: can't continue\n"); 16 early_print("no ATAGS support: can't continue\n");
diff --git a/arch/arm/kernel/atags_parse.c b/arch/arm/kernel/atags_parse.c
index 14512e6931d8..8c14de8180c0 100644
--- a/arch/arm/kernel/atags_parse.c
+++ b/arch/arm/kernel/atags_parse.c
@@ -178,11 +178,11 @@ static void __init squash_mem_tags(struct tag *tag)
178 tag->hdr.tag = ATAG_NONE; 178 tag->hdr.tag = ATAG_NONE;
179} 179}
180 180
181struct machine_desc * __init setup_machine_tags(phys_addr_t __atags_pointer, 181const struct machine_desc * __init
182 unsigned int machine_nr) 182setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr)
183{ 183{
184 struct tag *tags = (struct tag *)&default_tags; 184 struct tag *tags = (struct tag *)&default_tags;
185 struct machine_desc *mdesc = NULL, *p; 185 const struct machine_desc *mdesc = NULL, *p;
186 char *from = default_command_line; 186 char *from = default_command_line;
187 187
188 default_tags.mem.start = PHYS_OFFSET; 188 default_tags.mem.start = PHYS_OFFSET;
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index 261fcc826169..88e14d74b6de 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -525,11 +525,6 @@ void pci_common_init_dev(struct device *parent, struct hw_pci *hw)
525 * Assign resources. 525 * Assign resources.
526 */ 526 */
527 pci_bus_assign_resources(bus); 527 pci_bus_assign_resources(bus);
528
529 /*
530 * Enable bridges
531 */
532 pci_enable_bridges(bus);
533 } 528 }
534 529
535 /* 530 /*
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
index 5859c8bc727c..f35906b3d8c9 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -169,6 +169,11 @@ void __init arm_dt_init_cpu_maps(void)
169 } 169 }
170} 170}
171 171
172bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
173{
174 return (phys_id & MPIDR_HWID_BITMASK) == cpu_logical_map(cpu);
175}
176
172/** 177/**
 173 * setup_machine_fdt - Machine setup when a dtb was passed to the kernel 178 * setup_machine_fdt - Machine setup when a dtb was passed to the kernel
174 * @dt_phys: physical address of dt blob 179 * @dt_phys: physical address of dt blob
@@ -176,10 +181,10 @@ void __init arm_dt_init_cpu_maps(void)
176 * If a dtb was passed to the kernel in r2, then use it to choose the 181 * If a dtb was passed to the kernel in r2, then use it to choose the
177 * correct machine_desc and to setup the system. 182 * correct machine_desc and to setup the system.
178 */ 183 */
179struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys) 184const struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
180{ 185{
181 struct boot_param_header *devtree; 186 struct boot_param_header *devtree;
182 struct machine_desc *mdesc, *mdesc_best = NULL; 187 const struct machine_desc *mdesc, *mdesc_best = NULL;
183 unsigned int score, mdesc_score = ~1; 188 unsigned int score, mdesc_score = ~1;
184 unsigned long dt_root; 189 unsigned long dt_root;
185 const char *model; 190 const char *model;
@@ -188,7 +193,7 @@ struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
188 DT_MACHINE_START(GENERIC_DT, "Generic DT based system") 193 DT_MACHINE_START(GENERIC_DT, "Generic DT based system")
189 MACHINE_END 194 MACHINE_END
190 195
191 mdesc_best = (struct machine_desc *)&__mach_desc_GENERIC_DT; 196 mdesc_best = &__mach_desc_GENERIC_DT;
192#endif 197#endif
193 198
194 if (!dt_phys) 199 if (!dt_phys)
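
The new arch_match_cpu_phys_id() hook gives the generic device-tree code an architecture-specific rule for matching a CPU node's "reg" value to a logical CPU; on ARM only the MPIDR affinity fields take part in the comparison. A worked example with assumed values:

	/* Illustration only: how the hook above evaluates (assumed values). */
	static bool example_match(void)
	{
		u64 phys_id = 0x80000101;	/* DT "reg" with the MPIDR.M bit set */
		/* cpu_logical_map(1) == 0x101, MPIDR_HWID_BITMASK == 0x00ffffff */
		return (phys_id & 0x00ffffff) == 0x101;	/* true: it matches CPU 1 */
	}
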
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index d40d0ef389db..9cbe70c8b0ef 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -357,7 +357,8 @@ ENDPROC(__pabt_svc)
357 .endm 357 .endm
358 358
359 .macro kuser_cmpxchg_check 359 .macro kuser_cmpxchg_check
360#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG) 360#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \
361 !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
361#ifndef CONFIG_MMU 362#ifndef CONFIG_MMU
362#warning "NPTL on non MMU needs fixing" 363#warning "NPTL on non MMU needs fixing"
363#else 364#else
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 94104bf69719..74ad15d1a065 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -442,10 +442,10 @@ local_restart:
442 ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine 442 ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
443 443
444 add r1, sp, #S_OFF 444 add r1, sp, #S_OFF
4452: mov why, #0 @ no longer a real syscall
446 cmp scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE) 445 cmp scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
447 eor r0, scno, #__NR_SYSCALL_BASE @ put OS number back 446 eor r0, scno, #__NR_SYSCALL_BASE @ put OS number back
448 bcs arm_syscall 447 bcs arm_syscall
4482: mov why, #0 @ no longer a real syscall
449 b sys_ni_syscall @ not private func 449 b sys_ni_syscall @ not private func
450 450
451#if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI) 451#if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI)
diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
index 25442f451148..918875d96d5d 100644
--- a/arch/arm/kernel/fiq.c
+++ b/arch/arm/kernel/fiq.c
@@ -84,17 +84,14 @@ int show_fiq_list(struct seq_file *p, int prec)
84 84
85void set_fiq_handler(void *start, unsigned int length) 85void set_fiq_handler(void *start, unsigned int length)
86{ 86{
87#if defined(CONFIG_CPU_USE_DOMAINS)
88 void *base = (void *)0xffff0000;
89#else
90 void *base = vectors_page; 87 void *base = vectors_page;
91#endif
92 unsigned offset = FIQ_OFFSET; 88 unsigned offset = FIQ_OFFSET;
93 89
94 memcpy(base + offset, start, length); 90 memcpy(base + offset, start, length);
91 if (!cache_is_vipt_nonaliasing())
 92 flush_icache_range((unsigned long)base + offset,
 93 (unsigned long)base + offset + length);
95 flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length); 94 flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length);
96 if (!vectors_high())
97 flush_icache_range(offset, offset + length);
98} 95}
99 96
100int claim_fiq(struct fiq_handler *f) 97int claim_fiq(struct fiq_handler *f)
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 4fb074c446bf..57221e349a7c 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -15,6 +15,7 @@
15#include <asm/mmu_context.h> 15#include <asm/mmu_context.h>
16#include <asm/cacheflush.h> 16#include <asm/cacheflush.h>
17#include <asm/mach-types.h> 17#include <asm/mach-types.h>
18#include <asm/smp_plat.h>
18#include <asm/system_misc.h> 19#include <asm/system_misc.h>
19 20
20extern const unsigned char relocate_new_kernel[]; 21extern const unsigned char relocate_new_kernel[];
@@ -39,6 +40,14 @@ int machine_kexec_prepare(struct kimage *image)
39 int i, err; 40 int i, err;
40 41
41 /* 42 /*
43 * Validate that if the current HW supports SMP, then the SW supports
44 * and implements CPU hotplug for the current HW. If not, we won't be
45 * able to kexec reliably, so fail the prepare operation.
46 */
47 if (num_possible_cpus() > 1 && !platform_can_cpu_hotplug())
48 return -EINVAL;
49
50 /*
 42 * No segment at default ATAGs address. Try to locate 51 * No segment at default ATAGs address. Try to locate
43 * a dtb using magic. 52 * a dtb using magic.
44 */ 53 */
@@ -73,6 +82,7 @@ void machine_crash_nonpanic_core(void *unused)
73 crash_save_cpu(&regs, smp_processor_id()); 82 crash_save_cpu(&regs, smp_processor_id());
74 flush_cache_all(); 83 flush_cache_all();
75 84
85 set_cpu_online(smp_processor_id(), false);
76 atomic_dec(&waiting_for_crash_ipi); 86 atomic_dec(&waiting_for_crash_ipi);
77 while (1) 87 while (1)
78 cpu_relax(); 88 cpu_relax();
@@ -134,10 +144,13 @@ void machine_kexec(struct kimage *image)
134 unsigned long reboot_code_buffer_phys; 144 unsigned long reboot_code_buffer_phys;
135 void *reboot_code_buffer; 145 void *reboot_code_buffer;
136 146
137 if (num_online_cpus() > 1) { 147 /*
138 pr_err("kexec: error: multiple CPUs still online\n"); 148 * This can only happen if machine_shutdown() failed to disable some
139 return; 149 * CPU, and that can only happen if the checks in
140 } 150 * machine_kexec_prepare() were not correct. If this fails, we can't
151 * reliably kexec anyway, so BUG_ON is appropriate.
152 */
153 BUG_ON(num_online_cpus() > 1);
141 154
142 page_list = image->head & PAGE_MASK; 155 page_list = image->head & PAGE_MASK;
143 156
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 85c3fb6c93c2..084dc8896986 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -292,12 +292,20 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
292 maps[ARM_SEC_CORE].unw_sec = s; 292 maps[ARM_SEC_CORE].unw_sec = s;
293 else if (strcmp(".ARM.exidx.exit.text", secname) == 0) 293 else if (strcmp(".ARM.exidx.exit.text", secname) == 0)
294 maps[ARM_SEC_EXIT].unw_sec = s; 294 maps[ARM_SEC_EXIT].unw_sec = s;
295 else if (strcmp(".ARM.exidx.text.unlikely", secname) == 0)
296 maps[ARM_SEC_UNLIKELY].unw_sec = s;
297 else if (strcmp(".ARM.exidx.text.hot", secname) == 0)
298 maps[ARM_SEC_HOT].unw_sec = s;
295 else if (strcmp(".init.text", secname) == 0) 299 else if (strcmp(".init.text", secname) == 0)
296 maps[ARM_SEC_INIT].txt_sec = s; 300 maps[ARM_SEC_INIT].txt_sec = s;
297 else if (strcmp(".text", secname) == 0) 301 else if (strcmp(".text", secname) == 0)
298 maps[ARM_SEC_CORE].txt_sec = s; 302 maps[ARM_SEC_CORE].txt_sec = s;
299 else if (strcmp(".exit.text", secname) == 0) 303 else if (strcmp(".exit.text", secname) == 0)
300 maps[ARM_SEC_EXIT].txt_sec = s; 304 maps[ARM_SEC_EXIT].txt_sec = s;
305 else if (strcmp(".text.unlikely", secname) == 0)
306 maps[ARM_SEC_UNLIKELY].txt_sec = s;
307 else if (strcmp(".text.hot", secname) == 0)
308 maps[ARM_SEC_HOT].txt_sec = s;
301 } 309 }
302 310
303 for (i = 0; i < ARM_SEC_MAX; i++) 311 for (i = 0; i < ARM_SEC_MAX; i++)
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index d9f5cd4e533f..e186ee1e63f6 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -53,7 +53,12 @@ armpmu_map_cache_event(const unsigned (*cache_map)
53static int 53static int
54armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config) 54armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
55{ 55{
56 int mapping = (*event_map)[config]; 56 int mapping;
57
58 if (config >= PERF_COUNT_HW_MAX)
59 return -EINVAL;
60
61 mapping = (*event_map)[config];
57 return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping; 62 return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
58} 63}
59 64
@@ -253,6 +258,9 @@ validate_event(struct pmu_hw_events *hw_events,
253 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); 258 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
254 struct pmu *leader_pmu = event->group_leader->pmu; 259 struct pmu *leader_pmu = event->group_leader->pmu;
255 260
261 if (is_software_event(event))
262 return 1;
263
256 if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF) 264 if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
257 return 1; 265 return 1;
258 266
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index aebe0e99c153..8d6147b2001f 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -118,7 +118,8 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
118 continue; 118 continue;
119 } 119 }
120 120
121 err = request_irq(irq, handler, IRQF_NOBALANCING, "arm-pmu", 121 err = request_irq(irq, handler,
122 IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
122 cpu_pmu); 123 cpu_pmu);
123 if (err) { 124 if (err) {
124 pr_err("unable to request IRQ%d for ARM PMU counters\n", 125 pr_err("unable to request IRQ%d for ARM PMU counters\n",
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 536c85fe72a8..94f6b05f9e24 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -462,7 +462,7 @@ int in_gate_area_no_mm(unsigned long addr)
462{ 462{
463 return in_gate_area(NULL, addr); 463 return in_gate_area(NULL, addr);
464} 464}
465#define is_gate_vma(vma) ((vma) = &gate_vma) 465#define is_gate_vma(vma) ((vma) == &gate_vma)
466#else 466#else
467#define is_gate_vma(vma) 0 467#define is_gate_vma(vma) 0
468#endif 468#endif
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index afc2489ee13b..0e1e2b3afa45 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -72,10 +72,10 @@ static int __init fpe_setup(char *line)
72__setup("fpe=", fpe_setup); 72__setup("fpe=", fpe_setup);
73#endif 73#endif
74 74
75extern void paging_init(struct machine_desc *desc); 75extern void paging_init(const struct machine_desc *desc);
76extern void sanity_check_meminfo(void); 76extern void sanity_check_meminfo(void);
77extern enum reboot_mode reboot_mode; 77extern enum reboot_mode reboot_mode;
78extern void setup_dma_zone(struct machine_desc *desc); 78extern void setup_dma_zone(const struct machine_desc *desc);
79 79
80unsigned int processor_id; 80unsigned int processor_id;
81EXPORT_SYMBOL(processor_id); 81EXPORT_SYMBOL(processor_id);
@@ -139,7 +139,7 @@ EXPORT_SYMBOL(elf_platform);
139static const char *cpu_name; 139static const char *cpu_name;
140static const char *machine_name; 140static const char *machine_name;
141static char __initdata cmd_line[COMMAND_LINE_SIZE]; 141static char __initdata cmd_line[COMMAND_LINE_SIZE];
142struct machine_desc *machine_desc __initdata; 142const struct machine_desc *machine_desc __initdata;
143 143
144static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } }; 144static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
145#define ENDIANNESS ((char)endian_test.l) 145#define ENDIANNESS ((char)endian_test.l)
@@ -607,7 +607,7 @@ static void __init setup_processor(void)
607 607
608void __init dump_machine_table(void) 608void __init dump_machine_table(void)
609{ 609{
610 struct machine_desc *p; 610 const struct machine_desc *p;
611 611
612 early_print("Available machine support:\n\nID (hex)\tNAME\n"); 612 early_print("Available machine support:\n\nID (hex)\tNAME\n");
613 for_each_machine_desc(p) 613 for_each_machine_desc(p)
@@ -694,7 +694,7 @@ static int __init early_mem(char *p)
694} 694}
695early_param("mem", early_mem); 695early_param("mem", early_mem);
696 696
697static void __init request_standard_resources(struct machine_desc *mdesc) 697static void __init request_standard_resources(const struct machine_desc *mdesc)
698{ 698{
699 struct memblock_region *region; 699 struct memblock_region *region;
700 struct resource *res; 700 struct resource *res;
@@ -852,7 +852,7 @@ void __init hyp_mode_check(void)
852 852
853void __init setup_arch(char **cmdline_p) 853void __init setup_arch(char **cmdline_p)
854{ 854{
855 struct machine_desc *mdesc; 855 const struct machine_desc *mdesc;
856 856
857 setup_processor(); 857 setup_processor();
858 mdesc = setup_machine_fdt(__atags_pointer); 858 mdesc = setup_machine_fdt(__atags_pointer);
@@ -994,15 +994,6 @@ static int c_show(struct seq_file *m, void *v)
994 seq_printf(m, "model name\t: %s rev %d (%s)\n", 994 seq_printf(m, "model name\t: %s rev %d (%s)\n",
995 cpu_name, cpuid & 15, elf_platform); 995 cpu_name, cpuid & 15, elf_platform);
996 996
997#if defined(CONFIG_SMP)
998 seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
999 per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
1000 (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
1001#else
1002 seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
1003 loops_per_jiffy / (500000/HZ),
1004 (loops_per_jiffy / (5000/HZ)) % 100);
1005#endif
1006 /* dump out the processor features */ 997 /* dump out the processor features */
1007 seq_puts(m, "Features\t: "); 998 seq_puts(m, "Features\t: ");
1008 999
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 3a98192a3118..72024ea8a3a6 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -144,6 +144,16 @@ int boot_secondary(unsigned int cpu, struct task_struct *idle)
144 return -ENOSYS; 144 return -ENOSYS;
145} 145}
146 146
147int platform_can_cpu_hotplug(void)
148{
149#ifdef CONFIG_HOTPLUG_CPU
150 if (smp_ops.cpu_kill)
151 return 1;
152#endif
153
154 return 0;
155}
156
147#ifdef CONFIG_HOTPLUG_CPU 157#ifdef CONFIG_HOTPLUG_CPU
148static int platform_cpu_kill(unsigned int cpu) 158static int platform_cpu_kill(unsigned int cpu)
149{ 159{
@@ -373,17 +383,8 @@ asmlinkage void secondary_start_kernel(void)
373 383
374void __init smp_cpus_done(unsigned int max_cpus) 384void __init smp_cpus_done(unsigned int max_cpus)
375{ 385{
376 int cpu; 386 printk(KERN_INFO "SMP: Total of %d processors activated.\n",
377 unsigned long bogosum = 0; 387 num_online_cpus());
378
379 for_each_online_cpu(cpu)
380 bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;
381
382 printk(KERN_INFO "SMP: Total of %d processors activated "
383 "(%lu.%02lu BogoMIPS).\n",
384 num_online_cpus(),
385 bogosum / (500000/HZ),
386 (bogosum / (5000/HZ)) % 100);
387 388
388 hyp_mode_check(); 389 hyp_mode_check();
389} 390}
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
index c2edfff573c2..83ccca303df8 100644
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -104,7 +104,7 @@ void flush_tlb_all(void)
104 if (tlb_ops_need_broadcast()) 104 if (tlb_ops_need_broadcast())
105 on_each_cpu(ipi_flush_tlb_all, NULL, 1); 105 on_each_cpu(ipi_flush_tlb_all, NULL, 1);
106 else 106 else
107 local_flush_tlb_all(); 107 __flush_tlb_all();
108 broadcast_tlb_a15_erratum(); 108 broadcast_tlb_a15_erratum();
109} 109}
110 110
@@ -113,7 +113,7 @@ void flush_tlb_mm(struct mm_struct *mm)
113 if (tlb_ops_need_broadcast()) 113 if (tlb_ops_need_broadcast())
114 on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1); 114 on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1);
115 else 115 else
116 local_flush_tlb_mm(mm); 116 __flush_tlb_mm(mm);
117 broadcast_tlb_mm_a15_erratum(mm); 117 broadcast_tlb_mm_a15_erratum(mm);
118} 118}
119 119
@@ -126,7 +126,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
126 on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, 126 on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page,
127 &ta, 1); 127 &ta, 1);
128 } else 128 } else
129 local_flush_tlb_page(vma, uaddr); 129 __flush_tlb_page(vma, uaddr);
130 broadcast_tlb_mm_a15_erratum(vma->vm_mm); 130 broadcast_tlb_mm_a15_erratum(vma->vm_mm);
131} 131}
132 132
@@ -137,7 +137,7 @@ void flush_tlb_kernel_page(unsigned long kaddr)
137 ta.ta_start = kaddr; 137 ta.ta_start = kaddr;
138 on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1); 138 on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
139 } else 139 } else
140 local_flush_tlb_kernel_page(kaddr); 140 __flush_tlb_kernel_page(kaddr);
141 broadcast_tlb_a15_erratum(); 141 broadcast_tlb_a15_erratum();
142} 142}
143 143
@@ -173,5 +173,5 @@ void flush_bp_all(void)
173 if (tlb_ops_need_broadcast()) 173 if (tlb_ops_need_broadcast())
174 on_each_cpu(ipi_flush_bp_all, NULL, 1); 174 on_each_cpu(ipi_flush_bp_all, NULL, 1);
175 else 175 else
176 local_flush_bp_all(); 176 __flush_bp_all();
177} 177}
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index c5a59546a256..85a87370f144 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -74,12 +74,8 @@ struct cpu_efficiency table_efficiency[] = {
74 {NULL, }, 74 {NULL, },
75}; 75};
76 76
77struct cpu_capacity { 77unsigned long *__cpu_capacity;
78 unsigned long hwid; 78#define cpu_capacity(cpu) __cpu_capacity[cpu]
79 unsigned long capacity;
80};
81
82struct cpu_capacity *cpu_capacity;
83 79
84unsigned long middle_capacity = 1; 80unsigned long middle_capacity = 1;
85 81
@@ -100,15 +96,19 @@ static void __init parse_dt_topology(void)
100 unsigned long capacity = 0; 96 unsigned long capacity = 0;
101 int alloc_size, cpu = 0; 97 int alloc_size, cpu = 0;
102 98
103 alloc_size = nr_cpu_ids * sizeof(struct cpu_capacity); 99 alloc_size = nr_cpu_ids * sizeof(*__cpu_capacity);
104 cpu_capacity = kzalloc(alloc_size, GFP_NOWAIT); 100 __cpu_capacity = kzalloc(alloc_size, GFP_NOWAIT);
105 101
106 while ((cn = of_find_node_by_type(cn, "cpu"))) { 102 for_each_possible_cpu(cpu) {
107 const u32 *rate, *reg; 103 const u32 *rate;
108 int len; 104 int len;
109 105
110 if (cpu >= num_possible_cpus()) 106 /* too early to use cpu->of_node */
111 break; 107 cn = of_get_cpu_node(cpu, NULL);
108 if (!cn) {
109 pr_err("missing device node for CPU %d\n", cpu);
110 continue;
111 }
112 112
113 for (cpu_eff = table_efficiency; cpu_eff->compatible; cpu_eff++) 113 for (cpu_eff = table_efficiency; cpu_eff->compatible; cpu_eff++)
114 if (of_device_is_compatible(cn, cpu_eff->compatible)) 114 if (of_device_is_compatible(cn, cpu_eff->compatible))
@@ -124,12 +124,6 @@ static void __init parse_dt_topology(void)
124 continue; 124 continue;
125 } 125 }
126 126
127 reg = of_get_property(cn, "reg", &len);
128 if (!reg || len != 4) {
129 pr_err("%s missing reg property\n", cn->full_name);
130 continue;
131 }
132
133 capacity = ((be32_to_cpup(rate)) >> 20) * cpu_eff->efficiency; 127 capacity = ((be32_to_cpup(rate)) >> 20) * cpu_eff->efficiency;
134 128
135 /* Save min capacity of the system */ 129 /* Save min capacity of the system */
@@ -140,13 +134,9 @@ static void __init parse_dt_topology(void)
140 if (capacity > max_capacity) 134 if (capacity > max_capacity)
141 max_capacity = capacity; 135 max_capacity = capacity;
142 136
143 cpu_capacity[cpu].capacity = capacity; 137 cpu_capacity(cpu) = capacity;
144 cpu_capacity[cpu++].hwid = be32_to_cpup(reg);
145 } 138 }
146 139
147 if (cpu < num_possible_cpus())
148 cpu_capacity[cpu].hwid = (unsigned long)(-1);
149
 150 /* If min and max capacities are equal, we bypass the update of the 140 /* If min and max capacities are equal, we bypass the update of the
151 * cpu_scale because all CPUs have the same capacity. Otherwise, we 141 * cpu_scale because all CPUs have the same capacity. Otherwise, we
152 * compute a middle_capacity factor that will ensure that the capacity 142 * compute a middle_capacity factor that will ensure that the capacity
@@ -154,9 +144,7 @@ static void __init parse_dt_topology(void)
154 * SCHED_POWER_SCALE, which is the default value, but with the 144 * SCHED_POWER_SCALE, which is the default value, but with the
155 * constraint explained near table_efficiency[]. 145 * constraint explained near table_efficiency[].
156 */ 146 */
157 if (min_capacity == max_capacity) 147 if (4*max_capacity < (3*(max_capacity + min_capacity)))
158 cpu_capacity[0].hwid = (unsigned long)(-1);
159 else if (4*max_capacity < (3*(max_capacity + min_capacity)))
160 middle_capacity = (min_capacity + max_capacity) 148 middle_capacity = (min_capacity + max_capacity)
161 >> (SCHED_POWER_SHIFT+1); 149 >> (SCHED_POWER_SHIFT+1);
162 else 150 else
@@ -170,23 +158,12 @@ static void __init parse_dt_topology(void)
 170 * boot. The update of all CPUs is in O(n^2) for heterogeneous systems but the 158 * boot. The update of all CPUs is in O(n^2) for heterogeneous systems but the
 171 * function returns directly for SMP systems. 159 * function returns directly for SMP systems.
172 */ 160 */
173void update_cpu_power(unsigned int cpu, unsigned long hwid) 161void update_cpu_power(unsigned int cpu)
174{ 162{
175 unsigned int idx = 0; 163 if (!cpu_capacity(cpu))
176
177 /* look for the cpu's hwid in the cpu capacity table */
178 for (idx = 0; idx < num_possible_cpus(); idx++) {
179 if (cpu_capacity[idx].hwid == hwid)
180 break;
181
182 if (cpu_capacity[idx].hwid == -1)
183 return;
184 }
185
186 if (idx == num_possible_cpus())
187 return; 164 return;
188 165
189 set_power_scale(cpu, cpu_capacity[idx].capacity / middle_capacity); 166 set_power_scale(cpu, cpu_capacity(cpu) / middle_capacity);
190 167
191 printk(KERN_INFO "CPU%u: update cpu_power %lu\n", 168 printk(KERN_INFO "CPU%u: update cpu_power %lu\n",
192 cpu, arch_scale_freq_power(NULL, cpu)); 169 cpu, arch_scale_freq_power(NULL, cpu));
@@ -194,7 +171,7 @@ void update_cpu_power(unsigned int cpu, unsigned long hwid)
194 171
195#else 172#else
196static inline void parse_dt_topology(void) {} 173static inline void parse_dt_topology(void) {}
197static inline void update_cpu_power(unsigned int cpuid, unsigned int mpidr) {} 174static inline void update_cpu_power(unsigned int cpuid) {}
198#endif 175#endif
199 176
200 /* 177 /*
@@ -281,7 +258,7 @@ void store_cpu_topology(unsigned int cpuid)
281 258
282 update_siblings_masks(cpuid); 259 update_siblings_masks(cpuid);
283 260
284 update_cpu_power(cpuid, mpidr & MPIDR_HWID_BITMASK); 261 update_cpu_power(cpuid);
285 262
286 printk(KERN_INFO "CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n", 263 printk(KERN_INFO "CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
287 cpuid, cpu_topology[cpuid].thread_id, 264 cpuid, cpu_topology[cpuid].thread_id,
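
To make the capacity scaling concrete, a worked example with illustrative numbers: a big.LITTLE pair with both clusters at 1 GHz uses the table_efficiency values 2048 (Cortex-A7) and 3891 (Cortex-A15). capacity = (clock-frequency >> 20) * efficiency gives 953 * 2048 ≈ 1.95M for the A7 and 953 * 3891 ≈ 3.71M for the A15. Since 4*max < 3*(max + min), middle_capacity = (min + max) >> (SCHED_POWER_SHIFT + 1) ≈ 2763, so the resulting cpu_power values come out at roughly 706 for the A7 and 1342 for the A15, bracketing the SCHED_POWER_SCALE default of 1024.
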
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index ab517fcce21b..8fcda140358d 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -497,28 +497,64 @@ static int bad_syscall(int n, struct pt_regs *regs)
497 return regs->ARM_r0; 497 return regs->ARM_r0;
498} 498}
499 499
500static long do_cache_op_restart(struct restart_block *);
501
500static inline int 502static inline int
501do_cache_op(unsigned long start, unsigned long end, int flags) 503__do_cache_op(unsigned long start, unsigned long end)
502{ 504{
503 struct mm_struct *mm = current->active_mm; 505 int ret;
504 struct vm_area_struct *vma; 506 unsigned long chunk = PAGE_SIZE;
507
508 do {
509 if (signal_pending(current)) {
510 struct thread_info *ti = current_thread_info();
511
512 ti->restart_block = (struct restart_block) {
513 .fn = do_cache_op_restart,
514 };
515
516 ti->arm_restart_block = (struct arm_restart_block) {
517 {
518 .cache = {
519 .start = start,
520 .end = end,
521 },
522 },
523 };
524
525 return -ERESTART_RESTARTBLOCK;
526 }
527
528 ret = flush_cache_user_range(start, start + chunk);
529 if (ret)
530 return ret;
505 531
532 cond_resched();
533 start += chunk;
534 } while (start < end);
535
536 return 0;
537}
538
539static long do_cache_op_restart(struct restart_block *unused)
540{
541 struct arm_restart_block *restart_block;
542
543 restart_block = &current_thread_info()->arm_restart_block;
544 return __do_cache_op(restart_block->cache.start,
545 restart_block->cache.end);
546}
547
548static inline int
549do_cache_op(unsigned long start, unsigned long end, int flags)
550{
506 if (end < start || flags) 551 if (end < start || flags)
507 return -EINVAL; 552 return -EINVAL;
508 553
509 down_read(&mm->mmap_sem); 554 if (!access_ok(VERIFY_READ, start, end - start))
510 vma = find_vma(mm, start); 555 return -EFAULT;
511 if (vma && vma->vm_start < end) {
512 if (start < vma->vm_start)
513 start = vma->vm_start;
514 if (end > vma->vm_end)
515 end = vma->vm_end;
516 556
517 up_read(&mm->mmap_sem); 557 return __do_cache_op(start, end);
518 return flush_cache_user_range(start, end);
519 }
520 up_read(&mm->mmap_sem);
521 return -EINVAL;
522} 558}
523 559
524/* 560/*
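
The rework above backs the ARM-private cacheflush syscall that JITs issue after emitting code; flushing in PAGE_SIZE chunks with a restart block keeps long flushes preemptible and signal-friendly. A minimal user-space sketch (assumes the __ARM_NR_cacheflush definition from the uapi headers; error handling elided):

	#include <unistd.h>
	#include <sys/syscall.h>
	#include <asm/unistd.h>		/* __ARM_NR_cacheflush */

	static void sync_icache(char *start, char *end)
	{
		/* third argument is "flags" and must be 0 */
		syscall(__ARM_NR_cacheflush, start, end, 0);
	}
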
diff --git a/arch/arm/kernel/v7m.c b/arch/arm/kernel/v7m.c
new file mode 100644
index 000000000000..4d2cba94f5cc
--- /dev/null
+++ b/arch/arm/kernel/v7m.c
@@ -0,0 +1,19 @@
1/*
2 * Copyright (C) 2013 Uwe Kleine-Koenig for Pengutronix
3 *
4 * This program is free software; you can redistribute it and/or modify it under
5 * the terms of the GNU General Public License version 2 as published by the
6 * Free Software Foundation.
7 */
8#include <linux/io.h>
9#include <linux/reboot.h>
10#include <asm/barrier.h>
11#include <asm/v7m.h>
12
13void armv7m_restart(enum reboot_mode mode, const char *cmd)
14{
15 dsb();
16 __raw_writel(V7M_SCB_AIRCR_VECTKEY | V7M_SCB_AIRCR_SYSRESETREQ,
17 BASEADDR_V7M_SCB + V7M_SCB_AIRCR);
18 dsb();
19}
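
The VECTKEY half-word is what makes this AIRCR write take effect (the System Control Block ignores writes that lack the 0x05fa key), and the dsb() pair orders the reset request against outstanding memory accesses. The constants used above, as they appear in asm/v7m.h (values per the ARMv7-M ARM; quoted from memory, so verify against the header):

	#define V7M_SCB_AIRCR			0x0c
	#define V7M_SCB_AIRCR_VECTKEY		(0x05fa << 16)
	#define V7M_SCB_AIRCR_SYSRESETREQ	(1 << 2)
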
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 741f66a2edbd..9c697db2787e 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -219,6 +219,10 @@ long kvm_arch_dev_ioctl(struct file *filp,
219 return -EINVAL; 219 return -EINVAL;
220} 220}
221 221
222void kvm_arch_memslots_updated(struct kvm *kvm)
223{
224}
225
222int kvm_arch_prepare_memory_region(struct kvm *kvm, 226int kvm_arch_prepare_memory_region(struct kvm *kvm,
223 struct kvm_memory_slot *memslot, 227 struct kvm_memory_slot *memslot,
224 struct kvm_userspace_memory_region *mem, 228 struct kvm_userspace_memory_region *mem,
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 4a5199070430..db9cf692d4dd 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -146,7 +146,11 @@ static bool pm_fake(struct kvm_vcpu *vcpu,
146#define access_pmintenclr pm_fake 146#define access_pmintenclr pm_fake
147 147
148/* Architected CP15 registers. 148/* Architected CP15 registers.
149 * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 149 * CRn denotes the primary register number, but is copied to the CRm in the
150 * user space API for 64-bit register access in line with the terminology used
151 * in the ARM ARM.
152 * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit
153 * registers preceding 32-bit ones.
150 */ 154 */
151static const struct coproc_reg cp15_regs[] = { 155static const struct coproc_reg cp15_regs[] = {
152 /* CSSELR: swapped by interrupt.S. */ 156 /* CSSELR: swapped by interrupt.S. */
@@ -154,8 +158,8 @@ static const struct coproc_reg cp15_regs[] = {
154 NULL, reset_unknown, c0_CSSELR }, 158 NULL, reset_unknown, c0_CSSELR },
155 159
156 /* TTBR0/TTBR1: swapped by interrupt.S. */ 160 /* TTBR0/TTBR1: swapped by interrupt.S. */
157 { CRm( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 }, 161 { CRm64( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 },
158 { CRm( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 }, 162 { CRm64( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 },
159 163
160 /* TTBCR: swapped by interrupt.S. */ 164 /* TTBCR: swapped by interrupt.S. */
161 { CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32, 165 { CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32,
@@ -182,7 +186,7 @@ static const struct coproc_reg cp15_regs[] = {
182 NULL, reset_unknown, c6_IFAR }, 186 NULL, reset_unknown, c6_IFAR },
183 187
184 /* PAR swapped by interrupt.S */ 188 /* PAR swapped by interrupt.S */
185 { CRn( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR }, 189 { CRm64( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR },
186 190
187 /* 191 /*
188 * DC{C,I,CI}SW operations: 192 * DC{C,I,CI}SW operations:
@@ -399,12 +403,13 @@ static bool index_to_params(u64 id, struct coproc_params *params)
399 | KVM_REG_ARM_OPC1_MASK)) 403 | KVM_REG_ARM_OPC1_MASK))
400 return false; 404 return false;
401 params->is_64bit = true; 405 params->is_64bit = true;
402 params->CRm = ((id & KVM_REG_ARM_CRM_MASK) 406 /* CRm to CRn: see cp15_to_index for details */
407 params->CRn = ((id & KVM_REG_ARM_CRM_MASK)
403 >> KVM_REG_ARM_CRM_SHIFT); 408 >> KVM_REG_ARM_CRM_SHIFT);
404 params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK) 409 params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
405 >> KVM_REG_ARM_OPC1_SHIFT); 410 >> KVM_REG_ARM_OPC1_SHIFT);
406 params->Op2 = 0; 411 params->Op2 = 0;
407 params->CRn = 0; 412 params->CRm = 0;
408 return true; 413 return true;
409 default: 414 default:
410 return false; 415 return false;
@@ -898,7 +903,14 @@ static u64 cp15_to_index(const struct coproc_reg *reg)
898 if (reg->is_64) { 903 if (reg->is_64) {
899 val |= KVM_REG_SIZE_U64; 904 val |= KVM_REG_SIZE_U64;
900 val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT); 905 val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
901 val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT); 906 /*
907 * CRn always denotes the primary coproc. reg. nr. for the
908 * in-kernel representation, but the user space API uses the
909 * CRm for the encoding, because it is modelled after the
910 * MRRC/MCRR instructions: see the ARM ARM rev. c page
911 * B3-1445
912 */
913 val |= (reg->CRn << KVM_REG_ARM_CRM_SHIFT);
902 } else { 914 } else {
903 val |= KVM_REG_SIZE_U32; 915 val |= KVM_REG_SIZE_U32;
904 val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT); 916 val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
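
Given the convention above, user space keeps addressing 64-bit cp15 registers through the CRm field of the register index. A hedged sketch of how the TTBR0 entry from cp15_regs[] would be named for KVM_GET_ONE_REG (constants from the kvm uapi headers; illustrative composition, not code from this patch):

	#include <linux/kvm.h>

	/* 64-bit cp15 register: coproc 15, CRm 2 (TTBR0), Op1 0 */
	__u64 ttbr0_id = KVM_REG_ARM | KVM_REG_SIZE_U64
			 | (15 << KVM_REG_ARM_COPROC_SHIFT)
			 | (2 << KVM_REG_ARM_CRM_SHIFT)
			 | (0 << KVM_REG_ARM_OPC1_SHIFT);
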
diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h
index b7301d3e4799..0461d5c8d3de 100644
--- a/arch/arm/kvm/coproc.h
+++ b/arch/arm/kvm/coproc.h
@@ -135,6 +135,8 @@ static inline int cmp_reg(const struct coproc_reg *i1,
135 return -1; 135 return -1;
136 if (i1->CRn != i2->CRn) 136 if (i1->CRn != i2->CRn)
137 return i1->CRn - i2->CRn; 137 return i1->CRn - i2->CRn;
138 if (i1->is_64 != i2->is_64)
139 return i2->is_64 - i1->is_64;
138 if (i1->CRm != i2->CRm) 140 if (i1->CRm != i2->CRm)
139 return i1->CRm - i2->CRm; 141 return i1->CRm - i2->CRm;
140 if (i1->Op1 != i2->Op1) 142 if (i1->Op1 != i2->Op1)
@@ -145,6 +147,7 @@ static inline int cmp_reg(const struct coproc_reg *i1,
145 147
146#define CRn(_x) .CRn = _x 148#define CRn(_x) .CRn = _x
147#define CRm(_x) .CRm = _x 149#define CRm(_x) .CRm = _x
150#define CRm64(_x) .CRn = _x, .CRm = 0
148#define Op1(_x) .Op1 = _x 151#define Op1(_x) .Op1 = _x
149#define Op2(_x) .Op2 = _x 152#define Op2(_x) .Op2 = _x
150#define is64 .is_64 = true 153#define is64 .is_64 = true
diff --git a/arch/arm/kvm/coproc_a15.c b/arch/arm/kvm/coproc_a15.c
index 685063a6d0cf..cf93472b9dd6 100644
--- a/arch/arm/kvm/coproc_a15.c
+++ b/arch/arm/kvm/coproc_a15.c
@@ -114,7 +114,11 @@ static bool access_l2ectlr(struct kvm_vcpu *vcpu,
114 114
115/* 115/*
116 * A15-specific CP15 registers. 116 * A15-specific CP15 registers.
117 * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 117 * CRn denotes the primary register number, but is copied to the CRm in the
118 * user space API for 64-bit register access in line with the terminology used
119 * in the ARM ARM.
120 * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit
121 * registers preceding 32-bit ones.
118 */ 122 */
119static const struct coproc_reg a15_regs[] = { 123static const struct coproc_reg a15_regs[] = {
120 /* MPIDR: we use VMPIDR for guest access. */ 124 /* MPIDR: we use VMPIDR for guest access. */
diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S
index f048338135f7..1b9844d369cc 100644
--- a/arch/arm/kvm/init.S
+++ b/arch/arm/kvm/init.S
@@ -142,7 +142,7 @@ target: @ We're now in the trampoline code, switch page tables
142 142
143 @ Invalidate the old TLBs 143 @ Invalidate the old TLBs
144 mcr p15, 4, r0, c8, c7, 0 @ TLBIALLH 144 mcr p15, 4, r0, c8, c7, 0 @ TLBIALLH
145 dsb 145 dsb ish
146 146
147 eret 147 eret
148 148
diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
index 16cd4ba5d7fd..ddc15539bad2 100644
--- a/arch/arm/kvm/interrupts.S
+++ b/arch/arm/kvm/interrupts.S
@@ -55,7 +55,7 @@ ENTRY(__kvm_tlb_flush_vmid_ipa)
55 mcrr p15, 6, r2, r3, c2 @ Write VTTBR 55 mcrr p15, 6, r2, r3, c2 @ Write VTTBR
56 isb 56 isb
57 mcr p15, 0, r0, c8, c3, 0 @ TLBIALLIS (rt ignored) 57 mcr p15, 0, r0, c8, c3, 0 @ TLBIALLIS (rt ignored)
58 dsb 58 dsb ish
59 isb 59 isb
60 mov r2, #0 60 mov r2, #0
61 mov r3, #0 61 mov r3, #0
@@ -79,7 +79,7 @@ ENTRY(__kvm_flush_vm_context)
79 mcr p15, 4, r0, c8, c3, 4 79 mcr p15, 4, r0, c8, c3, 4
80 /* Invalidate instruction caches Inner Shareable (ICIALLUIS) */ 80 /* Invalidate instruction caches Inner Shareable (ICIALLUIS) */
81 mcr p15, 0, r0, c7, c1, 0 81 mcr p15, 0, r0, c7, c1, 0
82 dsb 82 dsb ish
83 isb @ Not necessary if followed by eret 83 isb @ Not necessary if followed by eret
84 84
85 bx lr 85 bx lr
@@ -492,10 +492,10 @@ __kvm_hyp_code_end:
492 .section ".rodata" 492 .section ".rodata"
493 493
494und_die_str: 494und_die_str:
495 .ascii "unexpected undefined exception in Hyp mode at: %#08x" 495 .ascii "unexpected undefined exception in Hyp mode at: %#08x\n"
496pabt_die_str: 496pabt_die_str:
497 .ascii "unexpected prefetch abort in Hyp mode at: %#08x" 497 .ascii "unexpected prefetch abort in Hyp mode at: %#08x\n"
498dabt_die_str: 498dabt_die_str:
499 .ascii "unexpected data abort in Hyp mode at: %#08x" 499 .ascii "unexpected data abort in Hyp mode at: %#08x\n"
500svc_die_str: 500svc_die_str:
501 .ascii "unexpected HVC/SVC trap in Hyp mode at: %#08x" 501 .ascii "unexpected HVC/SVC trap in Hyp mode at: %#08x\n"
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index b8e06b7a2833..0c25d9487d53 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -63,7 +63,8 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
63static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, 63static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
64 struct kvm_exit_mmio *mmio) 64 struct kvm_exit_mmio *mmio)
65{ 65{
66 unsigned long rt, len; 66 unsigned long rt;
67 int len;
67 bool is_write, sign_extend; 68 bool is_write, sign_extend;
68 69
69 if (kvm_vcpu_dabt_isextabt(vcpu)) { 70 if (kvm_vcpu_dabt_isextabt(vcpu)) {
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index ca6bea4859b4..b0de86b56c13 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -85,6 +85,12 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
85 return p; 85 return p;
86} 86}
87 87
88static bool page_empty(void *ptr)
89{
90 struct page *ptr_page = virt_to_page(ptr);
91 return page_count(ptr_page) == 1;
92}
93
88static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr) 94static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
89{ 95{
90 pmd_t *pmd_table = pmd_offset(pud, 0); 96 pmd_t *pmd_table = pmd_offset(pud, 0);
@@ -103,12 +109,6 @@ static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
103 put_page(virt_to_page(pmd)); 109 put_page(virt_to_page(pmd));
104} 110}
105 111
106static bool pmd_empty(pmd_t *pmd)
107{
108 struct page *pmd_page = virt_to_page(pmd);
109 return page_count(pmd_page) == 1;
110}
111
112static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr) 112static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr)
113{ 113{
114 if (pte_present(*pte)) { 114 if (pte_present(*pte)) {
@@ -118,12 +118,6 @@ static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr)
118 } 118 }
119} 119}
120 120
121static bool pte_empty(pte_t *pte)
122{
123 struct page *pte_page = virt_to_page(pte);
124 return page_count(pte_page) == 1;
125}
126
127static void unmap_range(struct kvm *kvm, pgd_t *pgdp, 121static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
128 unsigned long long start, u64 size) 122 unsigned long long start, u64 size)
129{ 123{
@@ -132,37 +126,37 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
132 pmd_t *pmd; 126 pmd_t *pmd;
133 pte_t *pte; 127 pte_t *pte;
134 unsigned long long addr = start, end = start + size; 128 unsigned long long addr = start, end = start + size;
135 u64 range; 129 u64 next;
136 130
137 while (addr < end) { 131 while (addr < end) {
138 pgd = pgdp + pgd_index(addr); 132 pgd = pgdp + pgd_index(addr);
139 pud = pud_offset(pgd, addr); 133 pud = pud_offset(pgd, addr);
140 if (pud_none(*pud)) { 134 if (pud_none(*pud)) {
141 addr += PUD_SIZE; 135 addr = pud_addr_end(addr, end);
142 continue; 136 continue;
143 } 137 }
144 138
145 pmd = pmd_offset(pud, addr); 139 pmd = pmd_offset(pud, addr);
146 if (pmd_none(*pmd)) { 140 if (pmd_none(*pmd)) {
147 addr += PMD_SIZE; 141 addr = pmd_addr_end(addr, end);
148 continue; 142 continue;
149 } 143 }
150 144
151 pte = pte_offset_kernel(pmd, addr); 145 pte = pte_offset_kernel(pmd, addr);
152 clear_pte_entry(kvm, pte, addr); 146 clear_pte_entry(kvm, pte, addr);
153 range = PAGE_SIZE; 147 next = addr + PAGE_SIZE;
154 148
155 /* If we emptied the pte, walk back up the ladder */ 149 /* If we emptied the pte, walk back up the ladder */
156 if (pte_empty(pte)) { 150 if (page_empty(pte)) {
157 clear_pmd_entry(kvm, pmd, addr); 151 clear_pmd_entry(kvm, pmd, addr);
158 range = PMD_SIZE; 152 next = pmd_addr_end(addr, end);
159 if (pmd_empty(pmd)) { 153 if (page_empty(pmd) && !page_empty(pud)) {
160 clear_pud_entry(kvm, pud, addr); 154 clear_pud_entry(kvm, pud, addr);
161 range = PUD_SIZE; 155 next = pud_addr_end(addr, end);
162 } 156 }
163 } 157 }
164 158
165 addr += range; 159 addr = next;
166 } 160 }
167} 161}
168 162
@@ -495,7 +489,6 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
495 489
496 for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) { 490 for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
497 pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE); 491 pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);
498 kvm_set_s2pte_writable(&pte);
499 492
500 ret = mmu_topup_memory_cache(&cache, 2, 2); 493 ret = mmu_topup_memory_cache(&cache, 2, 2);
501 if (ret) 494 if (ret)
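
unmap_range() now advances with pud_addr_end()/pmd_addr_end(), which return the next table boundary clamped to the end of the range, so holes are skipped a table at a time without overshooting a short range. For reference, the generic helper has this shape (as in include/asm-generic/pgtable.h; reproduced from memory, treat as a sketch):

	#define pmd_addr_end(addr, end)						\
	({	unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK;	\
		(__boundary - 1 < (end) - 1) ? __boundary : (end);		\
	})
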
diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c
index b7840e7aa452..71e08baee209 100644
--- a/arch/arm/kvm/reset.c
+++ b/arch/arm/kvm/reset.c
@@ -40,7 +40,7 @@ static struct kvm_regs a15_regs_reset = {
40}; 40};
41 41
42static const struct kvm_irq_level a15_vtimer_irq = { 42static const struct kvm_irq_level a15_vtimer_irq = {
43 .irq = 27, 43 { .irq = 27 },
44 .level = 1, 44 .level = 1,
45}; 45};
46 46
diff --git a/arch/arm/kvm/trace.h b/arch/arm/kvm/trace.h
index a8e73ed5ad5b..b1d640f78623 100644
--- a/arch/arm/kvm/trace.h
+++ b/arch/arm/kvm/trace.h
@@ -59,10 +59,9 @@ TRACE_EVENT(kvm_guest_fault,
 		__entry->ipa			= ipa;
 	),
 
-	TP_printk("guest fault at PC %#08lx (hxfar %#08lx, "
-		  "ipa %#16llx, hsr %#08lx",
-		  __entry->vcpu_pc, __entry->hxfar,
-		  __entry->ipa, __entry->hsr)
+	TP_printk("ipa %#llx, hsr %#08lx, hxfar %#08lx, pc %#08lx",
+		  __entry->ipa, __entry->hsr,
+		  __entry->hxfar, __entry->vcpu_pc)
 );
 
 TRACE_EVENT(kvm_irq_line,
diff --git a/arch/arm/lib/Makefile b/arch/arm/lib/Makefile
index af72969820b4..aaf3a8731136 100644
--- a/arch/arm/lib/Makefile
+++ b/arch/arm/lib/Makefile
@@ -45,3 +45,9 @@ lib-$(CONFIG_ARCH_SHARK) += io-shark.o
 
 $(obj)/csumpartialcopy.o:	$(obj)/csumpartialcopygeneric.S
 $(obj)/csumpartialcopyuser.o:	$(obj)/csumpartialcopygeneric.S
+
+ifeq ($(CONFIG_KERNEL_MODE_NEON),y)
+  NEON_FLAGS			:= -mfloat-abi=softfp -mfpu=neon
+  CFLAGS_xor-neon.o		+= $(NEON_FLAGS)
+  lib-$(CONFIG_XOR_BLOCKS)	+= xor-neon.o
+endif
diff --git a/arch/arm/lib/xor-neon.c b/arch/arm/lib/xor-neon.c
new file mode 100644
index 000000000000..f485e5a2af4b
--- /dev/null
+++ b/arch/arm/lib/xor-neon.c
@@ -0,0 +1,42 @@
+/*
+ * linux/arch/arm/lib/xor-neon.c
+ *
+ * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/raid/xor.h>
+
+#ifndef __ARM_NEON__
+#error You should compile this file with '-mfloat-abi=softfp -mfpu=neon'
+#endif
+
+/*
+ * Pull in the reference implementations while instructing GCC (through
+ * -ftree-vectorize) to attempt to exploit implicit parallelism and emit
+ * NEON instructions.
+ */
+#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
+#pragma GCC optimize "tree-vectorize"
+#else
+/*
+ * While older versions of GCC do not generate incorrect code, they fail to
+ * recognize the parallel nature of these functions, and emit plain ARM code,
+ * which is known to be slower than the optimized ARM code in asm-arm/xor.h.
+ */
+#warning This code requires at least version 4.6 of GCC
+#endif
+
+#pragma GCC diagnostic ignored "-Wunused-variable"
+#include <asm-generic/xor.h>
+
+struct xor_block_template const xor_block_neon_inner = {
+	.name	= "__inner_neon__",
+	.do_2	= xor_8regs_2,
+	.do_3	= xor_8regs_3,
+	.do_4	= xor_8regs_4,
+	.do_5	= xor_8regs_5,
+};
diff --git a/arch/arm/mach-at91/at91sam9x5.c b/arch/arm/mach-at91/at91sam9x5.c
index 2abee6626aac..916e5a142917 100644
--- a/arch/arm/mach-at91/at91sam9x5.c
+++ b/arch/arm/mach-at91/at91sam9x5.c
@@ -227,6 +227,8 @@ static struct clk_lookup periph_clocks_lookups[] = {
 	CLKDEV_CON_DEV_ID("usart", "f8020000.serial", &usart1_clk),
 	CLKDEV_CON_DEV_ID("usart", "f8024000.serial", &usart2_clk),
 	CLKDEV_CON_DEV_ID("usart", "f8028000.serial", &usart3_clk),
+	CLKDEV_CON_DEV_ID("usart", "f8040000.serial", &uart0_clk),
+	CLKDEV_CON_DEV_ID("usart", "f8044000.serial", &uart1_clk),
 	CLKDEV_CON_DEV_ID("t0_clk", "f8008000.timer", &tcb0_clk),
 	CLKDEV_CON_DEV_ID("t0_clk", "f800c000.timer", &tcb0_clk),
 	CLKDEV_CON_DEV_ID("mci_clk", "f0008000.mmc", &mmc0_clk),
diff --git a/arch/arm/mach-at91/include/mach/at91_adc.h b/arch/arm/mach-at91/include/mach/at91_adc.h
index 8e7ed5c90817..048a57f76bd3 100644
--- a/arch/arm/mach-at91/include/mach/at91_adc.h
+++ b/arch/arm/mach-at91/include/mach/at91_adc.h
@@ -28,9 +28,12 @@
 #define		AT91_ADC_TRGSEL_EXTERNAL	(6 << 1)
 #define	AT91_ADC_LOWRES		(1 << 4)	/* Low Resolution */
 #define	AT91_ADC_SLEEP		(1 << 5)	/* Sleep Mode */
-#define	AT91_ADC_PRESCAL	(0x3f << 8)	/* Prescalar Rate Selection */
+#define	AT91_ADC_PRESCAL_9260	(0x3f << 8)	/* Prescalar Rate Selection */
+#define	AT91_ADC_PRESCAL_9G45	(0xff << 8)
 #define		AT91_ADC_PRESCAL_(x)	((x) << 8)
-#define	AT91_ADC_STARTUP	(0x1f << 16)	/* Startup Up Time */
+#define	AT91_ADC_STARTUP_9260	(0x1f << 16)	/* Startup Up Time */
+#define	AT91_ADC_STARTUP_9G45	(0x7f << 16)
+#define	AT91_ADC_STARTUP_9X5	(0xf << 16)
 #define		AT91_ADC_STARTUP_(x)	((x) << 16)
 #define	AT91_ADC_SHTIM		(0xf << 24)	/* Sample & Hold Time */
 #define		AT91_ADC_SHTIM_(x)	((x) << 24)
@@ -48,6 +51,9 @@
 #define	AT91_ADC_ENDRX		(1 << 18)	/* End of RX Buffer */
 #define	AT91_ADC_RXFUFF		(1 << 19)	/* RX Buffer Full */
 
+#define AT91_ADC_SR_9X5		0x30		/* Status Register for 9x5 */
+#define	AT91_ADC_SR_DRDY_9X5	(1 << 24)	/* Data Ready */
+
 #define AT91_ADC_LCDR		0x20		/* Last Converted Data Register */
 #define	AT91_ADC_LDATA		(0x3ff)
 
@@ -58,4 +64,10 @@
 #define AT91_ADC_CHR(n)		(0x30 + ((n) * 4))	/* Channel Data Register N */
 #define	AT91_ADC_DATA		(0x3ff)
 
+#define AT91_ADC_CDR0_9X5	(0x50)		/* Channel Data Register 0 for 9X5 */
+
+#define AT91_ADC_TRGR_9260	AT91_ADC_MR
+#define AT91_ADC_TRGR_9G45	0x08
+#define AT91_ADC_TRGR_9X5	0xC0
+
 #endif
diff --git a/arch/arm/mach-clps711x/Kconfig b/arch/arm/mach-clps711x/Kconfig
index 01ad4d41e728..bea6295c8c59 100644
--- a/arch/arm/mach-clps711x/Kconfig
+++ b/arch/arm/mach-clps711x/Kconfig
@@ -33,9 +33,6 @@ config ARCH_P720T
 	  Say Y here if you intend to run this kernel on the ARM Prospector
 	  720T.
 
-config ARCH_FORTUNET
-	bool "FORTUNET"
-
 config EP72XX_ROM_BOOT
 	bool "EP721x/EP731x ROM boot"
 	help
diff --git a/arch/arm/mach-clps711x/Makefile b/arch/arm/mach-clps711x/Makefile
index f30ed2b496fb..f04151efd96a 100644
--- a/arch/arm/mach-clps711x/Makefile
+++ b/arch/arm/mach-clps711x/Makefile
@@ -10,5 +10,4 @@ obj-$(CONFIG_ARCH_AUTCPU12) += board-autcpu12.o
 obj-$(CONFIG_ARCH_CDB89712)	+= board-cdb89712.o
 obj-$(CONFIG_ARCH_CLEP7312)	+= board-clep7312.o
 obj-$(CONFIG_ARCH_EDB7211)	+= board-edb7211.o
-obj-$(CONFIG_ARCH_FORTUNET)	+= board-fortunet.o
 obj-$(CONFIG_ARCH_P720T)	+= board-p720t.o
diff --git a/arch/arm/mach-clps711x/board-autcpu12.c b/arch/arm/mach-clps711x/board-autcpu12.c
index 5867aebd8d0c..f8d71a89644a 100644
--- a/arch/arm/mach-clps711x/board-autcpu12.c
+++ b/arch/arm/mach-clps711x/board-autcpu12.c
@@ -259,11 +259,7 @@ static void __init autcpu12_init(void)
 static void __init autcpu12_init_late(void)
 {
 	gpio_request_array(autcpu12_gpios, ARRAY_SIZE(autcpu12_gpios));
-
-	if (IS_ENABLED(MTD_NAND_GPIO) && IS_ENABLED(GPIO_GENERIC_PLATFORM)) {
-		/* We are need both drivers to handle NAND */
-		platform_device_register(&autcpu12_nand_pdev);
-	}
+	platform_device_register(&autcpu12_nand_pdev);
 }
 
 MACHINE_START(AUTCPU12, "autronix autcpu12")
diff --git a/arch/arm/mach-clps711x/board-edb7211.c b/arch/arm/mach-clps711x/board-edb7211.c
index 9dfb990f0801..fe6184ead896 100644
--- a/arch/arm/mach-clps711x/board-edb7211.c
+++ b/arch/arm/mach-clps711x/board-edb7211.c
@@ -126,21 +126,6 @@ static struct gpio edb7211_gpios[] __initconst = {
 	{ EDB7211_LCDBL, GPIOF_OUT_INIT_LOW, "LCD BACKLIGHT" },
 };
 
-static struct map_desc edb7211_io_desc[] __initdata = {
-	{	/* Memory-mapped extra keyboard row */
-		.virtual	= IO_ADDRESS(EDB7211_EXTKBD_BASE),
-		.pfn		= __phys_to_pfn(EDB7211_EXTKBD_BASE),
-		.length		= SZ_1M,
-		.type		= MT_DEVICE,
-	},
-};
-
-void __init edb7211_map_io(void)
-{
-	clps711x_map_io();
-	iotable_init(edb7211_io_desc, ARRAY_SIZE(edb7211_io_desc));
-}
-
 /* Reserve screen memory region at the start of main system memory. */
 static void __init edb7211_reserve(void)
 {
@@ -195,7 +180,7 @@ MACHINE_START(EDB7211, "CL-EDB7211 (EP7211 eval board)")
 	.nr_irqs	= CLPS711X_NR_IRQS,
 	.fixup		= fixup_edb7211,
 	.reserve	= edb7211_reserve,
-	.map_io		= edb7211_map_io,
+	.map_io		= clps711x_map_io,
 	.init_early	= clps711x_init_early,
 	.init_irq	= clps711x_init_irq,
 	.init_time	= clps711x_timer_init,
diff --git a/arch/arm/mach-clps711x/board-fortunet.c b/arch/arm/mach-clps711x/board-fortunet.c
deleted file mode 100644
index b1561e3d7c5c..000000000000
--- a/arch/arm/mach-clps711x/board-fortunet.c
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * linux/arch/arm/mach-clps711x/fortunet.c
- *
- * Derived from linux/arch/arm/mach-integrator/arch.c
- *
- * Copyright (C) 2000 Deep Blue Solutions Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/initrd.h>
-
-#include <mach/hardware.h>
-#include <asm/setup.h>
-#include <asm/mach-types.h>
-
-#include <asm/mach/arch.h>
-
-#include <asm/memory.h>
-
-#include "common.h"
-
-struct meminfo memmap = {
-	.nr_banks	= 1,
-	.bank		= {
-		{
-			.start	= 0xC0000000,
-			.size	= 0x01000000,
-		},
-	},
-};
-
-typedef struct tag_IMAGE_PARAMS
-{
-	int	ramdisk_ok;
-	int	ramdisk_address;
-	int	ramdisk_size;
-	int	ram_size;
-	int	extra_param_type;
-	int	extra_param_ptr;
-	int	command_line;
-} IMAGE_PARAMS;
-
-#define IMAGE_PARAMS_PHYS	0xC01F0000
-
-static void __init
-fortunet_fixup(struct tag *tags, char **cmdline, struct meminfo *mi)
-{
-	IMAGE_PARAMS *ip = phys_to_virt(IMAGE_PARAMS_PHYS);
-	*cmdline = phys_to_virt(ip->command_line);
-#ifdef CONFIG_BLK_DEV_INITRD
-	if(ip->ramdisk_ok)
-	{
-		initrd_start = __phys_to_virt(ip->ramdisk_address);
-		initrd_end = initrd_start + ip->ramdisk_size;
-	}
-#endif
-	memmap.bank[0].size = ip->ram_size;
-	*mi = memmap;
-}
-
-MACHINE_START(FORTUNET, "ARM-FortuNet")
-	/* Maintainer: FortuNet Inc. */
-	.nr_irqs	= CLPS711X_NR_IRQS,
-	.fixup		= fortunet_fixup,
-	.map_io		= clps711x_map_io,
-	.init_early	= clps711x_init_early,
-	.init_irq	= clps711x_init_irq,
-	.init_time	= clps711x_timer_init,
-	.handle_irq	= clps711x_handle_irq,
-	.restart	= clps711x_restart,
-MACHINE_END
diff --git a/arch/arm/mach-clps711x/devices.c b/arch/arm/mach-clps711x/devices.c
index 856b81cf2f8a..fb77d1448fec 100644
--- a/arch/arm/mach-clps711x/devices.c
+++ b/arch/arm/mach-clps711x/devices.c
@@ -57,7 +57,7 @@ static void __init clps711x_add_syscon(void)
 	unsigned i;
 
 	for (i = 0; i < ARRAY_SIZE(clps711x_syscon_res); i++)
-		platform_device_register_simple("clps711x-syscon", i + 1,
+		platform_device_register_simple("syscon", i + 1,
 						&clps711x_syscon_res[i], 1);
 }
 
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
index af6dafbd02ef..f5c228190fdd 100644
--- a/arch/arm/mach-davinci/board-da850-evm.c
+++ b/arch/arm/mach-davinci/board-da850-evm.c
@@ -1249,12 +1249,10 @@ static struct vpif_capture_config da850_vpif_capture_config = {
 
 static struct adv7343_platform_data adv7343_pdata = {
 	.mode_config = {
-		.dac_3 = 1,
-		.dac_2 = 1,
-		.dac_1 = 1,
+		.dac = { 1, 1, 1 },
 	},
 	.sd_config = {
-		.sd_dac_out1 = 1,
+		.sd_dac_out = { 1 },
 	},
 };
 
diff --git a/arch/arm/mach-davinci/board-dm355-leopard.c b/arch/arm/mach-davinci/board-dm355-leopard.c
index dff4ddc5ef81..139e42da25f0 100644
--- a/arch/arm/mach-davinci/board-dm355-leopard.c
+++ b/arch/arm/mach-davinci/board-dm355-leopard.c
@@ -75,6 +75,7 @@ static struct davinci_nand_pdata davinci_nand_data = {
 	.parts			= davinci_nand_partitions,
 	.nr_parts		= ARRAY_SIZE(davinci_nand_partitions),
 	.ecc_mode		= NAND_ECC_HW_SYNDROME,
+	.ecc_bits		= 4,
 	.bbt_options		= NAND_BBT_USE_FLASH,
 };
 
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c
index a33686a6fbb2..fa4bfaf952d8 100644
--- a/arch/arm/mach-davinci/board-dm644x-evm.c
+++ b/arch/arm/mach-davinci/board-dm644x-evm.c
@@ -153,6 +153,7 @@ static struct davinci_nand_pdata davinci_evm_nandflash_data = {
 	.parts			= davinci_evm_nandflash_partition,
 	.nr_parts		= ARRAY_SIZE(davinci_evm_nandflash_partition),
 	.ecc_mode		= NAND_ECC_HW,
+	.ecc_bits		= 1,
 	.bbt_options		= NAND_BBT_USE_FLASH,
 	.timing			= &davinci_evm_nandflash_timing,
 };
diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c
index fbb8e5ab1dc1..0c005e876cac 100644
--- a/arch/arm/mach-davinci/board-dm646x-evm.c
+++ b/arch/arm/mach-davinci/board-dm646x-evm.c
@@ -90,6 +90,7 @@ static struct davinci_nand_pdata davinci_nand_data = {
 	.parts			= davinci_nand_partitions,
 	.nr_parts		= ARRAY_SIZE(davinci_nand_partitions),
 	.ecc_mode		= NAND_ECC_HW,
+	.ecc_bits		= 1,
 	.options		= 0,
 };
 
diff --git a/arch/arm/mach-davinci/board-neuros-osd2.c b/arch/arm/mach-davinci/board-neuros-osd2.c
index 2bc112adf565..808233b60e3d 100644
--- a/arch/arm/mach-davinci/board-neuros-osd2.c
+++ b/arch/arm/mach-davinci/board-neuros-osd2.c
@@ -88,6 +88,7 @@ static struct davinci_nand_pdata davinci_ntosd2_nandflash_data = {
 	.parts			= davinci_ntosd2_nandflash_partition,
 	.nr_parts		= ARRAY_SIZE(davinci_ntosd2_nandflash_partition),
 	.ecc_mode		= NAND_ECC_HW,
+	.ecc_bits		= 1,
 	.bbt_options		= NAND_BBT_USE_FLASH,
 };
 
diff --git a/arch/arm/mach-davinci/cpuidle.c b/arch/arm/mach-davinci/cpuidle.c
index 36aef3a7dedb..f1ac1c94ac0f 100644
--- a/arch/arm/mach-davinci/cpuidle.c
+++ b/arch/arm/mach-davinci/cpuidle.c
@@ -65,7 +65,7 @@ static struct cpuidle_driver davinci_idle_driver = {
 	.states[1]		= {
 		.enter			= davinci_enter_idle,
 		.exit_latency		= 10,
-		.target_residency	= 100000,
+		.target_residency	= 10000,
 		.flags			= CPUIDLE_FLAG_TIME_VALID,
 		.name			= "DDR SR",
 		.desc			= "WFI and DDR Self Refresh",
diff --git a/arch/arm/mach-davinci/include/mach/debug-macro.S b/arch/arm/mach-davinci/include/mach/debug-macro.S
deleted file mode 100644
index b18b8ebc6508..000000000000
--- a/arch/arm/mach-davinci/include/mach/debug-macro.S
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Debugging macro for DaVinci
- *
- * Author: Kevin Hilman, MontaVista Software, Inc. <source@mvista.com>
- *
- * 2007 (c) MontaVista Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */
-
-/* Modifications
- * Jan 2009	Chaithrika U S	Added senduart, busyuart, waituart
- *				macros, based on debug-8250.S file
- *				but using 32-bit accesses required for
- *				some davinci devices.
- */
-
-#include <linux/serial_reg.h>
-
-#include <mach/serial.h>
-
-#define UART_SHIFT	2
-
-#if defined(CONFIG_DEBUG_DAVINCI_DMx_UART0)
-#define UART_BASE	DAVINCI_UART0_BASE
-#elif defined(CONFIG_DEBUG_DAVINCI_DA8XX_UART1)
-#define UART_BASE	DA8XX_UART1_BASE
-#elif defined(CONFIG_DEBUG_DAVINCI_DA8XX_UART2)
-#define UART_BASE	DA8XX_UART2_BASE
-#elif defined(CONFIG_DEBUG_DAVINCI_TNETV107X_UART1)
-#define UART_BASE	TNETV107X_UART2_BASE
-#define UART_VIRTBASE	TNETV107X_UART2_VIRT
-#else
-#error "Select a specifc port for DEBUG_LL"
-#endif
-
-#ifndef UART_VIRTBASE
-#define UART_VIRTBASE	IO_ADDRESS(UART_BASE)
-#endif
-
-	.macro	addruart, rp, rv, tmp
-	ldr	\rp, =UART_BASE
-	ldr	\rv, =UART_VIRTBASE
-	.endm
-
-	.macro	senduart,rd,rx
-	str	\rd, [\rx, #UART_TX << UART_SHIFT]
-	.endm
-
-	.macro	busyuart,rd,rx
-1002:	ldr	\rd, [\rx, #UART_LSR << UART_SHIFT]
-	and	\rd, \rd, #UART_LSR_TEMT | UART_LSR_THRE
-	teq	\rd, #UART_LSR_TEMT | UART_LSR_THRE
-	bne	1002b
-	.endm
-
-	.macro	waituart,rd,rx
-#ifdef FLOW_CONTROL
-1001:	ldr	\rd, [\rx, #UART_MSR << UART_SHIFT]
-	tst	\rd, #UART_MSR_CTS
-	beq	1001b
-#endif
-	.endm
-
diff --git a/arch/arm/mach-dove/common.c b/arch/arm/mach-dove/common.c
index 00247c771313..304f069ebf50 100644
--- a/arch/arm/mach-dove/common.c
+++ b/arch/arm/mach-dove/common.c
@@ -108,8 +108,8 @@ static void __init dove_clk_init(void)
 	orion_clkdev_add(NULL, "sdhci-dove.1", sdio1);
 	orion_clkdev_add(NULL, "orion_nand", nand);
 	orion_clkdev_add(NULL, "cafe1000-ccic.0", camera);
-	orion_clkdev_add(NULL, "kirkwood-i2s.0", i2s0);
-	orion_clkdev_add(NULL, "kirkwood-i2s.1", i2s1);
+	orion_clkdev_add(NULL, "mvebu-audio.0", i2s0);
+	orion_clkdev_add(NULL, "mvebu-audio.1", i2s1);
 	orion_clkdev_add(NULL, "mv_crypto", crypto);
 	orion_clkdev_add(NULL, "dove-ac97", ac97);
 	orion_clkdev_add(NULL, "dove-pdma", pdma);
diff --git a/arch/arm/mach-dove/include/mach/debug-macro.S b/arch/arm/mach-dove/include/mach/debug-macro.S
deleted file mode 100644
index 5929cbc59161..000000000000
--- a/arch/arm/mach-dove/include/mach/debug-macro.S
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * arch/arm/mach-dove/include/mach/debug-macro.S
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <mach/bridge-regs.h>
-
-	.macro	addruart, rp, rv, tmp
-	ldr	\rp, =DOVE_SB_REGS_PHYS_BASE
-	ldr	\rv, =DOVE_SB_REGS_VIRT_BASE
-	orr	\rp, \rp, #0x00012000
-	orr	\rv, \rv, #0x00012000
-	.endm
-
-#define UART_SHIFT	2
-#include <asm/hardware/debug-8250.S>
diff --git a/arch/arm/mach-dove/mpp.c b/arch/arm/mach-dove/mpp.c
index 60bd729a1ba5..8a433a51289c 100644
--- a/arch/arm/mach-dove/mpp.c
+++ b/arch/arm/mach-dove/mpp.c
@@ -47,7 +47,7 @@ static const struct dove_mpp_grp dove_mpp_grp[] = {
 
 /* Enable gpio for a range of pins. mode should be a combination of
    GPIO_OUTPUT_OK | GPIO_INPUT_OK */
-static void dove_mpp_gpio_mode(int start, int end, int gpio_mode)
+static void __init dove_mpp_gpio_mode(int start, int end, int gpio_mode)
 {
 	int i;
 
diff --git a/arch/arm/mach-ebsa110/include/mach/debug-macro.S b/arch/arm/mach-ebsa110/include/mach/debug-macro.S
deleted file mode 100644
index bb02c05e6812..000000000000
--- a/arch/arm/mach-ebsa110/include/mach/debug-macro.S
+++ /dev/null
@@ -1,22 +0,0 @@
-/* arch/arm/mach-ebsa110/include/mach/debug-macro.S
- *
- * Debugging macro include header
- *
- * Copyright (C) 1994-1999 Russell King
- * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
-**/
-
-	.macro	addruart, rp, rv, tmp
-	mov	\rp, #0xf0000000
-	orr	\rp, \rp, #0x00000be0
-	mov	\rp, \rv
-	.endm
-
-#define UART_SHIFT	2
-#define FLOW_CONTROL
-#include <asm/hardware/debug-8250.S>
diff --git a/arch/arm/mach-ep93xx/Kconfig b/arch/arm/mach-ep93xx/Kconfig
index fe3c1fa5462b..93e54fd4e3d5 100644
--- a/arch/arm/mach-ep93xx/Kconfig
+++ b/arch/arm/mach-ep93xx/Kconfig
@@ -194,20 +194,6 @@ config MACH_VISION_EP9307
 	  Say 'Y' here if you want your kernel to support the
 	  Vision Engraving Systems EP9307 SoM.
 
-choice
-	prompt "Select a UART for early kernel messages"
-
-config EP93XX_EARLY_UART1
-	bool "UART1"
-
-config EP93XX_EARLY_UART2
-	bool "UART2"
-
-config EP93XX_EARLY_UART3
-	bool "UART3"
-
-endchoice
-
 endmenu
 
 endif
diff --git a/arch/arm/mach-ep93xx/include/mach/debug-macro.S b/arch/arm/mach-ep93xx/include/mach/debug-macro.S
deleted file mode 100644
index af54e43132cf..000000000000
--- a/arch/arm/mach-ep93xx/include/mach/debug-macro.S
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * arch/arm/mach-ep93xx/include/mach/debug-macro.S
- * Debugging macro include header
- *
- * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- */
-#include <mach/ep93xx-regs.h>
-
-	.macro	addruart, rp, rv, tmp
-	ldr	\rp, =EP93XX_APB_PHYS_BASE	@ Physical base
-	ldr	\rv, =EP93XX_APB_VIRT_BASE	@ virtual base
-	orr	\rp, \rp, #0x000c0000
-	orr	\rv, \rv, #0x000c0000
-	.endm
-
-#include <asm/hardware/debug-pl01x.S>
diff --git a/arch/arm/mach-ep93xx/include/mach/uncompress.h b/arch/arm/mach-ep93xx/include/mach/uncompress.h
index b5cc77d2380b..03c42e5400d2 100644
--- a/arch/arm/mach-ep93xx/include/mach/uncompress.h
+++ b/arch/arm/mach-ep93xx/include/mach/uncompress.h
@@ -31,18 +31,8 @@ static void __raw_writel(unsigned int value, unsigned int ptr)
 	*((volatile unsigned int *)ptr) = value;
 }
 
-#if defined(CONFIG_EP93XX_EARLY_UART1)
-#define UART_BASE		EP93XX_UART1_PHYS_BASE
-#elif defined(CONFIG_EP93XX_EARLY_UART2)
-#define UART_BASE		EP93XX_UART2_PHYS_BASE
-#elif defined(CONFIG_EP93XX_EARLY_UART3)
-#define UART_BASE		EP93XX_UART3_PHYS_BASE
-#else
-#define UART_BASE		EP93XX_UART1_PHYS_BASE
-#endif
-
-#define PHYS_UART_DATA		(UART_BASE + 0x00)
-#define PHYS_UART_FLAG		(UART_BASE + 0x18)
+#define PHYS_UART_DATA		(CONFIG_DEBUG_UART_PHYS + 0x00)
+#define PHYS_UART_FLAG		(CONFIG_DEBUG_UART_PHYS + 0x18)
 #define UART_FLAG_TXFF		0x20
 
 static inline void putc(int c)
diff --git a/arch/arm/mach-footbridge/include/mach/debug-macro.S b/arch/arm/mach-footbridge/include/mach/debug-macro.S
index c169f0c99b2a..02247f313e94 100644
--- a/arch/arm/mach-footbridge/include/mach/debug-macro.S
+++ b/arch/arm/mach-footbridge/include/mach/debug-macro.S
@@ -13,20 +13,6 @@
 
 #include <asm/hardware/dec21285.h>
 
-#ifndef CONFIG_DEBUG_DC21285_PORT
-	/* For NetWinder debugging */
-	.macro	addruart, rp, rv, tmp
-	mov	\rp, #0x000003f8
-	orr	\rv, \rp, #0xfe000000	@ virtual
-	orr	\rv, \rv, #0x00e00000	@ virtual
-	orr	\rp, \rp, #0x7c000000	@ physical
-	.endm
-
-#define UART_SHIFT	0
-#define FLOW_CONTROL
-#include <asm/hardware/debug-8250.S>
-
-#else
 #include <mach/hardware.h>
 	/* For EBSA285 debugging */
 	.equ	dc21285_high, ARMCSR_BASE & 0xff000000
@@ -54,4 +40,3 @@
 
 	.macro	waituart,rd,rx
 	.endm
-#endif
diff --git a/arch/arm/mach-gemini/include/mach/debug-macro.S b/arch/arm/mach-gemini/include/mach/debug-macro.S
deleted file mode 100644
index 837670763b85..000000000000
--- a/arch/arm/mach-gemini/include/mach/debug-macro.S
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Debugging macro include header
- *
- * Copyright (C) 1994-1999 Russell King
- * Copyright (C) 2001-2006 Storlink, Corp.
- * Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <mach/hardware.h>
-
-	.macro	addruart, rp, rv, tmp
-	ldr	\rp, =GEMINI_UART_BASE			@ physical
-	ldr	\rv, =IO_ADDRESS(GEMINI_UART_BASE)	@ virtual
-	.endm
-
-#define UART_SHIFT	2
-#define FLOW_CONTROL
-#include <asm/hardware/debug-8250.S>
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c
index 7be13f8e69a0..a02f275a198d 100644
--- a/arch/arm/mach-imx/mach-imx6q.c
+++ b/arch/arm/mach-imx/mach-imx6q.c
@@ -254,13 +254,12 @@ static void __init imx6q_opp_init(struct device *cpu_dev)
 {
 	struct device_node *np;
 
-	np = of_find_node_by_path("/cpus/cpu@0");
+	np = of_node_get(cpu_dev->of_node);
 	if (!np) {
 		pr_warn("failed to find cpu0 node\n");
 		return;
 	}
 
-	cpu_dev->of_node = np;
 	if (of_init_opp_table(cpu_dev)) {
 		pr_warn("failed to init OPP table\n");
 		goto put_node;
diff --git a/arch/arm/mach-integrator/include/mach/debug-macro.S b/arch/arm/mach-integrator/include/mach/debug-macro.S
deleted file mode 100644
index 411b116077e4..000000000000
--- a/arch/arm/mach-integrator/include/mach/debug-macro.S
+++ /dev/null
@@ -1,20 +0,0 @@
-/* arch/arm/mach-integrator/include/mach/debug-macro.S
- *
- * Debugging macro include header
- *
- * Copyright (C) 1994-1999 Russell King
- * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
-*/
-
-	.macro	addruart, rp, rv, tmp
-	mov	\rp, #0x16000000	@ physical base address
-	mov	\rv, #0xf0000000	@ virtual base
-	add	\rv, \rv, #0x16000000 >> 4
-	.endm
-
-#include <asm/hardware/debug-pl01x.S>
diff --git a/arch/arm/mach-iop13xx/include/mach/debug-macro.S b/arch/arm/mach-iop13xx/include/mach/debug-macro.S
deleted file mode 100644
index d869a6f67e5c..000000000000
--- a/arch/arm/mach-iop13xx/include/mach/debug-macro.S
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * arch/arm/mach-iop13xx/include/mach/debug-macro.S
- *
- * Debugging macro include header
- *
- * Copyright (C) 1994-1999 Russell King
- * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-	.macro	addruart, rp, rv, tmp
-	mov	\rp, #0x00002300
-	orr	\rp, \rp, #0x00000040
-	orr	\rv, \rp, #0xfe000000	@ virtual
-	orr	\rv, \rv, #0x00e80000
-	orr	\rp, \rp, #0xff000000	@ physical
-	orr	\rp, \rp, #0x00d80000
-	.endm
-
-#define UART_SHIFT	2
-#include <asm/hardware/debug-8250.S>
diff --git a/arch/arm/mach-iop32x/include/mach/debug-macro.S b/arch/arm/mach-iop32x/include/mach/debug-macro.S
deleted file mode 100644
index 363bdf90b34d..000000000000
--- a/arch/arm/mach-iop32x/include/mach/debug-macro.S
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * arch/arm/mach-iop32x/include/mach/debug-macro.S
- *
- * Debugging macro include header
- *
- * Copyright (C) 1994-1999 Russell King
- * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-	.macro	addruart, rp, rv, tmp
-	mov	\rp, #0xfe000000	@ physical as well as virtual
-	orr	\rp, \rp, #0x00800000	@ location of the UART
-	mov	\rv, \rp
-	.endm
-
-#define UART_SHIFT	0
-#include <asm/hardware/debug-8250.S>
diff --git a/arch/arm/mach-iop33x/include/mach/debug-macro.S b/arch/arm/mach-iop33x/include/mach/debug-macro.S
deleted file mode 100644
index 361be1f6026e..000000000000
--- a/arch/arm/mach-iop33x/include/mach/debug-macro.S
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * arch/arm/mach-iop33x/include/mach/debug-macro.S
- *
- * Debugging macro include header
- *
- * Copyright (C) 1994-1999 Russell King
- * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-	.macro	addruart, rp, rv, tmp
-	mov	\rp, #0x00ff0000
-	orr	\rp, \rp, #0x0000f700
-	orr	\rv, #0xfe000000	@ virtual
-	orr	\rp, #0xff000000	@ physical
-	.endm
-
-#define UART_SHIFT	2
-#include <asm/hardware/debug-8250.S>
diff --git a/arch/arm/mach-ixp4xx/include/mach/debug-macro.S b/arch/arm/mach-ixp4xx/include/mach/debug-macro.S
deleted file mode 100644
index ff686cbc5df4..000000000000
--- a/arch/arm/mach-ixp4xx/include/mach/debug-macro.S
+++ /dev/null
@@ -1,26 +0,0 @@
-/* arch/arm/mach-ixp4xx/include/mach/debug-macro.S
- *
- * Debugging macro include header
- *
- * Copyright (C) 1994-1999 Russell King
- * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-	.macro	addruart, rp, rv, tmp
-#ifdef __ARMEB__
-	mov	\rp, #3		@ Uart regs are at off set of 3 if
-				@ byte writes used - Big Endian.
-#else
-	mov	\rp, #0
-#endif
-	orr	\rv, \rp, #0xfe000000	@ virtual
-	orr	\rv, \rv, #0x00f00000
-	orr	\rp, \rp, #0xc8000000	@ physical
-	.endm
-
-#define UART_SHIFT	2
-#include <asm/hardware/debug-8250.S>
diff --git a/arch/arm/mach-keystone/Kconfig b/arch/arm/mach-keystone/Kconfig
index 51a50e996840..366d1a3b418d 100644
--- a/arch/arm/mach-keystone/Kconfig
+++ b/arch/arm/mach-keystone/Kconfig
@@ -7,7 +7,6 @@ config ARCH_KEYSTONE
 	select HAVE_SMP
 	select CLKSRC_MMIO
 	select GENERIC_CLOCKEVENTS
-	select HAVE_SCHED_CLOCK
 	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select ARM_ERRATA_798181 if SMP
 	help
diff --git a/arch/arm/mach-keystone/platsmp.c b/arch/arm/mach-keystone/platsmp.c
index 14378e3fef16..c12296157d4a 100644
--- a/arch/arm/mach-keystone/platsmp.c
+++ b/arch/arm/mach-keystone/platsmp.c
@@ -38,6 +38,5 @@ static int keystone_smp_boot_secondary(unsigned int cpu,
 }
 
 struct smp_operations keystone_smp_ops __initdata = {
-	.smp_init_cpus		= arm_dt_init_cpu_maps,
 	.smp_boot_secondary	= keystone_smp_boot_secondary,
 };
diff --git a/arch/arm/mach-keystone/smc.S b/arch/arm/mach-keystone/smc.S
index 9b9e4f7b241e..d15de8179fab 100644
--- a/arch/arm/mach-keystone/smc.S
+++ b/arch/arm/mach-keystone/smc.S
@@ -22,8 +22,7 @@
  * Return: Non zero value on failure
  */
 ENTRY(keystone_cpu_smc)
-	stmfd	sp!, {r4-r12, lr}
+	stmfd	sp!, {r4-r11, lr}
 	smc	#0
-	dsb
-	ldmfd	sp!, {r4-r12, pc}
+	ldmfd	sp!, {r4-r11, pc}
ENDPROC(keystone_cpu_smc)
diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
index e9238b5567ee..1663de090984 100644
--- a/arch/arm/mach-kirkwood/common.c
+++ b/arch/arm/mach-kirkwood/common.c
@@ -264,7 +264,7 @@ void __init kirkwood_clk_init(void)
 	orion_clkdev_add(NULL, MV_XOR_NAME ".1", xor1);
 	orion_clkdev_add("0", "pcie", pex0);
 	orion_clkdev_add("1", "pcie", pex1);
-	orion_clkdev_add(NULL, "kirkwood-i2s", audio);
+	orion_clkdev_add(NULL, "mvebu-audio", audio);
 	orion_clkdev_add(NULL, MV64XXX_I2C_CTLR_NAME ".0", runit);
 	orion_clkdev_add(NULL, MV64XXX_I2C_CTLR_NAME ".1", runit);
 
@@ -560,7 +560,7 @@ void __init kirkwood_timer_init(void)
 /*****************************************************************************
  * Audio
  ****************************************************************************/
-static struct resource kirkwood_i2s_resources[] = {
+static struct resource kirkwood_audio_resources[] = {
 	[0] = {
 		.start  = AUDIO_PHYS_BASE,
 		.end    = AUDIO_PHYS_BASE + SZ_16K - 1,
@@ -573,29 +573,23 @@ static struct resource kirkwood_i2s_resources[] = {
 	},
 };
 
-static struct kirkwood_asoc_platform_data kirkwood_i2s_data = {
+static struct kirkwood_asoc_platform_data kirkwood_audio_data = {
 	.burst       = 128,
 };
 
-static struct platform_device kirkwood_i2s_device = {
-	.name		= "kirkwood-i2s",
+static struct platform_device kirkwood_audio_device = {
+	.name		= "mvebu-audio",
 	.id		= -1,
-	.num_resources	= ARRAY_SIZE(kirkwood_i2s_resources),
-	.resource	= kirkwood_i2s_resources,
+	.num_resources	= ARRAY_SIZE(kirkwood_audio_resources),
+	.resource	= kirkwood_audio_resources,
 	.dev		= {
-		.platform_data	= &kirkwood_i2s_data,
+		.platform_data	= &kirkwood_audio_data,
 	},
 };
 
-static struct platform_device kirkwood_pcm_device = {
-	.name		= "kirkwood-pcm-audio",
-	.id		= -1,
-};
-
 void __init kirkwood_audio_init(void)
 {
-	platform_device_register(&kirkwood_i2s_device);
-	platform_device_register(&kirkwood_pcm_device);
+	platform_device_register(&kirkwood_audio_device);
 }
 
 /*****************************************************************************
diff --git a/arch/arm/mach-kirkwood/include/mach/debug-macro.S b/arch/arm/mach-kirkwood/include/mach/debug-macro.S
deleted file mode 100644
index f785d401a607..000000000000
--- a/arch/arm/mach-kirkwood/include/mach/debug-macro.S
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * arch/arm/mach-kirkwood/include/mach/debug-macro.S
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <mach/bridge-regs.h>
-
-	.macro	addruart, rp, rv, tmp
-	ldr	\rp, =KIRKWOOD_REGS_PHYS_BASE
-	ldr	\rv, =KIRKWOOD_REGS_VIRT_BASE
-	orr	\rp, \rp, #0x00012000
-	orr	\rv, \rv, #0x00012000
-	.endm
-
-#define UART_SHIFT	2
-#include <asm/hardware/debug-8250.S>
diff --git a/arch/arm/mach-lpc32xx/include/mach/debug-macro.S b/arch/arm/mach-lpc32xx/include/mach/debug-macro.S
deleted file mode 100644
index 351bd6c84909..000000000000
--- a/arch/arm/mach-lpc32xx/include/mach/debug-macro.S
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * arch/arm/mach-lpc32xx/include/mach/debug-macro.S
- *
- * Author: Kevin Wells <kevin.wells@nxp.com>
- *
- * Copyright (C) 2010 NXP Semiconductors
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-/*
- * Debug output is hardcoded to standard UART 5
-*/
-
-	.macro	addruart, rp, rv, tmp
-	ldreq	\rp, =0x40090000
-	ldrne	\rv, =0xF4090000
-	.endm
-
-#define UART_SHIFT	2
-#include <asm/hardware/debug-8250.S>
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index 614e41e7881b..905efc8cac79 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -121,8 +121,7 @@ config MSM_SMD
 	bool
 
 config MSM_GPIOMUX
-	depends on !(ARCH_MSM8X60 || ARCH_MSM8960)
-	bool "MSM V1 TLMM GPIOMUX architecture"
+	bool
 	help
 	  Support for MSM V1 TLMM GPIOMUX architecture.
 
diff --git a/arch/arm/mach-msm/devices-msm7x00.c b/arch/arm/mach-msm/devices-msm7x00.c
index 6d50fb964863..d83404d4b328 100644
--- a/arch/arm/mach-msm/devices-msm7x00.c
+++ b/arch/arm/mach-msm/devices-msm7x00.c
@@ -456,9 +456,9 @@ static struct clk_pcom_desc msm_clocks_7x01a[] = {
 	CLK_PCOM("tsif_ref_clk", TSIF_REF_CLK,	NULL, 0),
 	CLK_PCOM("tv_dac_clk",	TV_DAC_CLK,	NULL, 0),
 	CLK_PCOM("tv_enc_clk",	TV_ENC_CLK,	NULL, 0),
-	CLK_PCOM("uart_clk",	UART1_CLK,	"msm_serial.0", OFF),
-	CLK_PCOM("uart_clk",	UART2_CLK,	"msm_serial.1", 0),
-	CLK_PCOM("uart_clk",	UART3_CLK,	"msm_serial.2", OFF),
+	CLK_PCOM("core",	UART1_CLK,	"msm_serial.0", OFF),
+	CLK_PCOM("core",	UART2_CLK,	"msm_serial.1", 0),
+	CLK_PCOM("core",	UART3_CLK,	"msm_serial.2", OFF),
 	CLK_PCOM("uart1dm_clk", UART1DM_CLK,	NULL, OFF),
 	CLK_PCOM("uart2dm_clk", UART2DM_CLK,	NULL, 0),
 	CLK_PCOM("usb_hs_clk",	USB_HS_CLK,	"msm_hsusb", OFF),
diff --git a/arch/arm/mach-msm/devices-msm7x30.c b/arch/arm/mach-msm/devices-msm7x30.c
index 6b0e9845e753..c15ea8ab20a7 100644
--- a/arch/arm/mach-msm/devices-msm7x30.c
+++ b/arch/arm/mach-msm/devices-msm7x30.c
@@ -211,7 +211,7 @@ static struct clk_pcom_desc msm_clocks_7x30[] = {
 	CLK_PCOM("spi_pclk",	SPI_P_CLK,	NULL, 0),
 	CLK_PCOM("tv_dac_clk",	TV_DAC_CLK,	NULL, 0),
 	CLK_PCOM("tv_enc_clk",	TV_ENC_CLK,	NULL, 0),
-	CLK_PCOM("uart_clk",	UART2_CLK,	"msm_serial.1", 0),
+	CLK_PCOM("core",	UART2_CLK,	"msm_serial.1", 0),
 	CLK_PCOM("usb_phy_clk",	USB_PHY_CLK,	NULL, 0),
 	CLK_PCOM("usb_hs_clk",	USB_HS_CLK,	NULL, OFF),
 	CLK_PCOM("usb_hs_pclk",	USB_HS_P_CLK,	NULL, OFF),
diff --git a/arch/arm/mach-msm/devices-qsd8x50.c b/arch/arm/mach-msm/devices-qsd8x50.c
index c1c45ad2bacb..9e1e9ce07b1a 100644
--- a/arch/arm/mach-msm/devices-qsd8x50.c
+++ b/arch/arm/mach-msm/devices-qsd8x50.c
@@ -358,9 +358,9 @@ static struct clk_pcom_desc msm_clocks_8x50[] = {
 	CLK_PCOM("tsif_ref_clk", TSIF_REF_CLK,	NULL, 0),
 	CLK_PCOM("tv_dac_clk",	TV_DAC_CLK,	NULL, 0),
 	CLK_PCOM("tv_enc_clk",	TV_ENC_CLK,	NULL, 0),
-	CLK_PCOM("uart_clk",	UART1_CLK,	NULL, OFF),
-	CLK_PCOM("uart_clk",	UART2_CLK,	NULL, 0),
-	CLK_PCOM("uart_clk",	UART3_CLK,	"msm_serial.2", OFF),
+	CLK_PCOM("core",	UART1_CLK,	NULL, OFF),
+	CLK_PCOM("core",	UART2_CLK,	NULL, 0),
+	CLK_PCOM("core",	UART3_CLK,	"msm_serial.2", OFF),
 	CLK_PCOM("uartdm_clk",	UART1DM_CLK,	NULL, OFF),
 	CLK_PCOM("uartdm_clk",	UART2DM_CLK,	NULL, 0),
 	CLK_PCOM("usb_hs_clk",	USB_HS_CLK,	NULL, OFF),
diff --git a/arch/arm/mach-msm/gpiomux-v1.c b/arch/arm/mach-msm/gpiomux-v1.c
deleted file mode 100644
index 27de2abd7144..000000000000
--- a/arch/arm/mach-msm/gpiomux-v1.c
+++ /dev/null
@@ -1,33 +0,0 @@
-/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-#include <linux/kernel.h>
-#include "gpiomux.h"
-#include "proc_comm.h"
-
-void __msm_gpiomux_write(unsigned gpio, gpiomux_config_t val)
-{
-	unsigned tlmm_config = (val & ~GPIOMUX_CTL_MASK) |
-			       ((gpio & 0x3ff) << 4);
-	unsigned tlmm_disable = 0;
-	int rc;
-
-	rc = msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX,
-			   &tlmm_config, &tlmm_disable);
-	if (rc)
-		pr_err("%s: unexpected proc_comm failure %d: %08x %08x\n",
-		       __func__, rc, tlmm_config, tlmm_disable);
-}
diff --git a/arch/arm/mach-msm/gpiomux.h b/arch/arm/mach-msm/gpiomux.h
index 8e82f41a8923..4410d7766f93 100644
--- a/arch/arm/mach-msm/gpiomux.h
+++ b/arch/arm/mach-msm/gpiomux.h
@@ -73,16 +73,6 @@ extern struct msm_gpiomux_config msm_gpiomux_configs[GPIOMUX_NGPIOS];
 int msm_gpiomux_write(unsigned gpio,
 		      gpiomux_config_t active,
 		      gpiomux_config_t suspended);
-
-/* Architecture-internal function for use by the framework only.
- * This function can assume the following:
- * - the gpio value has passed a bounds-check
- * - the gpiomux spinlock has been obtained
- *
- * This function is not for public consumption.  External users
- * should use msm_gpiomux_write.
- */
-void __msm_gpiomux_write(unsigned gpio, gpiomux_config_t val);
 #else
 static inline int msm_gpiomux_write(unsigned gpio,
 				    gpiomux_config_t active,
diff --git a/arch/arm/mach-mv78xx0/include/mach/debug-macro.S b/arch/arm/mach-mv78xx0/include/mach/debug-macro.S
deleted file mode 100644
index a7df02b049b7..000000000000
--- a/arch/arm/mach-mv78xx0/include/mach/debug-macro.S
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * arch/arm/mach-mv78xx0/include/mach/debug-macro.S
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <mach/mv78xx0.h>
-
-	.macro	addruart, rp, rv, tmp
-	ldr	\rp, =MV78XX0_REGS_PHYS_BASE
-	ldr	\rv, =MV78XX0_REGS_VIRT_BASE
-	orr	\rp, \rp, #0x00012000
-	orr	\rv, \rv, #0x00012000
-	.endm
-
-#define UART_SHIFT	2
-#include <asm/hardware/debug-8250.S>
diff --git a/arch/arm/mach-mvebu/platsmp.c b/arch/arm/mach-mvebu/platsmp.c
index 2e4508a9baf6..f9c09b75d4d7 100644
--- a/arch/arm/mach-mvebu/platsmp.c
+++ b/arch/arm/mach-mvebu/platsmp.c
@@ -29,45 +29,40 @@
 #include "pmsu.h"
 #include "coherency.h"
 
+static struct clk *__init get_cpu_clk(int cpu)
+{
+	struct clk *cpu_clk;
+	struct device_node *np = of_get_cpu_node(cpu, NULL);
+
+	if (WARN(!np, "missing cpu node\n"))
+		return NULL;
+	cpu_clk = of_clk_get(np, 0);
+	if (WARN_ON(IS_ERR(cpu_clk)))
+		return NULL;
+	return cpu_clk;
+}
+
 void __init set_secondary_cpus_clock(void)
 {
-	int thiscpu;
+	int thiscpu, cpu;
 	unsigned long rate;
-	struct clk *cpu_clk = NULL;
-	struct device_node *np = NULL;
+	struct clk *cpu_clk;
 
 	thiscpu = smp_processor_id();
-	for_each_node_by_type(np, "cpu") {
-		int err;
-		int cpu;
-
-		err = of_property_read_u32(np, "reg", &cpu);
-		if (WARN_ON(err))
-			return;
-
-		if (cpu == thiscpu) {
-			cpu_clk = of_clk_get(np, 0);
-			break;
-		}
-	}
-	if (WARN_ON(IS_ERR(cpu_clk)))
+	cpu_clk = get_cpu_clk(thiscpu);
+	if (!cpu_clk)
 		return;
 	clk_prepare_enable(cpu_clk);
 	rate = clk_get_rate(cpu_clk);
 
 	/* set all the other CPU clk to the same rate than the boot CPU */
-	for_each_node_by_type(np, "cpu") {
-		int err;
-		int cpu;
-
-		err = of_property_read_u32(np, "reg", &cpu);
-		if (WARN_ON(err))
+	for_each_possible_cpu(cpu) {
+		if (cpu == thiscpu)
+			continue;
+		cpu_clk = get_cpu_clk(cpu);
+		if (!cpu_clk)
 			return;
-
-		if (cpu != thiscpu) {
-			cpu_clk = of_clk_get(np, 0);
-			clk_set_rate(cpu_clk, rate);
-		}
+		clk_set_rate(cpu_clk, rate);
 	}
 }
 
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 76170dd4d88f..56021c67c89c 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -64,7 +64,7 @@ config SOC_OMAP5
 	select ARM_ERRATA_798181 if SMP
 
 config SOC_AM33XX
-	bool "AM33XX support"
+	bool "TI AM33XX"
 	depends on ARCH_MULTI_V7
 	select ARCH_OMAP2PLUS
 	select ARM_CPU_SUSPEND if PM
diff --git a/arch/arm/mach-omap2/am33xx-restart.c b/arch/arm/mach-omap2/am33xx-restart.c
index 1eae96212315..c88d8df753c2 100644
--- a/arch/arm/mach-omap2/am33xx-restart.c
+++ b/arch/arm/mach-omap2/am33xx-restart.c
@@ -24,8 +24,8 @@ void am33xx_restart(enum reboot_mode mode, const char *cmd)
 {
 	/* TODO: Handle mode and cmd if necessary */
 
-	am33xx_prm_rmw_reg_bits(AM33XX_GLOBAL_WARM_SW_RST_MASK,
-				AM33XX_GLOBAL_WARM_SW_RST_MASK,
+	am33xx_prm_rmw_reg_bits(AM33XX_RST_GLOBAL_WARM_SW_MASK,
+				AM33XX_RST_GLOBAL_WARM_SW_MASK,
 				AM33XX_PRM_DEVICE_MOD,
 				AM33XX_PRM_RSTCTRL_OFFSET);
 
diff --git a/arch/arm/mach-omap2/board-2430sdp.c b/arch/arm/mach-omap2/board-2430sdp.c
index 244d8a5aa54b..c711ad6ac067 100644
--- a/arch/arm/mach-omap2/board-2430sdp.c
+++ b/arch/arm/mach-omap2/board-2430sdp.c
@@ -100,39 +100,52 @@ static struct platform_device sdp2430_flash_device = {
 	.resource	= &sdp2430_flash_resource,
 };
 
-static struct platform_device *sdp2430_devices[] __initdata = {
-	&sdp2430_flash_device,
-};
-
 /* LCD */
 #define SDP2430_LCD_PANEL_BACKLIGHT_GPIO	91
 #define SDP2430_LCD_PANEL_ENABLE_GPIO		154
 
-static struct panel_generic_dpi_data sdp2430_panel_data = {
-	.name			= "nec_nl2432dr22-11b",
-	.num_gpios		= 2,
-	.gpios			= {
-		SDP2430_LCD_PANEL_ENABLE_GPIO,
-		SDP2430_LCD_PANEL_BACKLIGHT_GPIO,
-	},
+static const struct display_timing sdp2430_lcd_videomode = {
+	.pixelclock	= { 0, 5400000, 0 },
+
+	.hactive = { 0, 240, 0 },
+	.hfront_porch = { 0, 3, 0 },
+	.hback_porch = { 0, 39, 0 },
+	.hsync_len = { 0, 3, 0 },
+
+	.vactive = { 0, 320, 0 },
+	.vfront_porch = { 0, 2, 0 },
+	.vback_porch = { 0, 7, 0 },
+	.vsync_len = { 0, 1, 0 },
+
+	.flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+		DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE,
 };
 
-static struct omap_dss_device sdp2430_lcd_device = {
+static struct panel_dpi_platform_data sdp2430_lcd_pdata = {
 	.name			= "lcd",
-	.driver_name		= "generic_dpi_panel",
-	.type			= OMAP_DISPLAY_TYPE_DPI,
-	.phy.dpi.data_lines	= 16,
-	.data			= &sdp2430_panel_data,
+	.source			= "dpi.0",
+
+	.data_lines		= 16,
+
+	.display_timing		= &sdp2430_lcd_videomode,
+
+	.enable_gpio		= SDP2430_LCD_PANEL_ENABLE_GPIO,
+	.backlight_gpio		= SDP2430_LCD_PANEL_BACKLIGHT_GPIO,
 };
 
-static struct omap_dss_device *sdp2430_dss_devices[] = {
-	&sdp2430_lcd_device,
+static struct platform_device sdp2430_lcd_device = {
+	.name			= "panel-dpi",
+	.id			= 0,
+	.dev.platform_data	= &sdp2430_lcd_pdata,
 };
 
 static struct omap_dss_board_info sdp2430_dss_data = {
-	.num_devices	= ARRAY_SIZE(sdp2430_dss_devices),
-	.devices	= sdp2430_dss_devices,
-	.default_device	= &sdp2430_lcd_device,
+	.default_display_name = "lcd",
+};
+
+static struct platform_device *sdp2430_devices[] __initdata = {
+	&sdp2430_flash_device,
+	&sdp2430_lcd_device,
 };
 
 #if IS_ENABLED(CONFIG_SMC91X)
diff --git a/arch/arm/mach-omap2/board-3430sdp.c b/arch/arm/mach-omap2/board-3430sdp.c
index 23b004afa3f8..d95d0ef1354a 100644
--- a/arch/arm/mach-omap2/board-3430sdp.c
+++ b/arch/arm/mach-omap2/board-3430sdp.c
@@ -126,53 +126,65 @@ static void __init sdp3430_display_init(void)
 
 }
 
-static struct panel_sharp_ls037v7dw01_data sdp3430_lcd_data = {
-	.resb_gpio		= SDP3430_LCD_PANEL_ENABLE_GPIO,
-	.ini_gpio		= -1,
-	.mo_gpio		= -1,
-	.lr_gpio		= -1,
-	.ud_gpio		= -1,
+static struct panel_sharp_ls037v7dw01_platform_data sdp3430_lcd_pdata = {
+	.name			= "lcd",
+	.source			= "dpi.0",
+
+	.data_lines		= 16,
+
+	.resb_gpio		= SDP3430_LCD_PANEL_ENABLE_GPIO,
+	.ini_gpio		= -1,
+	.mo_gpio		= -1,
+	.lr_gpio		= -1,
+	.ud_gpio		= -1,
+};
+
+static struct platform_device sdp3430_lcd_device = {
+	.name			= "panel-sharp-ls037v7dw01",
+	.id			= 0,
+	.dev.platform_data	= &sdp3430_lcd_pdata,
 };
 
-static struct omap_dss_device sdp3430_lcd_device = {
-	.name			= "lcd",
-	.driver_name		= "sharp_ls_panel",
-	.type			= OMAP_DISPLAY_TYPE_DPI,
-	.phy.dpi.data_lines	= 16,
-	.data			= &sdp3430_lcd_data,
+static struct connector_dvi_platform_data sdp3430_dvi_connector_pdata = {
+	.name			= "dvi",
+	.source			= "tfp410.0",
+	.i2c_bus_num		= -1,
 };
 
-static struct tfp410_platform_data dvi_panel = {
-	.power_down_gpio	= -1,
-	.i2c_bus_num		= -1,
+static struct platform_device sdp3430_dvi_connector_device = {
+	.name			= "connector-dvi",
+	.id			= 0,
+	.dev.platform_data	= &sdp3430_dvi_connector_pdata,
 };
 
-static struct omap_dss_device sdp3430_dvi_device = {
-	.name			= "dvi",
-	.type			= OMAP_DISPLAY_TYPE_DPI,
-	.driver_name		= "tfp410",
-	.data			= &dvi_panel,
-	.phy.dpi.data_lines	= 24,
+static struct encoder_tfp410_platform_data sdp3430_tfp410_pdata = {
+	.name			= "tfp410.0",
+	.source			= "dpi.0",
+	.data_lines		= 24,
+	.power_down_gpio	= -1,
 };
 
-static struct omap_dss_device sdp3430_tv_device = {
-	.name			= "tv",
-	.driver_name		= "venc",
-	.type			= OMAP_DISPLAY_TYPE_VENC,
-	.phy.venc.type		= OMAP_DSS_VENC_TYPE_SVIDEO,
+static struct platform_device sdp3430_tfp410_device = {
+	.name			= "tfp410",
+	.id			= 0,
+	.dev.platform_data	= &sdp3430_tfp410_pdata,
 };
 
+static struct connector_atv_platform_data sdp3430_tv_pdata = {
+	.name = "tv",
+	.source = "venc.0",
+	.connector_type = OMAP_DSS_VENC_TYPE_SVIDEO,
+	.invert_polarity = false,
+};
 
-static struct omap_dss_device *sdp3430_dss_devices[] = {
-	&sdp3430_lcd_device,
-	&sdp3430_dvi_device,
-	&sdp3430_tv_device,
+static struct platform_device sdp3430_tv_connector_device = {
+	.name			= "connector-analog-tv",
+	.id			= 0,
+	.dev.platform_data	= &sdp3430_tv_pdata,
 };
 
 static struct omap_dss_board_info sdp3430_dss_data = {
-	.num_devices	= ARRAY_SIZE(sdp3430_dss_devices),
-	.devices	= sdp3430_dss_devices,
-	.default_device	= &sdp3430_lcd_device,
+	.default_display_name = "lcd",
 };
 
 static struct omap2_hsmmc_info mmc[] = {
@@ -583,6 +595,11 @@ static void __init omap_3430sdp_init(void)
 	omap_hsmmc_init(mmc);
 	omap3430_i2c_init();
 	omap_display_init(&sdp3430_dss_data);
+	platform_device_register(&sdp3430_lcd_device);
+	platform_device_register(&sdp3430_tfp410_device);
+	platform_device_register(&sdp3430_dvi_connector_device);
+	platform_device_register(&sdp3430_tv_connector_device);
+
 	if (omap_rev() > OMAP3430_REV_ES1_0)
 		gpio_pendown = SDP3430_TS_GPIO_IRQ_SDPV2;
 	else
diff --git a/arch/arm/mach-omap2/board-am3517crane.c b/arch/arm/mach-omap2/board-am3517crane.c
index fc53911d0d13..0d499a1878f6 100644
--- a/arch/arm/mach-omap2/board-am3517crane.c
+++ b/arch/arm/mach-omap2/board-am3517crane.c
@@ -110,8 +110,6 @@ static void __init am3517_crane_i2c_init(void)
 
 static void __init am3517_crane_init(void)
 {
-	int ret;
-
 	omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
 	omap_serial_init();
 	omap_sdrc_init(NULL, NULL);
diff --git a/arch/arm/mach-omap2/board-am3517evm.c b/arch/arm/mach-omap2/board-am3517evm.c
index c7196a69ff37..543d9a882de3 100644
--- a/arch/arm/mach-omap2/board-am3517evm.c
+++ b/arch/arm/mach-omap2/board-am3517evm.c
@@ -120,56 +120,95 @@ static int __init am3517_evm_i2c_init(void)
 	return 0;
 }
 
-static struct panel_generic_dpi_data lcd_panel = {
-	.name = "sharp_lq",
-	.num_gpios = 3,
-	.gpios = {
-		LCD_PANEL_PWR,
-		LCD_PANEL_BKLIGHT_PWR,
-		LCD_PANEL_PWM,
-	},
+static const struct display_timing am3517_evm_lcd_videomode = {
+	.pixelclock = { 0, 9000000, 0 },
+
+	.hactive = { 0, 480, 0 },
+	.hfront_porch = { 0, 3, 0 },
+	.hback_porch = { 0, 2, 0 },
+	.hsync_len = { 0, 42, 0 },
+
+	.vactive = { 0, 272, 0 },
+	.vfront_porch = { 0, 3, 0 },
+	.vback_porch = { 0, 2, 0 },
+	.vsync_len = { 0, 11, 0 },
+
+	.flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+		DISPLAY_FLAGS_DE_LOW | DISPLAY_FLAGS_PIXDATA_POSEDGE,
+};
+
+static struct panel_dpi_platform_data am3517_evm_lcd_pdata = {
+	.name = "lcd",
+	.source = "dpi.0",
+
+	.data_lines = 16,
+
+	.display_timing = &am3517_evm_lcd_videomode,
+
+	.enable_gpio = LCD_PANEL_PWR,
+	.backlight_gpio = LCD_PANEL_BKLIGHT_PWR,
+};
+
+static struct platform_device am3517_evm_lcd_device = {
+	.name = "panel-dpi",
+	.id = 0,
+	.dev.platform_data = &am3517_evm_lcd_pdata,
 };
 
-static struct omap_dss_device am3517_evm_lcd_device = {
-	.type = OMAP_DISPLAY_TYPE_DPI,
-	.name = "lcd",
-	.driver_name = "generic_dpi_panel",
-	.data = &lcd_panel,
-	.phy.dpi.data_lines = 16,
+static struct connector_dvi_platform_data am3517_evm_dvi_connector_pdata = {
+	.name = "dvi",
+	.source = "tfp410.0",
+	.i2c_bus_num = -1,
 };
 
-static struct omap_dss_device am3517_evm_tv_device = {
-	.type = OMAP_DISPLAY_TYPE_VENC,
-	.name = "tv",
-	.driver_name = "venc",
-	.phy.venc.type = OMAP_DSS_VENC_TYPE_SVIDEO,
+static struct platform_device am3517_evm_dvi_connector_device = {
+	.name = "connector-dvi",
+	.id = 0,
+	.dev.platform_data = &am3517_evm_dvi_connector_pdata,
 };
 
-static struct tfp410_platform_data dvi_panel = {
-	.power_down_gpio = -1,
-	.i2c_bus_num = -1,
+static struct encoder_tfp410_platform_data am3517_evm_tfp410_pdata = {
+	.name = "tfp410.0",
+	.source = "dpi.0",
+	.data_lines = 24,
+	.power_down_gpio = -1,
 };
 
-static struct omap_dss_device am3517_evm_dvi_device = {
-	.type = OMAP_DISPLAY_TYPE_DPI,
-	.name = "dvi",
-	.driver_name = "tfp410",
-	.data = &dvi_panel,
-	.phy.dpi.data_lines = 24,
+static struct platform_device am3517_evm_tfp410_device = {
+	.name = "tfp410",
+	.id = 0,
+	.dev.platform_data = &am3517_evm_tfp410_pdata,
 };
 
-static struct omap_dss_device *am3517_evm_dss_devices[] = {
-	&am3517_evm_lcd_device,
-	&am3517_evm_tv_device,
-	&am3517_evm_dvi_device,
+static struct connector_atv_platform_data am3517_evm_tv_pdata = {
+	.name = "tv",
+	.source = "venc.0",
+	.connector_type = OMAP_DSS_VENC_TYPE_SVIDEO,
+	.invert_polarity = false,
+};
+
+static struct platform_device am3517_evm_tv_connector_device = {
+	.name = "connector-analog-tv",
+	.id = 0,
+	.dev.platform_data = &am3517_evm_tv_pdata,
 };
 
 static struct omap_dss_board_info am3517_evm_dss_data = {
-	.num_devices = ARRAY_SIZE(am3517_evm_dss_devices),
-	.devices = am3517_evm_dss_devices,
-	.default_device = &am3517_evm_lcd_device,
+	.default_display_name = "lcd",
 };
 
+static void __init am3517_evm_display_init(void)
+{
+	gpio_request_one(LCD_PANEL_PWM, GPIOF_OUT_INIT_HIGH, "lcd panel pwm");
+
+	omap_display_init(&am3517_evm_dss_data);
+
+	platform_device_register(&am3517_evm_tfp410_device);
+	platform_device_register(&am3517_evm_dvi_connector_device);
+	platform_device_register(&am3517_evm_lcd_device);
+	platform_device_register(&am3517_evm_tv_connector_device);
+}
+
 /*
  * Board initialization
  */
@@ -295,7 +334,9 @@ static void __init am3517_evm_init(void)
 	omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
 
 	am3517_evm_i2c_init();
-	omap_display_init(&am3517_evm_dss_data);
+
+	am3517_evm_display_init();
+
 	omap_serial_init();
 	omap_sdrc_init(NULL, NULL);
 
diff --git a/arch/arm/mach-omap2/board-cm-t35.c b/arch/arm/mach-omap2/board-cm-t35.c
index d4622ed26252..33d159e2386e 100644
--- a/arch/arm/mach-omap2/board-cm-t35.c
+++ b/arch/arm/mach-omap2/board-cm-t35.c
@@ -190,52 +190,81 @@ static inline void cm_t35_init_nand(void) {}
 #define CM_T35_LCD_BL_GPIO	58
 #define CM_T35_DVI_EN_GPIO	54
 
-static struct panel_generic_dpi_data lcd_panel = {
-	.name = "toppoly_tdo35s",
-	.num_gpios = 1,
-	.gpios = {
-		CM_T35_LCD_BL_GPIO,
-	},
+static const struct display_timing cm_t35_lcd_videomode = {
+	.pixelclock = { 0, 26000000, 0 },
+
+	.hactive = { 0, 480, 0 },
+	.hfront_porch = { 0, 104, 0 },
+	.hback_porch = { 0, 8, 0 },
+	.hsync_len = { 0, 8, 0 },
+
+	.vactive = { 0, 640, 0 },
+	.vfront_porch = { 0, 4, 0 },
+	.vback_porch = { 0, 2, 0 },
+	.vsync_len = { 0, 2, 0 },
+
+	.flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+		DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_NEGEDGE,
+};
+
+static struct panel_dpi_platform_data cm_t35_lcd_pdata = {
+	.name = "lcd",
+	.source = "dpi.0",
+
+	.data_lines = 18,
+
+	.display_timing = &cm_t35_lcd_videomode,
+
+	.enable_gpio = -1,
+	.backlight_gpio = CM_T35_LCD_BL_GPIO,
+};
+
+static struct platform_device cm_t35_lcd_device = {
+	.name = "panel-dpi",
+	.id = 0,
+	.dev.platform_data = &cm_t35_lcd_pdata,
 };
 
-static struct omap_dss_device cm_t35_lcd_device = {
-	.name = "lcd",
-	.type = OMAP_DISPLAY_TYPE_DPI,
-	.driver_name = "generic_dpi_panel",
-	.data = &lcd_panel,
-	.phy.dpi.data_lines = 18,
+static struct connector_dvi_platform_data cm_t35_dvi_connector_pdata = {
+	.name = "dvi",
+	.source = "tfp410.0",
+	.i2c_bus_num = -1,
 };
 
-static struct tfp410_platform_data dvi_panel = {
-	.power_down_gpio = CM_T35_DVI_EN_GPIO,
-	.i2c_bus_num = -1,
+static struct platform_device cm_t35_dvi_connector_device = {
+	.name = "connector-dvi",
+	.id = 0,
+	.dev.platform_data = &cm_t35_dvi_connector_pdata,
 };
 
-static struct omap_dss_device cm_t35_dvi_device = {
-	.name = "dvi",
-	.type = OMAP_DISPLAY_TYPE_DPI,
-	.driver_name = "tfp410",
-	.data = &dvi_panel,
-	.phy.dpi.data_lines = 24,
+static struct encoder_tfp410_platform_data cm_t35_tfp410_pdata = {
+	.name = "tfp410.0",
+	.source = "dpi.0",
+	.data_lines = 24,
+	.power_down_gpio = CM_T35_DVI_EN_GPIO,
 };
 
-static struct omap_dss_device cm_t35_tv_device = {
-	.name = "tv",
-	.driver_name = "venc",
-	.type = OMAP_DISPLAY_TYPE_VENC,
-	.phy.venc.type = OMAP_DSS_VENC_TYPE_SVIDEO,
+static struct platform_device cm_t35_tfp410_device = {
+	.name = "tfp410",
+	.id = 0,
+	.dev.platform_data = &cm_t35_tfp410_pdata,
 };
 
-static struct omap_dss_device *cm_t35_dss_devices[] = {
-	&cm_t35_lcd_device,
-	&cm_t35_dvi_device,
-	&cm_t35_tv_device,
+static struct connector_atv_platform_data cm_t35_tv_pdata = {
+	.name = "tv",
+	.source = "venc.0",
+	.connector_type = OMAP_DSS_VENC_TYPE_SVIDEO,
+	.invert_polarity = false,
+};
+
+static struct platform_device cm_t35_tv_connector_device = {
+	.name = "connector-analog-tv",
+	.id = 0,
+	.dev.platform_data = &cm_t35_tv_pdata,
 };
 
 static struct omap_dss_board_info cm_t35_dss_data = {
-	.num_devices = ARRAY_SIZE(cm_t35_dss_devices),
-	.devices = cm_t35_dss_devices,
-	.default_device = &cm_t35_dvi_device,
+	.default_display_name = "dvi",
 };
 
 static struct omap2_mcspi_device_config tdo24m_mcspi_config = {
@@ -280,6 +309,11 @@ static void __init cm_t35_init_display(void)
 		pr_err("CM-T35: failed to register DSS device\n");
 		gpio_free(CM_T35_LCD_EN_GPIO);
 	}
+
+	platform_device_register(&cm_t35_tfp410_device);
+	platform_device_register(&cm_t35_dvi_connector_device);
+	platform_device_register(&cm_t35_lcd_device);
+	platform_device_register(&cm_t35_tv_connector_device);
 }
 
 static struct regulator_consumer_supply cm_t35_vmmc1_supply[] = {
diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c
index f1d91ba5d1ac..cdc4fb9960a9 100644
--- a/arch/arm/mach-omap2/board-devkit8000.c
+++ b/arch/arm/mach-omap2/board-devkit8000.c
@@ -112,50 +112,81 @@ static struct regulator_consumer_supply devkit8000_vio_supply[] = {
 	REGULATOR_SUPPLY("vcc", "spi2.0"),
 };
 
-static struct panel_generic_dpi_data lcd_panel = {
-	.name = "innolux_at070tn83",
-	/* gpios filled in code */
+static const struct display_timing devkit8000_lcd_videomode = {
+	.pixelclock = { 0, 40000000, 0 },
+
+	.hactive = { 0, 800, 0 },
+	.hfront_porch = { 0, 1, 0 },
+	.hback_porch = { 0, 1, 0 },
+	.hsync_len = { 0, 48, 0 },
+
+	.vactive = { 0, 480, 0 },
+	.vfront_porch = { 0, 12, 0 },
+	.vback_porch = { 0, 25, 0 },
+	.vsync_len = { 0, 3, 0 },
+
+	.flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+		DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE,
 };
 
-static struct omap_dss_device devkit8000_lcd_device = {
+static struct panel_dpi_platform_data devkit8000_lcd_pdata = {
 	.name = "lcd",
-	.type = OMAP_DISPLAY_TYPE_DPI,
-	.driver_name = "generic_dpi_panel",
-	.data = &lcd_panel,
-	.phy.dpi.data_lines = 24,
+	.source = "dpi.0",
+
+	.data_lines = 24,
+
+	.display_timing = &devkit8000_lcd_videomode,
+
+	.enable_gpio = -1,	/* filled in code */
+	.backlight_gpio = -1,
 };
 
-static struct tfp410_platform_data dvi_panel = {
-	.power_down_gpio = -1,
-	.i2c_bus_num = 1,
+static struct platform_device devkit8000_lcd_device = {
+	.name = "panel-dpi",
+	.id = 0,
+	.dev.platform_data = &devkit8000_lcd_pdata,
 };
 
-static struct omap_dss_device devkit8000_dvi_device = {
+static struct connector_dvi_platform_data devkit8000_dvi_connector_pdata = {
 	.name = "dvi",
-	.type = OMAP_DISPLAY_TYPE_DPI,
-	.driver_name = "tfp410",
-	.data = &dvi_panel,
-	.phy.dpi.data_lines = 24,
+	.source = "tfp410.0",
+	.i2c_bus_num = 1,
 };
 
-static struct omap_dss_device devkit8000_tv_device = {
-	.name = "tv",
-	.driver_name = "venc",
-	.type = OMAP_DISPLAY_TYPE_VENC,
-	.phy.venc.type = OMAP_DSS_VENC_TYPE_SVIDEO,
+static struct platform_device devkit8000_dvi_connector_device = {
+	.name = "connector-dvi",
+	.id = 0,
+	.dev.platform_data = &devkit8000_dvi_connector_pdata,
 };
 
+static struct encoder_tfp410_platform_data devkit8000_tfp410_pdata = {
+	.name = "tfp410.0",
+	.source = "dpi.0",
+	.data_lines = 24,
+	.power_down_gpio = -1,	/* filled in code */
+};
 
-static struct omap_dss_device *devkit8000_dss_devices[] = {
-	&devkit8000_lcd_device,
-	&devkit8000_dvi_device,
-	&devkit8000_tv_device,
+static struct platform_device devkit8000_tfp410_device = {
+	.name = "tfp410",
+	.id = 0,
+	.dev.platform_data = &devkit8000_tfp410_pdata,
+};
+
+static struct connector_atv_platform_data devkit8000_tv_pdata = {
+	.name = "tv",
+	.source = "venc.0",
+	.connector_type = OMAP_DSS_VENC_TYPE_SVIDEO,
+	.invert_polarity = false,
+};
+
+static struct platform_device devkit8000_tv_connector_device = {
+	.name = "connector-analog-tv",
+	.id = 0,
+	.dev.platform_data = &devkit8000_tv_pdata,
 };
 
 static struct omap_dss_board_info devkit8000_dss_data = {
-	.num_devices = ARRAY_SIZE(devkit8000_dss_devices),
-	.devices = devkit8000_dss_devices,
-	.default_device = &devkit8000_lcd_device,
+	.default_display_name = "lcd",
 };
 
 static uint32_t board_keymap[] = {
@@ -204,11 +235,10 @@ static int devkit8000_twl_gpio_setup(struct device *dev,
 	gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1;
 
 	/* TWL4030_GPIO_MAX + 0 is "LCD_PWREN" (out, active high) */
-	lcd_panel.num_gpios = 1;
-	lcd_panel.gpios[0] = gpio + TWL4030_GPIO_MAX + 0;
+	devkit8000_lcd_pdata.enable_gpio = gpio + TWL4030_GPIO_MAX + 0;
 
 	/* gpio + 7 is "DVI_PD" (out, active low) */
-	dvi_panel.power_down_gpio = gpio + 7;
+	devkit8000_tfp410_pdata.power_down_gpio = gpio + 7;
 
 	return 0;
 }
@@ -413,6 +443,10 @@ static struct platform_device *devkit8000_devices[] __initdata = {
 	&leds_gpio,
 	&keys_gpio,
 	&omap_dm9000_dev,
+	&devkit8000_lcd_device,
+	&devkit8000_tfp410_device,
+	&devkit8000_dvi_connector_device,
+	&devkit8000_tv_connector_device,
 };
 
 static struct usbhs_omap_platform_data usbhs_bdata __initdata = {
diff --git a/arch/arm/mach-omap2/board-h4.c b/arch/arm/mach-omap2/board-h4.c
index 69c0acf5aa63..87e41a8b8d46 100644
--- a/arch/arm/mach-omap2/board-h4.c
+++ b/arch/arm/mach-omap2/board-h4.c
@@ -194,30 +194,48 @@ static struct platform_device h4_flash_device = {
 	.resource = &h4_flash_resource,
 };
 
-static struct platform_device *h4_devices[] __initdata = {
-	&h4_flash_device,
+static const struct display_timing cm_t35_lcd_videomode = {
+	.pixelclock = { 0, 6250000, 0 },
+
+	.hactive = { 0, 240, 0 },
+	.hfront_porch = { 0, 15, 0 },
+	.hback_porch = { 0, 60, 0 },
+	.hsync_len = { 0, 15, 0 },
+
+	.vactive = { 0, 320, 0 },
+	.vfront_porch = { 0, 1, 0 },
+	.vback_porch = { 0, 1, 0 },
+	.vsync_len = { 0, 1, 0 },
+
+	.flags = DISPLAY_FLAGS_HSYNC_HIGH | DISPLAY_FLAGS_VSYNC_HIGH |
+		DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE,
 };
 
-static struct panel_generic_dpi_data h4_panel_data = {
-	.name = "h4",
+static struct panel_dpi_platform_data cm_t35_lcd_pdata = {
+	.name = "lcd",
+	.source = "dpi.0",
+
+	.data_lines = 16,
+
+	.display_timing = &cm_t35_lcd_videomode,
+
+	.enable_gpio = -1,
+	.backlight_gpio = -1,
 };
 
-static struct omap_dss_device h4_lcd_device = {
-	.name = "lcd",
-	.driver_name = "generic_dpi_panel",
-	.type = OMAP_DISPLAY_TYPE_DPI,
-	.phy.dpi.data_lines = 16,
-	.data = &h4_panel_data,
+static struct platform_device cm_t35_lcd_device = {
+	.name = "panel-dpi",
+	.id = 0,
+	.dev.platform_data = &cm_t35_lcd_pdata,
 };
 
-static struct omap_dss_device *h4_dss_devices[] = {
-	&h4_lcd_device,
+static struct platform_device *h4_devices[] __initdata = {
+	&h4_flash_device,
+	&cm_t35_lcd_device,
 };
 
 static struct omap_dss_board_info h4_dss_data = {
-	.num_devices = ARRAY_SIZE(h4_dss_devices),
-	.devices = h4_dss_devices,
-	.default_device = &h4_lcd_device,
+	.default_display_name = "lcd",
 };
 
 /* 2420 Sysboot setup (2430 is different) */
diff --git a/arch/arm/mach-omap2/board-igep0020.c b/arch/arm/mach-omap2/board-igep0020.c
index 87e65dde8e13..06dbb2d3d38b 100644
--- a/arch/arm/mach-omap2/board-igep0020.c
+++ b/arch/arm/mach-omap2/board-igep0020.c
@@ -429,31 +429,39 @@ static struct twl4030_gpio_platform_data igep_twl4030_gpio_pdata = {
 	.setup = igep_twl_gpio_setup,
 };
 
-static struct tfp410_platform_data dvi_panel = {
-	.i2c_bus_num = 3,
-	.power_down_gpio = IGEP2_GPIO_DVI_PUP,
+static struct connector_dvi_platform_data omap3stalker_dvi_connector_pdata = {
+	.name = "dvi",
+	.source = "tfp410.0",
+	.i2c_bus_num = 3,
 };
 
-static struct omap_dss_device igep2_dvi_device = {
-	.type = OMAP_DISPLAY_TYPE_DPI,
-	.name = "dvi",
-	.driver_name = "tfp410",
-	.data = &dvi_panel,
-	.phy.dpi.data_lines = 24,
+static struct platform_device omap3stalker_dvi_connector_device = {
+	.name = "connector-dvi",
+	.id = 0,
+	.dev.platform_data = &omap3stalker_dvi_connector_pdata,
 };
 
-static struct omap_dss_device *igep2_dss_devices[] = {
-	&igep2_dvi_device
+static struct encoder_tfp410_platform_data omap3stalker_tfp410_pdata = {
+	.name = "tfp410.0",
+	.source = "dpi.0",
+	.data_lines = 24,
+	.power_down_gpio = IGEP2_GPIO_DVI_PUP,
+};
+
+static struct platform_device omap3stalker_tfp410_device = {
+	.name = "tfp410",
+	.id = 0,
+	.dev.platform_data = &omap3stalker_tfp410_pdata,
 };
 
 static struct omap_dss_board_info igep2_dss_data = {
-	.num_devices = ARRAY_SIZE(igep2_dss_devices),
-	.devices = igep2_dss_devices,
-	.default_device = &igep2_dvi_device,
+	.default_display_name = "dvi",
 };
 
 static struct platform_device *igep_devices[] __initdata = {
 	&igep_vwlan_device,
+	&omap3stalker_tfp410_device,
+	&omap3stalker_dvi_connector_device,
 };
 
 static int igep2_keymap[] = {
diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c
index 62e4f701b63b..dd8da2c5399f 100644
--- a/arch/arm/mach-omap2/board-ldp.c
+++ b/arch/arm/mach-omap2/board-ldp.c
@@ -184,45 +184,70 @@ static inline void __init ldp_init_smsc911x(void)
 #define LCD_PANEL_RESET_GPIO	55
 #define LCD_PANEL_QVGA_GPIO	56
 
-static struct panel_generic_dpi_data ldp_panel_data = {
-	.name = "nec_nl2432dr22-11b",
-	.num_gpios = 4,
-	/* gpios filled in code */
+static const struct display_timing ldp_lcd_videomode = {
+	.pixelclock = { 0, 5400000, 0 },
+
+	.hactive = { 0, 240, 0 },
+	.hfront_porch = { 0, 3, 0 },
+	.hback_porch = { 0, 39, 0 },
+	.hsync_len = { 0, 3, 0 },
+
+	.vactive = { 0, 320, 0 },
+	.vfront_porch = { 0, 2, 0 },
+	.vback_porch = { 0, 7, 0 },
+	.vsync_len = { 0, 1, 0 },
+
+	.flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+		DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE,
 };
 
-static struct omap_dss_device ldp_lcd_device = {
+static struct panel_dpi_platform_data ldp_lcd_pdata = {
 	.name = "lcd",
-	.driver_name = "generic_dpi_panel",
-	.type = OMAP_DISPLAY_TYPE_DPI,
-	.phy.dpi.data_lines = 18,
-	.data = &ldp_panel_data,
+	.source = "dpi.0",
+
+	.data_lines = 18,
+
+	.display_timing = &ldp_lcd_videomode,
+
+	.enable_gpio = -1,	/* filled in code */
+	.backlight_gpio = -1,	/* filled in code */
 };
 
-static struct omap_dss_device *ldp_dss_devices[] = {
-	&ldp_lcd_device,
+static struct platform_device ldp_lcd_device = {
+	.name = "panel-dpi",
+	.id = 0,
+	.dev.platform_data = &ldp_lcd_pdata,
 };
 
 static struct omap_dss_board_info ldp_dss_data = {
-	.num_devices = ARRAY_SIZE(ldp_dss_devices),
-	.devices = ldp_dss_devices,
-	.default_device = &ldp_lcd_device,
+	.default_display_name = "lcd",
 };
 
 static void __init ldp_display_init(void)
 {
-	ldp_panel_data.gpios[2] = LCD_PANEL_RESET_GPIO;
-	ldp_panel_data.gpios[3] = LCD_PANEL_QVGA_GPIO;
+	int r;
+
+	static struct gpio gpios[] __initdata = {
+		{LCD_PANEL_RESET_GPIO, GPIOF_OUT_INIT_HIGH, "LCD RESET"},
+		{LCD_PANEL_QVGA_GPIO, GPIOF_OUT_INIT_HIGH, "LCD QVGA"},
+	};
+
+	r = gpio_request_array(gpios, ARRAY_SIZE(gpios));
+	if (r) {
+		pr_err("Cannot request LCD GPIOs, error %d\n", r);
+		return;
+	}
 
 	omap_display_init(&ldp_dss_data);
 }
 
 static int ldp_twl_gpio_setup(struct device *dev, unsigned gpio, unsigned ngpio)
 {
-	ldp_panel_data.gpios[0] = gpio + 7;
-	ldp_panel_data.gpio_invert[0] = true;
+	/* LCD enable GPIO */
+	ldp_lcd_pdata.enable_gpio = gpio + 7;
 
-	ldp_panel_data.gpios[1] = gpio + 15;
-	ldp_panel_data.gpio_invert[1] = true;
+	/* Backlight enable GPIO */
+	ldp_lcd_pdata.backlight_gpio = gpio + 15;
 
 	return 0;
 }
@@ -322,6 +347,7 @@ static struct omap2_hsmmc_info mmc[] __initdata = {
 
 static struct platform_device *ldp_devices[] __initdata = {
 	&ldp_gpio_keys_device,
+	&ldp_lcd_device,
 };
 
 #ifdef CONFIG_OMAP_MUX
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
index f6eeb87e4e95..827d15009a86 100644
--- a/arch/arm/mach-omap2/board-n8x0.c
+++ b/arch/arm/mach-omap2/board-n8x0.c
@@ -122,11 +122,7 @@ static struct musb_hdrc_config musb_config = {
122}; 122};
123 123
124static struct musb_hdrc_platform_data tusb_data = { 124static struct musb_hdrc_platform_data tusb_data = {
125#ifdef CONFIG_USB_GADGET_MUSB_HDRC
126 .mode = MUSB_OTG, 125 .mode = MUSB_OTG,
127#else
128 .mode = MUSB_HOST,
129#endif
130 .set_power = tusb_set_power, 126 .set_power = tusb_set_power,
131 .min_power = 25, /* x2 = 50 mA drawn from VBUS as peripheral */ 127 .min_power = 25, /* x2 = 50 mA drawn from VBUS as peripheral */
132 .power = 100, /* Max 100 mA VBUS for host mode */ 128 .power = 100, /* Max 100 mA VBUS for host mode */
diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c
index 1c6ae5f5bae7..f26918467efc 100644
--- a/arch/arm/mach-omap2/board-omap3beagle.c
+++ b/arch/arm/mach-omap2/board-omap3beagle.c
@@ -225,35 +225,46 @@ static struct mtd_partition omap3beagle_nand_partitions[] = {
 
 /* DSS */
 
-static struct tfp410_platform_data dvi_panel = {
-	.i2c_bus_num = 3,
-	.power_down_gpio = -1,
+static struct connector_dvi_platform_data beagle_dvi_connector_pdata = {
+	.name = "dvi",
+	.source = "tfp410.0",
+	.i2c_bus_num = 3,
 };
 
-static struct omap_dss_device beagle_dvi_device = {
-	.type = OMAP_DISPLAY_TYPE_DPI,
-	.name = "dvi",
-	.driver_name = "tfp410",
-	.data = &dvi_panel,
-	.phy.dpi.data_lines = 24,
+static struct platform_device beagle_dvi_connector_device = {
+	.name = "connector-dvi",
+	.id = 0,
+	.dev.platform_data = &beagle_dvi_connector_pdata,
 };
 
-static struct omap_dss_device beagle_tv_device = {
+static struct encoder_tfp410_platform_data beagle_tfp410_pdata = {
+	.name = "tfp410.0",
+	.source = "dpi.0",
+	.data_lines = 24,
+	.power_down_gpio = -1,
+};
+
+static struct platform_device beagle_tfp410_device = {
+	.name = "tfp410",
+	.id = 0,
+	.dev.platform_data = &beagle_tfp410_pdata,
+};
+
+static struct connector_atv_platform_data beagle_tv_pdata = {
 	.name = "tv",
-	.driver_name = "venc",
-	.type = OMAP_DISPLAY_TYPE_VENC,
-	.phy.venc.type = OMAP_DSS_VENC_TYPE_SVIDEO,
+	.source = "venc.0",
+	.connector_type = OMAP_DSS_VENC_TYPE_SVIDEO,
+	.invert_polarity = false,
 };
 
-static struct omap_dss_device *beagle_dss_devices[] = {
-	&beagle_dvi_device,
-	&beagle_tv_device,
+static struct platform_device beagle_tv_connector_device = {
+	.name = "connector-analog-tv",
+	.id = 0,
+	.dev.platform_data = &beagle_tv_pdata,
 };
 
 static struct omap_dss_board_info beagle_dss_data = {
-	.num_devices = ARRAY_SIZE(beagle_dss_devices),
-	.devices = beagle_dss_devices,
-	.default_device = &beagle_dvi_device,
+	.default_display_name = "dvi",
 };
 
 #include "sdram-micron-mt46h32m32lf-6.h"
@@ -332,7 +343,11 @@ static int beagle_twl_gpio_setup(struct device *dev,
 		if (gpio_request_one(gpio + 1, GPIOF_IN, "EHCI_nOC"))
 			pr_err("%s: unable to configure EHCI_nOC\n", __func__);
 	}
-	dvi_panel.power_down_gpio = beagle_config.dvi_pd_gpio;
+	beagle_tfp410_pdata.power_down_gpio = beagle_config.dvi_pd_gpio;
+
+	platform_device_register(&beagle_tfp410_device);
+	platform_device_register(&beagle_dvi_connector_device);
+	platform_device_register(&beagle_tv_connector_device);
 
 	/* TWL4030_GPIO_MAX i.e. LED_GPO controls HS USB Port 2 power */
 	phy_data[0].vcc_gpio = gpio + TWL4030_GPIO_MAX;
@@ -547,6 +562,7 @@ static void __init omap3_beagle_init(void)
 	if (gpio_is_valid(beagle_config.dvi_pd_gpio))
 		omap_mux_init_gpio(beagle_config.dvi_pd_gpio, OMAP_PIN_OUTPUT);
 	omap_display_init(&beagle_dss_data);
+
 	omap_serial_init();
 	omap_sdrc_init(mt46h32m32lf6_sdrc_params,
 			mt46h32m32lf6_sdrc_params);
diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c
index 52bdddd41e0e..18143873346c 100644
--- a/arch/arm/mach-omap2/board-omap3evm.c
+++ b/arch/arm/mach-omap2/board-omap3evm.c
@@ -166,14 +166,6 @@ static inline void __init omap3evm_init_smsc911x(void) { return; }
  */
 #define OMAP3EVM_DVI_PANEL_EN_GPIO	199
 
-static struct panel_sharp_ls037v7dw01_data omap3_evm_lcd_data = {
-	.resb_gpio = OMAP3EVM_LCD_PANEL_RESB,
-	.ini_gpio = OMAP3EVM_LCD_PANEL_INI,
-	.mo_gpio = OMAP3EVM_LCD_PANEL_QVGA,
-	.lr_gpio = OMAP3EVM_LCD_PANEL_LR,
-	.ud_gpio = OMAP3EVM_LCD_PANEL_UD,
-};
-
 #ifdef CONFIG_BROKEN
 static void __init omap3_evm_display_init(void)
 {
@@ -196,44 +188,65 @@ static void __init omap3_evm_display_init(void)
 }
 #endif
 
-static struct omap_dss_device omap3_evm_lcd_device = {
+static struct panel_sharp_ls037v7dw01_platform_data omap3_evm_lcd_pdata = {
 	.name = "lcd",
-	.driver_name = "sharp_ls_panel",
-	.type = OMAP_DISPLAY_TYPE_DPI,
-	.phy.dpi.data_lines = 18,
-	.data = &omap3_evm_lcd_data,
+	.source = "dpi.0",
+
+	.data_lines = 18,
+
+	.resb_gpio = OMAP3EVM_LCD_PANEL_RESB,
+	.ini_gpio = OMAP3EVM_LCD_PANEL_INI,
+	.mo_gpio = OMAP3EVM_LCD_PANEL_QVGA,
+	.lr_gpio = OMAP3EVM_LCD_PANEL_LR,
+	.ud_gpio = OMAP3EVM_LCD_PANEL_UD,
+};
+
+static struct platform_device omap3_evm_lcd_device = {
+	.name = "panel-sharp-ls037v7dw01",
+	.id = 0,
+	.dev.platform_data = &omap3_evm_lcd_pdata,
+};
+
+static struct connector_dvi_platform_data omap3_evm_dvi_connector_pdata = {
+	.name = "dvi",
+	.source = "tfp410.0",
+	.i2c_bus_num = -1,
+};
+
+static struct platform_device omap3_evm_dvi_connector_device = {
+	.name = "connector-dvi",
+	.id = 0,
+	.dev.platform_data = &omap3_evm_dvi_connector_pdata,
 };
 
-static struct omap_dss_device omap3_evm_tv_device = {
-	.name = "tv",
-	.driver_name = "venc",
-	.type = OMAP_DISPLAY_TYPE_VENC,
-	.phy.venc.type = OMAP_DSS_VENC_TYPE_SVIDEO,
+static struct encoder_tfp410_platform_data omap3_evm_tfp410_pdata = {
+	.name = "tfp410.0",
+	.source = "dpi.0",
+	.data_lines = 24,
+	.power_down_gpio = OMAP3EVM_DVI_PANEL_EN_GPIO,
 };
 
-static struct tfp410_platform_data dvi_panel = {
-	.power_down_gpio = OMAP3EVM_DVI_PANEL_EN_GPIO,
-	.i2c_bus_num = -1,
+static struct platform_device omap3_evm_tfp410_device = {
+	.name = "tfp410",
+	.id = 0,
+	.dev.platform_data = &omap3_evm_tfp410_pdata,
 };
 
-static struct omap_dss_device omap3_evm_dvi_device = {
-	.name = "dvi",
-	.type = OMAP_DISPLAY_TYPE_DPI,
-	.driver_name = "tfp410",
-	.data = &dvi_panel,
-	.phy.dpi.data_lines = 24,
+static struct connector_atv_platform_data omap3_evm_tv_pdata = {
+	.name = "tv",
+	.source = "venc.0",
+	.connector_type = OMAP_DSS_VENC_TYPE_SVIDEO,
+	.invert_polarity = false,
 };
 
-static struct omap_dss_device *omap3_evm_dss_devices[] = {
-	&omap3_evm_lcd_device,
-	&omap3_evm_tv_device,
-	&omap3_evm_dvi_device,
+static struct platform_device omap3_evm_tv_connector_device = {
+	.name = "connector-analog-tv",
+	.id = 0,
+	.dev.platform_data = &omap3_evm_tv_pdata,
 };
 
 static struct omap_dss_board_info omap3_evm_dss_data = {
-	.num_devices = ARRAY_SIZE(omap3_evm_dss_devices),
-	.devices = omap3_evm_dss_devices,
-	.default_device = &omap3_evm_lcd_device,
+	.default_display_name = "lcd",
 };
 
 static struct regulator_consumer_supply omap3evm_vmmc1_supply[] = {
@@ -678,6 +691,10 @@ static void __init omap3_evm_init(void)
 	omap3_evm_i2c_init();
 
 	omap_display_init(&omap3_evm_dss_data);
+	platform_device_register(&omap3_evm_lcd_device);
+	platform_device_register(&omap3_evm_tfp410_device);
+	platform_device_register(&omap3_evm_dvi_connector_device);
+	platform_device_register(&omap3_evm_tv_connector_device);
 
 	omap_serial_init();
 	omap_sdrc_init(mt46h32m32lf6_sdrc_params, NULL);
diff --git a/arch/arm/mach-omap2/board-omap3pandora.c b/arch/arm/mach-omap2/board-omap3pandora.c
index d2b455e70486..de1bc6bbe585 100644
--- a/arch/arm/mach-omap2/board-omap3pandora.c
+++ b/arch/arm/mach-omap2/board-omap3pandora.c
@@ -231,34 +231,21 @@ static struct twl4030_keypad_data pandora_kp_data = {
 	.rep = 1,
 };
 
-static struct panel_tpo_td043_data lcd_data = {
-	.nreset_gpio = 157,
-};
-
-static struct omap_dss_device pandora_lcd_device = {
-	.name = "lcd",
-	.driver_name = "tpo_td043mtea1_panel",
-	.type = OMAP_DISPLAY_TYPE_DPI,
-	.phy.dpi.data_lines = 24,
-	.data = &lcd_data,
-};
-
-static struct omap_dss_device pandora_tv_device = {
-	.name = "tv",
-	.driver_name = "venc",
-	.type = OMAP_DISPLAY_TYPE_VENC,
-	.phy.venc.type = OMAP_DSS_VENC_TYPE_SVIDEO,
+static struct connector_atv_platform_data pandora_tv_pdata = {
+	.name = "tv",
+	.source = "venc.0",
+	.connector_type = OMAP_DSS_VENC_TYPE_SVIDEO,
+	.invert_polarity = false,
 };
 
-static struct omap_dss_device *pandora_dss_devices[] = {
-	&pandora_lcd_device,
-	&pandora_tv_device,
+static struct platform_device pandora_tv_connector_device = {
+	.name = "connector-analog-tv",
+	.id = 0,
+	.dev.platform_data = &pandora_tv_pdata,
 };
 
 static struct omap_dss_board_info pandora_dss_data = {
-	.num_devices = ARRAY_SIZE(pandora_dss_devices),
-	.devices = pandora_dss_devices,
-	.default_device = &pandora_lcd_device,
+	.default_display_name = "lcd",
 };
 
 static void pandora_wl1251_init_card(struct mmc_card *card)
@@ -348,7 +335,7 @@ static struct regulator_consumer_supply pandora_vdds_supplies[] = {
 };
 
 static struct regulator_consumer_supply pandora_vcc_lcd_supply[] = {
-	REGULATOR_SUPPLY("vcc", "display0"),
+	REGULATOR_SUPPLY("vcc", "spi1.1"),
 };
 
 static struct regulator_consumer_supply pandora_usb_phy_supply[] = {
@@ -529,13 +516,21 @@ static int __init omap3pandora_i2c_init(void)
 	return 0;
 }
 
+static struct panel_tpo_td043mtea1_platform_data pandora_lcd_pdata = {
+	.name = "lcd",
+	.source = "dpi.0",
+
+	.data_lines = 24,
+	.nreset_gpio = 157,
+};
+
 static struct spi_board_info omap3pandora_spi_board_info[] __initdata = {
 	{
-		.modalias = "tpo_td043mtea1_panel_spi",
+		.modalias = "panel-tpo-td043mtea1",
 		.bus_num = 1,
 		.chip_select = 1,
 		.max_speed_hz = 375000,
-		.platform_data = &pandora_lcd_device,
+		.platform_data = &pandora_lcd_pdata,
 	}
 };
 
@@ -580,6 +575,7 @@ static struct platform_device *omap3pandora_devices[] __initdata = {
 	&pandora_keys_gpio,
 	&pandora_vwlan_device,
 	&pandora_backlight,
+	&pandora_tv_connector_device,
 };
 
 static struct usbhs_omap_platform_data usbhs_bdata __initdata = {
diff --git a/arch/arm/mach-omap2/board-omap3stalker.c b/arch/arm/mach-omap2/board-omap3stalker.c
index d37e6b187ae4..ba8342fef799 100644
--- a/arch/arm/mach-omap2/board-omap3stalker.c
+++ b/arch/arm/mach-omap2/board-omap3stalker.c
@@ -93,40 +93,50 @@ static void __init omap3_stalker_display_init(void)
 {
 	return;
 }
+static struct connector_dvi_platform_data omap3stalker_dvi_connector_pdata = {
+	.name = "dvi",
+	.source = "tfp410.0",
+	.i2c_bus_num = -1,
+};
 
-static struct omap_dss_device omap3_stalker_tv_device = {
-	.name = "tv",
-	.driver_name = "venc",
-	.type = OMAP_DISPLAY_TYPE_VENC,
-#if defined(CONFIG_OMAP2_VENC_OUT_TYPE_SVIDEO)
-	.phy.venc.type = OMAP_DSS_VENC_TYPE_SVIDEO,
-#elif defined(CONFIG_OMAP2_VENC_OUT_TYPE_COMPOSITE)
-	.u.venc.type = OMAP_DSS_VENC_TYPE_COMPOSITE,
-#endif
+static struct platform_device omap3stalker_dvi_connector_device = {
+	.name = "connector-dvi",
+	.id = 0,
+	.dev.platform_data = &omap3stalker_dvi_connector_pdata,
 };
 
-static struct tfp410_platform_data dvi_panel = {
-	.power_down_gpio = DSS_ENABLE_GPIO,
-	.i2c_bus_num = -1,
+static struct encoder_tfp410_platform_data omap3stalker_tfp410_pdata = {
+	.name = "tfp410.0",
+	.source = "dpi.0",
+	.data_lines = 24,
+	.power_down_gpio = DSS_ENABLE_GPIO,
 };
 
-static struct omap_dss_device omap3_stalker_dvi_device = {
-	.name = "dvi",
-	.type = OMAP_DISPLAY_TYPE_DPI,
-	.driver_name = "tfp410",
-	.data = &dvi_panel,
-	.phy.dpi.data_lines = 24,
+static struct platform_device omap3stalker_tfp410_device = {
+	.name = "tfp410",
+	.id = 0,
+	.dev.platform_data = &omap3stalker_tfp410_pdata,
+};
+
+static struct connector_atv_platform_data omap3stalker_tv_pdata = {
+	.name = "tv",
+	.source = "venc.0",
+#if defined(CONFIG_OMAP2_VENC_OUT_TYPE_SVIDEO)
+	.connector_type = OMAP_DSS_VENC_TYPE_SVIDEO,
+#elif defined(CONFIG_OMAP2_VENC_OUT_TYPE_COMPOSITE)
+	.connector_type = OMAP_DSS_VENC_TYPE_COMPOSITE,
+#endif
+	.invert_polarity = false,
 };
 
-static struct omap_dss_device *omap3_stalker_dss_devices[] = {
-	&omap3_stalker_tv_device,
-	&omap3_stalker_dvi_device,
+static struct platform_device omap3stalker_tv_connector_device = {
+	.name = "connector-analog-tv",
+	.id = 0,
+	.dev.platform_data = &omap3stalker_tv_pdata,
 };
 
 static struct omap_dss_board_info omap3_stalker_dss_data = {
-	.num_devices = ARRAY_SIZE(omap3_stalker_dss_devices),
-	.devices = omap3_stalker_dss_devices,
-	.default_device = &omap3_stalker_dvi_device,
+	.default_display_name = "dvi",
 };
 
 static struct regulator_consumer_supply omap3stalker_vmmc1_supply[] = {
@@ -356,6 +366,9 @@ static struct usbhs_phy_data phy_data[] __initdata = {
 
 static struct platform_device *omap3_stalker_devices[] __initdata = {
 	&keys_gpio,
+	&omap3stalker_tfp410_device,
+	&omap3stalker_dvi_connector_device,
+	&omap3stalker_tv_connector_device,
 };
 
 static struct usbhs_omap_platform_data usbhs_bdata __initdata = {
diff --git a/arch/arm/mach-omap2/board-overo.c b/arch/arm/mach-omap2/board-overo.c
index 5748b5d06c23..f6d384111911 100644
--- a/arch/arm/mach-omap2/board-overo.c
+++ b/arch/arm/mach-omap2/board-overo.c
@@ -72,6 +72,9 @@
 #define OVERO_SMSC911X2_CS	4
 #define OVERO_SMSC911X2_GPIO	65
 
+/* whether to register LCD35 instead of LCD43 */
+static bool overo_use_lcd35;
+
 #if defined(CONFIG_TOUCHSCREEN_ADS7846) || \
 	defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE)
 
@@ -149,78 +152,94 @@ static inline void __init overo_init_smsc911x(void) { return; }
 #define OVERO_GPIO_LCD_EN	144
 #define OVERO_GPIO_LCD_BL	145
 
-static struct tfp410_platform_data dvi_panel = {
-	.i2c_bus_num = 3,
-	.power_down_gpio = -1,
+static struct connector_atv_platform_data overo_tv_pdata = {
+	.name = "tv",
+	.source = "venc.0",
+	.connector_type = OMAP_DSS_VENC_TYPE_SVIDEO,
+	.invert_polarity = false,
 };
 
-static struct omap_dss_device overo_dvi_device = {
-	.name = "dvi",
-	.type = OMAP_DISPLAY_TYPE_DPI,
-	.driver_name = "tfp410",
-	.data = &dvi_panel,
-	.phy.dpi.data_lines = 24,
+static struct platform_device overo_tv_connector_device = {
+	.name = "connector-analog-tv",
+	.id = 0,
+	.dev.platform_data = &overo_tv_pdata,
 };
 
-static struct omap_dss_device overo_tv_device = {
-	.name = "tv",
-	.driver_name = "venc",
-	.type = OMAP_DISPLAY_TYPE_VENC,
-	.phy.venc.type = OMAP_DSS_VENC_TYPE_SVIDEO,
+static const struct display_timing overo_lcd43_videomode = {
+	.pixelclock = { 0, 9200000, 0 },
+
+	.hactive = { 0, 480, 0 },
+	.hfront_porch = { 0, 8, 0 },
+	.hback_porch = { 0, 4, 0 },
+	.hsync_len = { 0, 41, 0 },
+
+	.vactive = { 0, 272, 0 },
+	.vfront_porch = { 0, 4, 0 },
+	.vback_porch = { 0, 2, 0 },
+	.vsync_len = { 0, 10, 0 },
+
+	.flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+		DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE,
 };
 
-static struct panel_generic_dpi_data lcd43_panel = {
-	.name = "samsung_lte430wq_f0c",
-	.num_gpios = 2,
-	.gpios = {
-		OVERO_GPIO_LCD_EN,
-		OVERO_GPIO_LCD_BL
-	},
+static struct panel_dpi_platform_data overo_lcd43_pdata = {
+	.name = "lcd43",
+	.source = "dpi.0",
+
+	.data_lines = 24,
+
+	.display_timing = &overo_lcd43_videomode,
+
+	.enable_gpio = OVERO_GPIO_LCD_EN,
+	.backlight_gpio = OVERO_GPIO_LCD_BL,
 };
 
-static struct omap_dss_device overo_lcd43_device = {
-	.name = "lcd43",
-	.type = OMAP_DISPLAY_TYPE_DPI,
-	.driver_name = "generic_dpi_panel",
-	.data = &lcd43_panel,
-	.phy.dpi.data_lines = 24,
+static struct platform_device overo_lcd43_device = {
+	.name = "panel-dpi",
+	.id = 0,
+	.dev.platform_data = &overo_lcd43_pdata,
 };
 
-#if defined(CONFIG_PANEL_LGPHILIPS_LB035Q02) || \
-	defined(CONFIG_PANEL_LGPHILIPS_LB035Q02_MODULE)
-static struct panel_generic_dpi_data lcd35_panel = {
-	.num_gpios = 2,
-	.gpios = {
-		OVERO_GPIO_LCD_EN,
-		OVERO_GPIO_LCD_BL
-	},
+static struct connector_dvi_platform_data overo_dvi_connector_pdata = {
+	.name = "dvi",
+	.source = "tfp410.0",
+	.i2c_bus_num = 3,
 };
 
-static struct omap_dss_device overo_lcd35_device = {
-	.type = OMAP_DISPLAY_TYPE_DPI,
-	.name = "lcd35",
-	.driver_name = "lgphilips_lb035q02_panel",
-	.phy.dpi.data_lines = 24,
-	.data = &lcd35_panel,
+static struct platform_device overo_dvi_connector_device = {
+	.name = "connector-dvi",
+	.id = 0,
+	.dev.platform_data = &overo_dvi_connector_pdata,
 };
-#endif
 
-static struct omap_dss_device *overo_dss_devices[] = {
-	&overo_dvi_device,
-	&overo_tv_device,
-#if defined(CONFIG_PANEL_LGPHILIPS_LB035Q02) || \
-	defined(CONFIG_PANEL_LGPHILIPS_LB035Q02_MODULE)
-	&overo_lcd35_device,
-#endif
-	&overo_lcd43_device,
+static struct encoder_tfp410_platform_data overo_tfp410_pdata = {
+	.name = "tfp410.0",
+	.source = "dpi.0",
+	.data_lines = 24,
+	.power_down_gpio = -1,
+};
+
+static struct platform_device overo_tfp410_device = {
+	.name = "tfp410",
+	.id = 0,
+	.dev.platform_data = &overo_tfp410_pdata,
 };
 
 static struct omap_dss_board_info overo_dss_data = {
-	.num_devices = ARRAY_SIZE(overo_dss_devices),
-	.devices = overo_dss_devices,
-	.default_device = &overo_dvi_device,
+	.default_display_name = "lcd43",
 };
 
+static void __init overo_display_init(void)
+{
+	omap_display_init(&overo_dss_data);
+
+	if (!overo_use_lcd35)
+		platform_device_register(&overo_lcd43_device);
+	platform_device_register(&overo_tfp410_device);
+	platform_device_register(&overo_dvi_connector_device);
+	platform_device_register(&overo_tv_connector_device);
+}
+
 static struct mtd_partition overo_nand_partitions[] = {
 	{
 		.name = "xloader",
@@ -408,24 +427,41 @@ static int __init overo_i2c_init(void)
 	return 0;
 }
 
+static struct panel_lb035q02_platform_data overo_lcd35_pdata = {
+	.name = "lcd35",
+	.source = "dpi.0",
+
+	.data_lines = 24,
+
+	.enable_gpio = OVERO_GPIO_LCD_EN,
+	.backlight_gpio = OVERO_GPIO_LCD_BL,
+};
+
+/*
+ * NOTE: We need to add either the lgphilips panel, or the lcd43 panel. The
+ * selection is done based on the overo_use_lcd35 field. If new SPI
+ * devices are added here, extra work is needed to make only the lgphilips panel
+ * affected by the overo_use_lcd35 field.
+ */
 static struct spi_board_info overo_spi_board_info[] __initdata = {
-#if defined(CONFIG_PANEL_LGPHILIPS_LB035Q02) || \
-	defined(CONFIG_PANEL_LGPHILIPS_LB035Q02_MODULE)
 	{
-		.modalias = "lgphilips_lb035q02_panel-spi",
+		.modalias = "panel_lgphilips_lb035q02",
 		.bus_num = 1,
 		.chip_select = 1,
 		.max_speed_hz = 500000,
 		.mode = SPI_MODE_3,
+		.platform_data = &overo_lcd35_pdata,
 	},
-#endif
 };
 
 static int __init overo_spi_init(void)
 {
 	overo_ads7846_init();
-	spi_register_board_info(overo_spi_board_info,
-			ARRAY_SIZE(overo_spi_board_info));
+
+	if (overo_use_lcd35) {
+		spi_register_board_info(overo_spi_board_info,
+				ARRAY_SIZE(overo_spi_board_info));
+	}
 	return 0;
 }
 
@@ -463,11 +499,13 @@ static void __init overo_init(void)
 {
 	int ret;
 
+	if (strstr(boot_command_line, "omapdss.def_disp=lcd35"))
+		overo_use_lcd35 = true;
+
 	regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
 	omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
 	overo_i2c_init();
 	omap_hsmmc_init(mmc);
-	omap_display_init(&overo_dss_data);
 	omap_serial_init();
 	omap_sdrc_init(mt46h32m32lf6_sdrc_params,
 			mt46h32m32lf6_sdrc_params);
@@ -484,6 +522,8 @@ static void __init overo_init(void)
 	overo_init_keys();
 	omap_twl4030_audio_init("overo", NULL);
 
+	overo_display_init();
+
 	/* Ensure SDRC pins are mux'd for self-refresh */
 	omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT);
 	omap_mux_init_signal("sdrc_cke1", OMAP_PIN_OUTPUT);
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index 9c2dd102fbbb..c3270c0f1fce 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -45,6 +45,8 @@
 #include <linux/platform_data/tsl2563.h>
 #include <linux/lis3lv02d.h>
 
+#include <video/omap-panel-data.h>
+
 #if defined(CONFIG_IR_RX51) || defined(CONFIG_IR_RX51_MODULE)
 #include <media/ir-rx51.h>
 #endif
@@ -226,6 +228,15 @@ static struct lp55xx_platform_data rx51_lp5523_platform_data = {
 };
 #endif
 
+#define RX51_LCD_RESET_GPIO	90
+
+static struct panel_acx565akm_platform_data acx_pdata = {
+	.name = "lcd",
+	.source = "sdi.0",
+	.reset_gpio = RX51_LCD_RESET_GPIO,
+	.datapairs = 2,
+};
+
 static struct omap2_mcspi_device_config wl1251_mcspi_config = {
 	.turbo_mode = 0,
 };
@@ -254,6 +265,7 @@ static struct spi_board_info rx51_peripherals_spi_board_info[] __initdata = {
 		.chip_select = 2,
 		.max_speed_hz = 6000000,
 		.controller_data = &mipid_mcspi_config,
+		.platform_data = &acx_pdata,
 	},
 	[RX51_SPI_TSC2005] = {
 		.modalias = "tsc2005",
diff --git a/arch/arm/mach-omap2/board-rx51-video.c b/arch/arm/mach-omap2/board-rx51-video.c
index bdd1e3a179e1..43a90c8d6837 100644
--- a/arch/arm/mach-omap2/board-rx51-video.c
+++ b/arch/arm/mach-omap2/board-rx51-video.c
@@ -29,34 +29,21 @@
 
 #if defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE)
 
-static struct panel_acx565akm_data lcd_data = {
-	.reset_gpio = RX51_LCD_RESET_GPIO,
+static struct connector_atv_platform_data rx51_tv_pdata = {
+	.name = "tv",
+	.source = "venc.0",
+	.connector_type = OMAP_DSS_VENC_TYPE_COMPOSITE,
+	.invert_polarity = false,
 };
 
-static struct omap_dss_device rx51_lcd_device = {
-	.name = "lcd",
-	.driver_name = "panel-acx565akm",
-	.type = OMAP_DISPLAY_TYPE_SDI,
-	.phy.sdi.datapairs = 2,
-	.data = &lcd_data,
-};
-
-static struct omap_dss_device rx51_tv_device = {
-	.name = "tv",
-	.type = OMAP_DISPLAY_TYPE_VENC,
-	.driver_name = "venc",
-	.phy.venc.type = OMAP_DSS_VENC_TYPE_COMPOSITE,
-};
-
-static struct omap_dss_device *rx51_dss_devices[] = {
-	&rx51_lcd_device,
-	&rx51_tv_device,
+static struct platform_device rx51_tv_connector_device = {
+	.name = "connector-analog-tv",
+	.id = 0,
+	.dev.platform_data = &rx51_tv_pdata,
 };
 
 static struct omap_dss_board_info rx51_dss_board_info = {
-	.num_devices = ARRAY_SIZE(rx51_dss_devices),
-	.devices = rx51_dss_devices,
-	.default_device = &rx51_lcd_device,
+	.default_display_name = "lcd",
 };
 
 static int __init rx51_video_init(void)
@@ -71,6 +58,8 @@ static int __init rx51_video_init(void)
 
 	omap_display_init(&rx51_dss_board_info);
 
+	platform_device_register(&rx51_tv_connector_device);
+
 	return 0;
 }
 
diff --git a/arch/arm/mach-omap2/board-rx51.c b/arch/arm/mach-omap2/board-rx51.c
index d2ea68ea678a..7735105561d8 100644
--- a/arch/arm/mach-omap2/board-rx51.c
+++ b/arch/arm/mach-omap2/board-rx51.c
@@ -85,7 +85,7 @@ static struct omap_board_mux board_mux[] __initdata = {
 
 static struct omap_musb_board_data musb_board_data = {
 	.interface_type = MUSB_INTERFACE_ULPI,
-	.mode = MUSB_PERIPHERAL,
+	.mode = MUSB_OTG,
 	.power = 0,
 };
 
diff --git a/arch/arm/mach-omap2/board-zoom-display.c b/arch/arm/mach-omap2/board-zoom-display.c
index c2a079cb76fc..3d8ecc1e05bd 100644
--- a/arch/arm/mach-omap2/board-zoom-display.c
+++ b/arch/arm/mach-omap2/board-zoom-display.c
@@ -25,32 +25,23 @@
 #define LCD_PANEL_RESET_GPIO_PILOT	55
 #define LCD_PANEL_QVGA_GPIO	56
 
-static struct panel_nec_nl8048_data zoom_lcd_data = {
-	/* res_gpio filled in code */
-	.qvga_gpio = LCD_PANEL_QVGA_GPIO,
-};
+static struct panel_nec_nl8048hl11_platform_data zoom_lcd_pdata = {
+	.name = "lcd",
+	.source = "dpi.0",
 
-static struct omap_dss_device zoom_lcd_device = {
-	.name = "lcd",
-	.driver_name = "NEC_8048_panel",
-	.type = OMAP_DISPLAY_TYPE_DPI,
-	.phy.dpi.data_lines = 24,
-	.data = &zoom_lcd_data,
-};
+	.data_lines = 24,
 
-static struct omap_dss_device *zoom_dss_devices[] = {
-	&zoom_lcd_device,
+	.res_gpio = -1,	/* filled in code */
+	.qvga_gpio = LCD_PANEL_QVGA_GPIO,
 };
 
 static struct omap_dss_board_info zoom_dss_data = {
-	.num_devices = ARRAY_SIZE(zoom_dss_devices),
-	.devices = zoom_dss_devices,
-	.default_device = &zoom_lcd_device,
+	.default_display_name = "lcd",
 };
 
 static void __init zoom_lcd_panel_init(void)
 {
-	zoom_lcd_data.res_gpio = (omap_rev() > OMAP3430_REV_ES3_0) ?
+	zoom_lcd_pdata.res_gpio = (omap_rev() > OMAP3430_REV_ES3_0) ?
 			LCD_PANEL_RESET_GPIO_PROD :
 			LCD_PANEL_RESET_GPIO_PILOT;
 }
@@ -61,19 +52,20 @@ static struct omap2_mcspi_device_config dss_lcd_mcspi_config = {
 
 static struct spi_board_info nec_8048_spi_board_info[] __initdata = {
 	[0] = {
-		.modalias = "nec_8048_spi",
+		.modalias = "panel-nec-nl8048hl11",
 		.bus_num = 1,
 		.chip_select = 2,
 		.max_speed_hz = 375000,
 		.controller_data = &dss_lcd_mcspi_config,
+		.platform_data = &zoom_lcd_pdata,
 	},
 };
 
 void __init zoom_display_init(void)
 {
 	omap_display_init(&zoom_dss_data);
+	zoom_lcd_panel_init();
 	spi_register_board_info(nec_8048_spi_board_info,
 				ARRAY_SIZE(nec_8048_spi_board_info));
-	zoom_lcd_panel_init();
 }
 
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index 3c1279f27d1f..73ae7536a32b 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -327,44 +327,6 @@ static void omap_init_audio(void)
 static inline void omap_init_audio(void) {}
 #endif
 
-#if defined(CONFIG_SND_OMAP_SOC_MCPDM) || \
-	defined(CONFIG_SND_OMAP_SOC_MCPDM_MODULE)
-
-static void __init omap_init_mcpdm(void)
-{
-	struct omap_hwmod *oh;
-	struct platform_device *pdev;
-
-	oh = omap_hwmod_lookup("mcpdm");
-	if (!oh)
-		return;
-
-	pdev = omap_device_build("omap-mcpdm", -1, oh, NULL, 0);
-	WARN(IS_ERR(pdev), "Can't build omap_device for omap-mcpdm.\n");
-}
-#else
-static inline void omap_init_mcpdm(void) {}
-#endif
-
-#if defined(CONFIG_SND_OMAP_SOC_DMIC) || \
-	defined(CONFIG_SND_OMAP_SOC_DMIC_MODULE)
-
-static void __init omap_init_dmic(void)
-{
-	struct omap_hwmod *oh;
-	struct platform_device *pdev;
-
-	oh = omap_hwmod_lookup("dmic");
-	if (!oh)
-		return;
-
-	pdev = omap_device_build("omap-dmic", -1, oh, NULL, 0);
-	WARN(IS_ERR(pdev), "Can't build omap_device for omap-dmic.\n");
-}
-#else
-static inline void omap_init_dmic(void) {}
-#endif
-
 #if defined(CONFIG_SND_OMAP_SOC_OMAP_HDMI) || \
 	defined(CONFIG_SND_OMAP_SOC_OMAP_HDMI_MODULE)
 
@@ -565,8 +527,6 @@ static int __init omap2_init_devices(void)
 	omap_init_mbox();
 	/* If dtb is there, the devices will be created dynamically */
 	if (!of_have_populated_dt()) {
-		omap_init_dmic();
-		omap_init_mcpdm();
 		omap_init_mcspi();
 		omap_init_sham();
 		omap_init_aes();
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index ff37be1f6f93..03a0516c7f67 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -400,7 +400,7 @@ int __init omap_display_init(struct omap_dss_board_info *board_data)
 
 	/* Create devices for DPI and SDI */
 
-	pdev = create_simple_dss_pdev("omapdss_dpi", -1,
+	pdev = create_simple_dss_pdev("omapdss_dpi", 0,
 			board_data, sizeof(*board_data), dss_pdev);
 	if (IS_ERR(pdev)) {
 		pr_err("Could not build platform_device for omapdss_dpi\n");
@@ -408,7 +408,7 @@ int __init omap_display_init(struct omap_dss_board_info *board_data)
 	}
 
 	if (cpu_is_omap34xx()) {
-		pdev = create_simple_dss_pdev("omapdss_sdi", -1,
+		pdev = create_simple_dss_pdev("omapdss_sdi", 0,
 				board_data, sizeof(*board_data), dss_pdev);
 		if (IS_ERR(pdev)) {
 			pr_err("Could not build platform_device for omapdss_sdi\n");
diff --git a/arch/arm/mach-omap2/dss-common.c b/arch/arm/mach-omap2/dss-common.c
index 393aeefaebb0..bf89effa4c99 100644
--- a/arch/arm/mach-omap2/dss-common.c
+++ b/arch/arm/mach-omap2/dss-common.c
@@ -25,6 +25,7 @@
 
 #include <linux/kernel.h>
 #include <linux/gpio.h>
+#include <linux/platform_device.h>
 
 #include <video/omapdss.h>
 #include <video/omap-panel-data.h>
@@ -37,70 +38,76 @@
 #define HDMI_GPIO_LS_OE 41 /* Level shifter for HDMI */
 #define HDMI_GPIO_HPD 63 /* Hotplug detect */
 
-/* Display DVI */
 #define PANDA_DVI_TFP410_POWER_DOWN_GPIO 0
 
-/* Using generic display panel */
-static struct tfp410_platform_data omap4_dvi_panel = {
-	.i2c_bus_num = 3,
-	.power_down_gpio = PANDA_DVI_TFP410_POWER_DOWN_GPIO,
+/* DVI Connector */
+static struct connector_dvi_platform_data omap4_panda_dvi_connector_pdata = {
+	.name = "dvi",
+	.source = "tfp410.0",
+	.i2c_bus_num = 2,
 };
 
-static struct omap_dss_device omap4_panda_dvi_device = {
-	.type = OMAP_DISPLAY_TYPE_DPI,
-	.name = "dvi",
-	.driver_name = "tfp410",
-	.data = &omap4_dvi_panel,
-	.phy.dpi.data_lines = 24,
-	.channel = OMAP_DSS_CHANNEL_LCD2,
+static struct platform_device omap4_panda_dvi_connector_device = {
+	.name = "connector-dvi",
+	.id = 0,
+	.dev.platform_data = &omap4_panda_dvi_connector_pdata,
 };
 
-static struct omap_dss_hdmi_data omap4_panda_hdmi_data = {
+/* TFP410 DPI-to-DVI chip */
+static struct encoder_tfp410_platform_data omap4_panda_tfp410_pdata = {
+	.name = "tfp410.0",
+	.source = "dpi.0",
+	.data_lines = 24,
+	.power_down_gpio = PANDA_DVI_TFP410_POWER_DOWN_GPIO,
+};
+
+static struct platform_device omap4_panda_tfp410_device = {
+	.name = "tfp410",
+	.id = 0,
+	.dev.platform_data = &omap4_panda_tfp410_pdata,
+};
+
+/* HDMI Connector */
+static struct connector_hdmi_platform_data omap4_panda_hdmi_connector_pdata = {
+	.name = "hdmi",
+	.source = "tpd12s015.0",
+};
+
+static struct platform_device omap4_panda_hdmi_connector_device = {
+	.name = "connector-hdmi",
+	.id = 0,
+	.dev.platform_data = &omap4_panda_hdmi_connector_pdata,
+};
+
+/* TPD12S015 HDMI ESD protection & level shifter chip */
+static struct encoder_tpd12s015_platform_data omap4_panda_tpd_pdata = {
+	.name = "tpd12s015.0",
+	.source = "hdmi.0",
+
 	.ct_cp_hpd_gpio = HDMI_GPIO_CT_CP_HPD,
 	.ls_oe_gpio = HDMI_GPIO_LS_OE,
 	.hpd_gpio = HDMI_GPIO_HPD,
 };
 
-static struct omap_dss_device omap4_panda_hdmi_device = {
-	.name = "hdmi",
-	.driver_name = "hdmi_panel",
-	.type = OMAP_DISPLAY_TYPE_HDMI,
-	.channel = OMAP_DSS_CHANNEL_DIGIT,
-	.data = &omap4_panda_hdmi_data,
-};
-
-static struct omap_dss_device *omap4_panda_dss_devices[] = {
-	&omap4_panda_dvi_device,
-	&omap4_panda_hdmi_device,
+static struct platform_device omap4_panda_tpd_device = {
+	.name = "tpd12s015",
+	.id = 0,
+	.dev.platform_data = &omap4_panda_tpd_pdata,
 };
 
 static struct omap_dss_board_info omap4_panda_dss_data = {
-	.num_devices = ARRAY_SIZE(omap4_panda_dss_devices),
-	.devices = omap4_panda_dss_devices,
-	.default_device = &omap4_panda_dvi_device,
+	.default_display_name = "dvi",
 };
 
-void __init omap4_panda_display_init(void)
+void __init omap4_panda_display_init_of(void)
 {
 	omap_display_init(&omap4_panda_dss_data);
 
-	/*
-	 * OMAP4460SDP/Blaze and OMAP4430 ES2.3 SDP/Blaze boards and
-	 * later have external pull up on the HDMI I2C lines
-	 */
-	if (cpu_is_omap446x() || omap_rev() > OMAP4430_REV_ES2_2)
-		omap_hdmi_init(OMAP_HDMI_SDA_SCL_EXTERNAL_PULLUP);
-	else
-		omap_hdmi_init(0);
-
-	omap_mux_init_gpio(HDMI_GPIO_LS_OE, OMAP_PIN_OUTPUT);
+	platform_device_register(&omap4_panda_tfp410_device);
+	platform_device_register(&omap4_panda_dvi_connector_device);
97 omap_mux_init_gpio(HDMI_GPIO_CT_CP_HPD, OMAP_PIN_OUTPUT);
98 omap_mux_init_gpio(HDMI_GPIO_HPD, OMAP_PIN_INPUT_PULLDOWN);
99}
100 108
101void __init omap4_panda_display_init_of(void) 109 platform_device_register(&omap4_panda_tpd_device);
102{ 110 platform_device_register(&omap4_panda_hdmi_connector_device);
103 omap_display_init(&omap4_panda_dss_data);
104} 111}
105 112
106 113
@@ -109,93 +116,73 @@ void __init omap4_panda_display_init_of(void)
109#define DISPLAY_SEL_GPIO 59 /* LCD2/PicoDLP switch */ 116#define DISPLAY_SEL_GPIO 59 /* LCD2/PicoDLP switch */
110#define DLP_POWER_ON_GPIO 40 117#define DLP_POWER_ON_GPIO 40
111 118
112static struct nokia_dsi_panel_data dsi1_panel = { 119static struct panel_dsicm_platform_data dsi1_panel = {
113 .name = "taal", 120 .name = "lcd",
114 .reset_gpio = 102, 121 .source = "dsi.0",
115 .use_ext_te = false, 122 .reset_gpio = 102,
116 .ext_te_gpio = 101, 123 .use_ext_te = false,
117 .esd_interval = 0, 124 .ext_te_gpio = 101,
118 .pin_config = { 125 .pin_config = {
119 .num_pins = 6, 126 .num_pins = 6,
120 .pins = { 0, 1, 2, 3, 4, 5 }, 127 .pins = { 0, 1, 2, 3, 4, 5 },
121 },
122};
123
124static struct omap_dss_device sdp4430_lcd_device = {
125 .name = "lcd",
126 .driver_name = "taal",
127 .type = OMAP_DISPLAY_TYPE_DSI,
128 .data = &dsi1_panel,
129 .phy.dsi = {
130 .module = 0,
131 }, 128 },
132 .channel = OMAP_DSS_CHANNEL_LCD,
133}; 129};
134 130
135static struct nokia_dsi_panel_data dsi2_panel = { 131static struct platform_device sdp4430_lcd_device = {
136 .name = "taal", 132 .name = "panel-dsi-cm",
137 .reset_gpio = 104, 133 .id = 0,
138 .use_ext_te = false, 134 .dev.platform_data = &dsi1_panel,
139 .ext_te_gpio = 103,
140 .esd_interval = 0,
141 .pin_config = {
142 .num_pins = 6,
143 .pins = { 0, 1, 2, 3, 4, 5 },
144 },
145}; 135};
146 136
147static struct omap_dss_device sdp4430_lcd2_device = { 137static struct panel_dsicm_platform_data dsi2_panel = {
148 .name = "lcd2", 138 .name = "lcd2",
149 .driver_name = "taal", 139 .source = "dsi.1",
150 .type = OMAP_DISPLAY_TYPE_DSI, 140 .reset_gpio = 104,
151 .data = &dsi2_panel, 141 .use_ext_te = false,
152 .phy.dsi = { 142 .ext_te_gpio = 103,
153 143 .pin_config = {
154 .module = 1, 144 .num_pins = 6,
145 .pins = { 0, 1, 2, 3, 4, 5 },
155 }, 146 },
156 .channel = OMAP_DSS_CHANNEL_LCD2,
157}; 147};
158 148
159static struct omap_dss_hdmi_data sdp4430_hdmi_data = { 149static struct platform_device sdp4430_lcd2_device = {
160 .ct_cp_hpd_gpio = HDMI_GPIO_CT_CP_HPD, 150 .name = "panel-dsi-cm",
161 .ls_oe_gpio = HDMI_GPIO_LS_OE, 151 .id = 1,
162 .hpd_gpio = HDMI_GPIO_HPD, 152 .dev.platform_data = &dsi2_panel,
163}; 153};
164 154
165static struct omap_dss_device sdp4430_hdmi_device = { 155/* HDMI Connector */
166 .name = "hdmi", 156static struct connector_hdmi_platform_data sdp4430_hdmi_connector_pdata = {
167 .driver_name = "hdmi_panel", 157 .name = "hdmi",
168 .type = OMAP_DISPLAY_TYPE_HDMI, 158 .source = "tpd12s015.0",
169 .channel = OMAP_DSS_CHANNEL_DIGIT,
170 .data = &sdp4430_hdmi_data,
171}; 159};
172 160
173static struct picodlp_panel_data sdp4430_picodlp_pdata = { 161static struct platform_device sdp4430_hdmi_connector_device = {
174 .picodlp_adapter_id = 2, 162 .name = "connector-hdmi",
175 .emu_done_gpio = 44, 163 .id = 0,
176 .pwrgood_gpio = 45, 164 .dev.platform_data = &sdp4430_hdmi_connector_pdata,
177}; 165};
178 166
179static struct omap_dss_device sdp4430_picodlp_device = { 167/* TPD12S015 HDMI ESD protection & level shifter chip */
180 .name = "picodlp", 168static struct encoder_tpd12s015_platform_data sdp4430_tpd_pdata = {
181 .driver_name = "picodlp_panel", 169 .name = "tpd12s015.0",
182 .type = OMAP_DISPLAY_TYPE_DPI, 170 .source = "hdmi.0",
183 .phy.dpi.data_lines = 24, 171
184 .channel = OMAP_DSS_CHANNEL_LCD2, 172 .ct_cp_hpd_gpio = HDMI_GPIO_CT_CP_HPD,
185 .data = &sdp4430_picodlp_pdata, 173 .ls_oe_gpio = HDMI_GPIO_LS_OE,
174 .hpd_gpio = HDMI_GPIO_HPD,
186}; 175};
187 176
188static struct omap_dss_device *sdp4430_dss_devices[] = { 177static struct platform_device sdp4430_tpd_device = {
189 &sdp4430_lcd_device, 178 .name = "tpd12s015",
190 &sdp4430_lcd2_device, 179 .id = 0,
191 &sdp4430_hdmi_device, 180 .dev.platform_data = &sdp4430_tpd_pdata,
192 &sdp4430_picodlp_device,
193}; 181};
194 182
183
195static struct omap_dss_board_info sdp4430_dss_data = { 184static struct omap_dss_board_info sdp4430_dss_data = {
196 .num_devices = ARRAY_SIZE(sdp4430_dss_devices), 185 .default_display_name = "lcd",
197 .devices = sdp4430_dss_devices,
198 .default_device = &sdp4430_lcd_device,
199}; 186};
200 187
201/* 188/*
@@ -204,7 +191,7 @@ static struct omap_dss_board_info sdp4430_dss_data = {
204 * used by picodlp on the 4430sdp platform. Keep this gpio disabled as LCD2 is 191 * used by picodlp on the 4430sdp platform. Keep this gpio disabled as LCD2 is
205 * selected by default 192 * selected by default
206 */ 193 */
207void __init omap_4430sdp_display_init(void) 194void __init omap_4430sdp_display_init_of(void)
208{ 195{
209 int r; 196 int r;
210 197
@@ -219,33 +206,10 @@ void __init omap_4430sdp_display_init(void)
219 pr_err("%s: Could not get DLP POWER ON GPIO\n", __func__); 206 pr_err("%s: Could not get DLP POWER ON GPIO\n", __func__);
220 207
221 omap_display_init(&sdp4430_dss_data); 208 omap_display_init(&sdp4430_dss_data);
222 /*
223 * OMAP4460SDP/Blaze and OMAP4430 ES2.3 SDP/Blaze boards and
224 * later have external pull up on the HDMI I2C lines
225 */
226 if (cpu_is_omap446x() || omap_rev() > OMAP4430_REV_ES2_2)
227 omap_hdmi_init(OMAP_HDMI_SDA_SCL_EXTERNAL_PULLUP);
228 else
229 omap_hdmi_init(0);
230
231 omap_mux_init_gpio(HDMI_GPIO_LS_OE, OMAP_PIN_OUTPUT);
232 omap_mux_init_gpio(HDMI_GPIO_CT_CP_HPD, OMAP_PIN_OUTPUT);
233 omap_mux_init_gpio(HDMI_GPIO_HPD, OMAP_PIN_INPUT_PULLDOWN);
234}
235
236void __init omap_4430sdp_display_init_of(void)
237{
238 int r;
239 209
240 r = gpio_request_one(DISPLAY_SEL_GPIO, GPIOF_OUT_INIT_HIGH, 210 platform_device_register(&sdp4430_lcd_device);
241 "display_sel"); 211 platform_device_register(&sdp4430_lcd2_device);
242 if (r)
243 pr_err("%s: Could not get display_sel GPIO\n", __func__);
244
245 r = gpio_request_one(DLP_POWER_ON_GPIO, GPIOF_OUT_INIT_LOW,
246 "DLP POWER ON");
247 if (r)
248 pr_err("%s: Could not get DLP POWER ON GPIO\n", __func__);
249 212
250 omap_display_init(&sdp4430_dss_data); 213 platform_device_register(&sdp4430_tpd_device);
214 platform_device_register(&sdp4430_hdmi_connector_device);
251} 215}
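The rewritten board files stop describing displays as omap_dss_device entries and instead register one small platform device per link in the video chain; each element names itself (.name) and its upstream source (.source), and the DSS core matches the strings at probe time. Read off the platform data above, the pipelines are:

	dpi.0  -> tfp410.0    -> dvi   (PandaBoard DVI)
	hdmi.0 -> tpd12s015.0 -> hdmi  (HDMI, both boards)
	dsi.0  -> lcd / dsi.1 -> lcd2  (4430SDP DSI command-mode panels)

omap_dss_board_info shrinks to just default_display_name, since the per-board device arrays are no longer needed.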
diff --git a/arch/arm/mach-omap2/dss-common.h b/arch/arm/mach-omap2/dss-common.h
index 915f6fff5106..c28fe3c03588 100644
--- a/arch/arm/mach-omap2/dss-common.h
+++ b/arch/arm/mach-omap2/dss-common.h
@@ -6,9 +6,7 @@
  * This file will be removed when DSS supports DT.
  */
 
-void __init omap4_panda_display_init(void);
 void __init omap4_panda_display_init_of(void);
-void __init omap_4430sdp_display_init(void);
 void __init omap_4430sdp_display_init_of(void);
 
 #endif
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
index f3fdd6afa213..9f4795aff48a 100644
--- a/arch/arm/mach-omap2/gpmc.c
+++ b/arch/arm/mach-omap2/gpmc.c
@@ -149,7 +149,7 @@ struct omap3_gpmc_regs {
 
 static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
 static struct irq_chip gpmc_irq_chip;
-static unsigned gpmc_irq_start;
+static int gpmc_irq_start;
 
 static struct resource gpmc_mem_root;
 static struct resource gpmc_cs_mem[GPMC_CS_NUM];
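Why the signedness matters: gpmc_irq_start receives the return value of irq_alloc_descs(), which is either the first allocated irq number or a negative errno. With an unsigned variable the error check can never fire. A sketch of the usage this fixes (assuming the gpmc_setup_irq() code elsewhere in this file, not shown):

	gpmc_irq_start = irq_alloc_descs(-1, 0, GPMC_NR_IRQ, 0);
	if (gpmc_irq_start < 0) {	/* always false if unsigned */
		pr_err("irq_alloc_descs failed\n");
		return gpmc_irq_start;
	}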
diff --git a/arch/arm/mach-omap2/i2c.c b/arch/arm/mach-omap2/i2c.c
index d940e53dd9f2..b456b4471f35 100644
--- a/arch/arm/mach-omap2/i2c.c
+++ b/arch/arm/mach-omap2/i2c.c
@@ -181,7 +181,7 @@ int __init omap_i2c_add_bus(struct omap_i2c_bus_platform_data *i2c_pdata,
 				 sizeof(struct omap_i2c_bus_platform_data));
 	WARN(IS_ERR(pdev), "Could not build omap_device for %s\n", name);
 
-	return PTR_RET(pdev);
+	return PTR_ERR_OR_ZERO(pdev);
 }
 
 static int __init omap_i2c_cmdline(void)
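PTR_RET() was renamed to the clearer PTR_ERR_OR_ZERO() in <linux/err.h>; behaviour is identical. The helper is essentially:

	static inline int __must_check PTR_ERR_OR_ZERO(__force const void *ptr)
	{
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);
		else
			return 0;
	}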
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
index 5cc92874be7e..f99f68e1e85b 100644
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
@@ -129,6 +129,7 @@ static int omap_device_build_from_dt(struct platform_device *pdev)
 	struct device_node *node = pdev->dev.of_node;
 	const char *oh_name;
 	int oh_cnt, i, ret = 0;
+	bool device_active = false;
 
 	oh_cnt = of_property_count_strings(node, "ti,hwmods");
 	if (oh_cnt <= 0) {
@@ -152,6 +153,8 @@ static int omap_device_build_from_dt(struct platform_device *pdev)
 			goto odbfd_exit1;
 		}
 		hwmods[i] = oh;
+		if (oh->flags & HWMOD_INIT_NO_IDLE)
+			device_active = true;
 	}
 
 	od = omap_device_alloc(pdev, hwmods, oh_cnt);
@@ -172,6 +175,11 @@ static int omap_device_build_from_dt(struct platform_device *pdev)
 
 	pdev->dev.pm_domain = &omap_device_pm_domain;
 
+	if (device_active) {
+		omap_device_enable(pdev);
+		pm_runtime_set_active(&pdev->dev);
+	}
+
 odbfd_exit1:
 	kfree(hwmods);
 odbfd_exit:
@@ -842,6 +850,7 @@ static int __init omap_device_late_idle(struct device *dev, void *data)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct omap_device *od = to_omap_device(pdev);
+	int i;
 
 	if (!od)
 		return 0;
@@ -850,6 +859,15 @@ static int __init omap_device_late_idle(struct device *dev, void *data)
 	 * If omap_device state is enabled, but has no driver bound,
 	 * idle it.
 	 */
+
+	/*
+	 * Some devices (like memory controllers) are always kept
+	 * enabled, and should not be idled even with no drivers.
+	 */
+	for (i = 0; i < od->hwmods_cnt; i++)
+		if (od->hwmods[i]->flags & HWMOD_INIT_NO_IDLE)
+			return 0;
+
 	if (od->_driver_status != BUS_NOTIFY_BOUND_DRIVER) {
 		if (od->_state == OMAP_DEVICE_STATE_ENABLED) {
 			dev_warn(dev, "%s: enabled but no driver. Idling\n",
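Both hunks deal with hwmods carrying HWMOD_INIT_NO_IDLE (for example the DEBUG_LL console UART selected via the new flag macros in omap_hwmod.h below): such modules are left running by the early hwmod code, so the omap_device layer must hand them to runtime PM as already active, and must not idle them from the late initcall. The handoff pattern, in sketch form:

	/* hardware was left enabled by init code, so sync the RPM core
	 * instead of letting it assume the device is suspended */
	omap_device_enable(pdev);
	pm_runtime_set_active(&pdev->dev);	/* no callbacks are run */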
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 7341eff63f56..7f4db12b1459 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -2386,7 +2386,7 @@ static void __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data)
 
 		np = of_dev_hwmod_lookup(of_find_node_by_name(NULL, "ocp"), oh);
 		if (np)
-			va_start = of_iomap(np, 0);
+			va_start = of_iomap(np, oh->mpu_rt_idx);
 	} else {
 		va_start = ioremap(mem->pa_start, mem->pa_end - mem->pa_start);
 	}
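mpu_rt_idx selects which entry of the device's DT reg property holds the MPU register target; until now the code always mapped entry 0. The am33xx CPSW hunk below sets .mpu_rt_idx = 1 because its node has roughly this shape (illustrative values):

	mac: ethernet@4a100000 {
		reg = <0x4a100000 0x800>,	/* entry 0 */
		      <0x4a101200 0x100>;	/* entry 1: register target */
	};

so of_iomap(np, oh->mpu_rt_idx) maps the second range instead of the first.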
diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h
index aab33fd814c0..e1482a9b3bc2 100644
--- a/arch/arm/mach-omap2/omap_hwmod.h
+++ b/arch/arm/mach-omap2/omap_hwmod.h
@@ -95,6 +95,54 @@ extern struct omap_hwmod_sysc_fields omap_hwmod_sysc_type3;
 #define MODULEMODE_HWCTRL	1
 #define MODULEMODE_SWCTRL	2
 
+#define DEBUG_OMAP2UART1_FLAGS	0
+#define DEBUG_OMAP2UART2_FLAGS	0
+#define DEBUG_OMAP2UART3_FLAGS	0
+#define DEBUG_OMAP3UART3_FLAGS	0
+#define DEBUG_OMAP3UART4_FLAGS	0
+#define DEBUG_OMAP4UART3_FLAGS	0
+#define DEBUG_OMAP4UART4_FLAGS	0
+#define DEBUG_TI81XXUART1_FLAGS	0
+#define DEBUG_TI81XXUART2_FLAGS	0
+#define DEBUG_TI81XXUART3_FLAGS	0
+#define DEBUG_AM33XXUART1_FLAGS	0
+
+#define DEBUG_OMAPUART_FLAGS	(HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET)
+
+#if defined(CONFIG_DEBUG_OMAP2UART1)
+#undef DEBUG_OMAP2UART1_FLAGS
+#define DEBUG_OMAP2UART1_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_OMAP2UART2)
+#undef DEBUG_OMAP2UART2_FLAGS
+#define DEBUG_OMAP2UART2_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_OMAP2UART3)
+#undef DEBUG_OMAP2UART3_FLAGS
+#define DEBUG_OMAP2UART3_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_OMAP3UART3)
+#undef DEBUG_OMAP3UART3_FLAGS
+#define DEBUG_OMAP3UART3_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_OMAP3UART4)
+#undef DEBUG_OMAP3UART4_FLAGS
+#define DEBUG_OMAP3UART4_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_OMAP4UART3)
+#undef DEBUG_OMAP4UART3_FLAGS
+#define DEBUG_OMAP4UART3_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_OMAP4UART4)
+#undef DEBUG_OMAP4UART4_FLAGS
+#define DEBUG_OMAP4UART4_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_TI81XXUART1)
+#undef DEBUG_TI81XXUART1_FLAGS
+#define DEBUG_TI81XXUART1_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_TI81XXUART2)
+#undef DEBUG_TI81XXUART2_FLAGS
+#define DEBUG_TI81XXUART2_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_TI81XXUART3)
+#undef DEBUG_TI81XXUART3_FLAGS
+#define DEBUG_TI81XXUART3_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_AM33XXUART1)
+#undef DEBUG_AM33XXUART1_FLAGS
+#define DEBUG_AM33XXUART1_FLAGS DEBUG_OMAPUART_FLAGS
+#endif
 
 /**
  * struct omap_hwmod_mux_info - hwmod specific mux configuration
@@ -568,6 +616,7 @@ struct omap_hwmod_link {
  * @voltdm: pointer to voltage domain (filled in at runtime)
  * @dev_attr: arbitrary device attributes that can be passed to the driver
  * @_sysc_cache: internal-use hwmod flags
+ * @mpu_rt_idx: index of device address space for register target (for DT boot)
  * @_mpu_rt_va: cached register target start address (internal use)
  * @_mpu_port: cached MPU register target slave (internal use)
  * @opt_clks_cnt: number of @opt_clks
@@ -617,6 +666,7 @@ struct omap_hwmod {
 	struct list_head node;
 	struct omap_hwmod_ocp_if *_mpu_port;
 	u16 flags;
+	u8 mpu_rt_idx;
 	u8 response_lat;
 	u8 rst_lines_cnt;
 	u8 opt_clks_cnt;
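The Kconfig-driven #elif chain means at most one DEBUG_*UART*_FLAGS macro expands to (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET) while all the others stay 0, so the hwmod data files can OR the macro in unconditionally. For example, with CONFIG_DEBUG_OMAP4UART3=y the omap44xx uart3 entry below effectively compiles to:

	.flags = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET |
		 HWMOD_SWSUP_SIDLE_ACT,

while every other UART keeps only its plain flags.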
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
index d05fc7b54567..56cebb05509e 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
@@ -512,7 +512,7 @@ struct omap_hwmod omap2xxx_uart1_hwmod = {
 	.mpu_irqs = omap2_uart1_mpu_irqs,
 	.sdma_reqs = omap2_uart1_sdma_reqs,
 	.main_clk = "uart1_fck",
-	.flags = HWMOD_SWSUP_SIDLE_ACT,
+	.flags = DEBUG_OMAP2UART1_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
 	.prcm = {
 		.omap2 = {
 			.module_offs = CORE_MOD,
@@ -532,7 +532,7 @@ struct omap_hwmod omap2xxx_uart2_hwmod = {
 	.mpu_irqs = omap2_uart2_mpu_irqs,
 	.sdma_reqs = omap2_uart2_sdma_reqs,
 	.main_clk = "uart2_fck",
-	.flags = HWMOD_SWSUP_SIDLE_ACT,
+	.flags = DEBUG_OMAP2UART2_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
 	.prcm = {
 		.omap2 = {
 			.module_offs = CORE_MOD,
@@ -552,7 +552,7 @@ struct omap_hwmod omap2xxx_uart3_hwmod = {
 	.mpu_irqs = omap2_uart3_mpu_irqs,
 	.sdma_reqs = omap2_uart3_sdma_reqs,
 	.main_clk = "uart3_fck",
-	.flags = HWMOD_SWSUP_SIDLE_ACT,
+	.flags = DEBUG_OMAP2UART3_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
 	.prcm = {
 		.omap2 = {
 			.module_offs = CORE_MOD,
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
index 28bbd56346a9..eb2f3b93b51c 100644
--- a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
@@ -562,6 +562,7 @@ static struct omap_hwmod am33xx_cpgmac0_hwmod = {
 	.clkdm_name = "cpsw_125mhz_clkdm",
 	.flags = (HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY),
 	.main_clk = "cpsw_125mhz_gclk",
+	.mpu_rt_idx = 1,
 	.prcm = {
 		.omap4 = {
 			.clkctrl_offs = AM33XX_CM_PER_CPGMAC0_CLKCTRL_OFFSET,
@@ -1512,7 +1513,7 @@ static struct omap_hwmod am33xx_uart1_hwmod = {
 	.name = "uart1",
 	.class = &uart_class,
 	.clkdm_name = "l4_wkup_clkdm",
-	.flags = HWMOD_SWSUP_SIDLE_ACT,
+	.flags = DEBUG_AM33XXUART1_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
 	.main_clk = "dpll_per_m2_div4_wkupdm_ck",
 	.prcm = {
 		.omap4 = {
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index f7a3df2fb579..0c3a427da544 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -490,7 +490,7 @@ static struct omap_hwmod omap3xxx_uart1_hwmod = {
 	.mpu_irqs = omap2_uart1_mpu_irqs,
 	.sdma_reqs = omap2_uart1_sdma_reqs,
 	.main_clk = "uart1_fck",
-	.flags = HWMOD_SWSUP_SIDLE_ACT,
+	.flags = DEBUG_TI81XXUART1_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
 	.prcm = {
 		.omap2 = {
 			.module_offs = CORE_MOD,
@@ -509,7 +509,7 @@ static struct omap_hwmod omap3xxx_uart2_hwmod = {
 	.mpu_irqs = omap2_uart2_mpu_irqs,
 	.sdma_reqs = omap2_uart2_sdma_reqs,
 	.main_clk = "uart2_fck",
-	.flags = HWMOD_SWSUP_SIDLE_ACT,
+	.flags = DEBUG_TI81XXUART2_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
 	.prcm = {
 		.omap2 = {
 			.module_offs = CORE_MOD,
@@ -528,7 +528,8 @@ static struct omap_hwmod omap3xxx_uart3_hwmod = {
 	.mpu_irqs = omap2_uart3_mpu_irqs,
 	.sdma_reqs = omap2_uart3_sdma_reqs,
 	.main_clk = "uart3_fck",
-	.flags = HWMOD_SWSUP_SIDLE_ACT,
+	.flags = DEBUG_OMAP3UART3_FLAGS | DEBUG_TI81XXUART3_FLAGS |
+		HWMOD_SWSUP_SIDLE_ACT,
 	.prcm = {
 		.omap2 = {
 			.module_offs = OMAP3430_PER_MOD,
@@ -558,7 +559,7 @@ static struct omap_hwmod omap36xx_uart4_hwmod = {
 	.mpu_irqs = uart4_mpu_irqs,
 	.sdma_reqs = uart4_sdma_reqs,
 	.main_clk = "uart4_fck",
-	.flags = HWMOD_SWSUP_SIDLE_ACT,
+	.flags = DEBUG_OMAP3UART4_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
 	.prcm = {
 		.omap2 = {
 			.module_offs = OMAP3430_PER_MOD,
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index d04b5e60fdbe..9c3b504477d7 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -2858,8 +2858,7 @@ static struct omap_hwmod omap44xx_uart3_hwmod = {
 	.name = "uart3",
 	.class = &omap44xx_uart_hwmod_class,
 	.clkdm_name = "l4_per_clkdm",
-	.flags = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET |
-			HWMOD_SWSUP_SIDLE_ACT,
+	.flags = DEBUG_OMAP4UART3_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
 	.main_clk = "func_48m_fclk",
 	.prcm = {
 		.omap4 = {
@@ -2875,7 +2874,7 @@ static struct omap_hwmod omap44xx_uart4_hwmod = {
 	.name = "uart4",
 	.class = &omap44xx_uart_hwmod_class,
 	.clkdm_name = "l4_per_clkdm",
-	.flags = HWMOD_SWSUP_SIDLE_ACT,
+	.flags = DEBUG_OMAP4UART4_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
 	.main_clk = "func_48m_fclk",
 	.prcm = {
 		.omap4 = {
diff --git a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
index 5c6bbe5b7fb6..b4d04748576b 100644
--- a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
@@ -1374,7 +1374,7 @@ static struct omap_hwmod omap54xx_uart3_hwmod = {
 	.name = "uart3",
 	.class = &omap54xx_uart_hwmod_class,
 	.clkdm_name = "l4per_clkdm",
-	.flags = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET,
+	.flags = DEBUG_OMAP4UART3_FLAGS,
 	.main_clk = "func_48m_fclk",
 	.prcm = {
 		.omap4 = {
@@ -1390,6 +1390,7 @@ static struct omap_hwmod omap54xx_uart4_hwmod = {
 	.name = "uart4",
 	.class = &omap54xx_uart_hwmod_class,
 	.clkdm_name = "l4per_clkdm",
+	.flags = DEBUG_OMAP4UART4_FLAGS,
 	.main_clk = "func_48m_fclk",
 	.prcm = {
 		.omap4 = {
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c
index 3a674de6cb63..a388f8c1bcb3 100644
--- a/arch/arm/mach-omap2/serial.c
+++ b/arch/arm/mach-omap2/serial.c
@@ -208,17 +208,6 @@ static int __init omap_serial_early_init(void)
 			pr_info("%s used as console in debug mode: uart%d clocks will not be gated",
 				uart_name, uart->num);
 		}
-
-		/*
-		 * omap-uart can be used for earlyprintk logs
-		 * So if omap-uart is used as console then prevent
-		 * uart reset and idle to get logs from omap-uart
-		 * until uart console driver is available to take
-		 * care for console messages.
-		 * Idling or resetting omap-uart while printing logs
-		 * early boot logs can stall the boot-up.
-		 */
-		oh->flags |= HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET;
 	}
 	} while (1);
 
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index 5a9ee0b48b62..801287ee4d98 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -537,7 +537,7 @@ static void __init realtime_counter_init(void)
 	reg |= num;
 	__raw_writel(reg, base + INCREMENTER_NUMERATOR_OFFSET);
 
-	reg = __raw_readl(base + INCREMENTER_NUMERATOR_OFFSET) &
+	reg = __raw_readl(base + INCREMENTER_DENUMERATOR_RELOAD_OFFSET) &
 			NUMERATOR_DENUMERATOR_MASK;
 	reg |= den;
 	__raw_writel(reg, base + INCREMENTER_DENUMERATOR_RELOAD_OFFSET);
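The one-liner fixes a read-modify-write that mixed up registers: the denominator reload value was merged into bits read back from the numerator register. The general rule the fix restores, sketched with hypothetical FIELD_* names:

	u32 v = __raw_readl(reg);	/* read the register being updated */
	v &= FIELD_MASK;		/* keep only the preserved bits    */
	v |= new_field;			/* insert the new value            */
	__raw_writel(v, reg);		/* write the same register back    */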
diff --git a/arch/arm/mach-omap2/usb-musb.c b/arch/arm/mach-omap2/usb-musb.c
index 8c4de2708cf2..bc897231bd10 100644
--- a/arch/arm/mach-omap2/usb-musb.c
+++ b/arch/arm/mach-omap2/usb-musb.c
@@ -38,11 +38,8 @@ static struct musb_hdrc_config musb_config = {
 };
 
 static struct musb_hdrc_platform_data musb_plat = {
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
 	.mode = MUSB_OTG,
-#else
-	.mode = MUSB_HOST,
-#endif
+
 	/* .clock is set dynamically */
 	.config = &musb_config,
 
diff --git a/arch/arm/mach-orion5x/include/mach/debug-macro.S b/arch/arm/mach-orion5x/include/mach/debug-macro.S
deleted file mode 100644
index f340ed8f8dd0..000000000000
--- a/arch/arm/mach-orion5x/include/mach/debug-macro.S
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * arch/arm/mach-orion5x/include/mach/debug-macro.S
- *
- * Debugging macro include header
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <mach/orion5x.h>
-
-	.macro	addruart, rp, rv, tmp
-	ldr	\rp, =ORION5X_REGS_PHYS_BASE
-	ldr	\rv, =ORION5X_REGS_VIRT_BASE
-	orr	\rp, \rp, #0x00012000
-	orr	\rv, \rv, #0x00012000
-	.endm
-
-#define UART_SHIFT	2
-#include <asm/hardware/debug-8250.S>
diff --git a/arch/arm/mach-prima2/common.c b/arch/arm/mach-prima2/common.c
index 2c70f74fed5d..e110b6d4ae8c 100644
--- a/arch/arm/mach-prima2/common.c
+++ b/arch/arm/mach-prima2/common.c
@@ -42,7 +42,6 @@ static const char *atlas6_dt_match[] __initdata = {
 
 DT_MACHINE_START(ATLAS6_DT, "Generic ATLAS6 (Flattened Device Tree)")
 	/* Maintainer: Barry Song <baohua.song@csr.com> */
-	.nr_irqs	= 128,
 	.map_io		= sirfsoc_map_io,
 	.init_time	= sirfsoc_init_time,
 	.init_late	= sirfsoc_init_late,
@@ -59,7 +58,6 @@ static const char *prima2_dt_match[] __initdata = {
 
 DT_MACHINE_START(PRIMA2_DT, "Generic PRIMA2 (Flattened Device Tree)")
 	/* Maintainer: Barry Song <baohua.song@csr.com> */
-	.nr_irqs	= 128,
 	.map_io		= sirfsoc_map_io,
 	.init_time	= sirfsoc_init_time,
 	.dma_zone_size	= SZ_256M,
diff --git a/arch/arm/mach-pxa/icontrol.c b/arch/arm/mach-pxa/icontrol.c
index fe31bfcbb8df..c98511c5abd1 100644
--- a/arch/arm/mach-pxa/icontrol.c
+++ b/arch/arm/mach-pxa/icontrol.c
@@ -73,9 +73,6 @@ static struct pxa2xx_spi_chip mcp251x_chip_info4 = {
 
 static struct mcp251x_platform_data mcp251x_info = {
 	.oscillator_frequency = 16E6,
-	.board_specific_setup = NULL,
-	.power_enable = NULL,
-	.transceiver_enable = NULL
 };
 
 static struct spi_board_info mcp251x_board_info[] = {
diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c
index 126e4a806a07..b19d1c361cab 100644
--- a/arch/arm/mach-pxa/zeus.c
+++ b/arch/arm/mach-pxa/zeus.c
@@ -29,6 +29,8 @@
 #include <linux/platform_data/pca953x.h>
 #include <linux/apm-emulation.h>
 #include <linux/can/platform/mcp251x.h>
+#include <linux/regulator/fixed.h>
+#include <linux/regulator/machine.h>
 
 #include <asm/mach-types.h>
 #include <asm/suspend.h>
@@ -391,33 +393,34 @@ static struct pxa2xx_spi_master pxa2xx_spi_ssp3_master_info = {
 };
 
 /* CAN bus on SPI */
-static int zeus_mcp2515_setup(struct spi_device *sdev)
-{
-	int err;
-
-	err = gpio_request(ZEUS_CAN_SHDN_GPIO, "CAN shutdown");
-	if (err)
-		return err;
+static struct regulator_consumer_supply can_regulator_consumer =
+	REGULATOR_SUPPLY("vdd", "spi3.0");
 
-	err = gpio_direction_output(ZEUS_CAN_SHDN_GPIO, 1);
-	if (err) {
-		gpio_free(ZEUS_CAN_SHDN_GPIO);
-		return err;
-	}
+static struct regulator_init_data can_regulator_init_data = {
+	.constraints	= {
+		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
+	},
+	.consumer_supplies	= &can_regulator_consumer,
+	.num_consumer_supplies	= 1,
+};
 
-	return 0;
-}
+static struct fixed_voltage_config can_regulator_pdata = {
+	.supply_name	= "CAN_SHDN",
+	.microvolts	= 3300000,
+	.gpio		= ZEUS_CAN_SHDN_GPIO,
+	.init_data	= &can_regulator_init_data,
+};
 
-static int zeus_mcp2515_transceiver_enable(int enable)
-{
-	gpio_set_value(ZEUS_CAN_SHDN_GPIO, !enable);
-	return 0;
-}
+static struct platform_device can_regulator_device = {
+	.name	= "reg-fixed-volage",
+	.id	= -1,
+	.dev	= {
+		.platform_data	= &can_regulator_pdata,
+	},
+};
 
 static struct mcp251x_platform_data zeus_mcp2515_pdata = {
 	.oscillator_frequency	= 16*1000*1000,
-	.board_specific_setup	= zeus_mcp2515_setup,
-	.power_enable		= zeus_mcp2515_transceiver_enable,
 };
 
 static struct spi_board_info zeus_spi_board_info[] = {
@@ -516,6 +519,7 @@ static struct platform_device *zeus_devices[] __initdata = {
 	&zeus_leds_device,
 	&zeus_pcmcia_device,
 	&zeus_max6369_device,
+	&can_regulator_device,
 };
 
 /* AC'97 */
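The mcp251x driver now manages its transceiver through the regulator framework, so the board models the CAN_SHDN GPIO as a GPIO-backed fixed regulator instead of open-coded callbacks; REGULATOR_SUPPLY("vdd", "spi3.0") routes it to the MCP2515 on SPI bus 3, chip select 0 (SPI devices are named "spiB.C"). One caveat worth flagging: the platform-device name "reg-fixed-volage" appears to be a typo for "reg-fixed-voltage", the name the fixed-regulator driver actually registers, so as written the regulator device would likely never bind (the string is reproduced here unchanged from the patch). On the consumer side the driver can now do, roughly:

	struct regulator *reg;

	reg = devm_regulator_get(&spi->dev, "vdd");	/* matches the supply above */
	if (!IS_ERR(reg))
		regulator_enable(reg);	/* drives CAN_SHDN via the fixed regulator */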
diff --git a/arch/arm/mach-realview/include/mach/debug-macro.S b/arch/arm/mach-realview/include/mach/debug-macro.S
deleted file mode 100644
index 8cc372dc66a8..000000000000
--- a/arch/arm/mach-realview/include/mach/debug-macro.S
+++ /dev/null
@@ -1,29 +0,0 @@
-/* arch/arm/mach-realview/include/mach/debug-macro.S
- *
- * Debugging macro include header
- *
- * Copyright (C) 1994-1999 Russell King
- * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifdef CONFIG_DEBUG_REALVIEW_STD_PORT
-#define DEBUG_LL_UART_OFFSET	0x00009000
-#elif defined(CONFIG_DEBUG_REALVIEW_PB1176_PORT)
-#define DEBUG_LL_UART_OFFSET	0x0010c000
-#endif
-
-#ifndef DEBUG_LL_UART_OFFSET
-#error "Unknown RealView platform"
-#endif
-
-	.macro	addruart, rp, rv, tmp
-	mov	\rp, #DEBUG_LL_UART_OFFSET
-	orr	\rv, \rp, #0xfb000000	@ virtual base
-	orr	\rp, \rp, #0x10000000	@ physical base
-	.endm
-
-#include <asm/hardware/debug-pl01x.S>
diff --git a/arch/arm/mach-rpc/include/mach/debug-macro.S b/arch/arm/mach-rpc/include/mach/debug-macro.S
deleted file mode 100644
index 6d28cc99b124..000000000000
--- a/arch/arm/mach-rpc/include/mach/debug-macro.S
+++ /dev/null
@@ -1,23 +0,0 @@
-/* arch/arm/mach-rpc/include/mach/debug-macro.S
- *
- * Debugging macro include header
- *
- * Copyright (C) 1994-1999 Russell King
- * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
-*/
-
-	.macro	addruart, rp, rv, tmp
-	mov	\rp, #0x00010000
-	orr	\rp, \rp, #0x00000fe0
-	orr	\rv, \rp, #0xe0000000	@ virtual
-	orr	\rp, \rp, #0x03000000	@ physical
-	.endm
-
-#define UART_SHIFT	2
-#define FLOW_CONTROL
-#include <asm/hardware/debug-8250.S>
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva-reference.c b/arch/arm/mach-shmobile/board-armadillo800eva-reference.c
index 03b85fec2ddb..8f677df2d4c4 100644
--- a/arch/arm/mach-shmobile/board-armadillo800eva-reference.c
+++ b/arch/arm/mach-shmobile/board-armadillo800eva-reference.c
@@ -190,10 +190,10 @@ static void __init eva_init(void)
 }
 
 #define RESCNT2 IOMEM(0xe6188020)
-static void eva_restart(char mode, const char *cmd)
+static void eva_restart(enum reboot_mode mode, const char *cmd)
 {
 	/* Do soft power on reset */
-	writel((1 << 31), RESCNT2);
+	writel(1 << 31, RESCNT2);
 }
 
 static const char *eva_boards_compat_dt[] __initdata = {
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c
index 66cfd5686578..4e3670a28a7c 100644
--- a/arch/arm/mach-shmobile/board-armadillo800eva.c
+++ b/arch/arm/mach-shmobile/board-armadillo800eva.c
@@ -358,7 +358,6 @@ static struct platform_device usbhsf_device = {
 static struct sh_eth_plat_data sh_eth_platdata = {
 	.phy = 0x00, /* LAN8710A */
 	.edmac_endian = EDMAC_LITTLE_ENDIAN,
-	.register_type = SH_ETH_REG_GIGABIT,
 	.phy_interface = PHY_INTERFACE_MODE_MII,
 };
 
@@ -1153,9 +1152,6 @@ static void __init eva_init(void)
 	gpio_request_one(61, GPIOF_OUT_INIT_HIGH, NULL); /* LCDDON */
 	gpio_request_one(202, GPIOF_OUT_INIT_LOW, NULL); /* LCD0_LED_CONT */
 
-	/* Touchscreen */
-	gpio_request_one(166, GPIOF_OUT_INIT_HIGH, NULL); /* TP_RST_B */
-
 	/* GETHER */
 	gpio_request_one(18, GPIOF_OUT_INIT_HIGH, NULL); /* PHY_RST */
 
diff --git a/arch/arm/mach-shmobile/board-bockw.c b/arch/arm/mach-shmobile/board-bockw.c
index d5554646916c..35dd7f201a16 100644
--- a/arch/arm/mach-shmobile/board-bockw.c
+++ b/arch/arm/mach-shmobile/board-bockw.c
@@ -3,6 +3,7 @@
  *
  * Copyright (C) 2013 Renesas Solutions Corp.
  * Copyright (C) 2013 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ * Copyright (C) 2013 Cogent Embedded, Inc.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -28,6 +29,7 @@
 #include <linux/smsc911x.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/flash.h>
+#include <media/soc_camera.h>
 #include <mach/common.h>
 #include <mach/irqs.h>
 #include <mach/r8a7778.h>
@@ -89,7 +91,6 @@ static struct sh_mobile_sdhi_info sdhi0_info = {
 static struct sh_eth_plat_data ether_platform_data __initdata = {
 	.phy = 0x01,
 	.edmac_endian = EDMAC_LITTLE_ENDIAN,
-	.register_type = SH_ETH_REG_FAST_RCAR,
 	.phy_interface = PHY_INTERFACE_MODE_RMII,
 	/*
 	 * Although the LINK signal is available on the board, it's connected to
@@ -143,6 +144,25 @@ static struct sh_mmcif_plat_data sh_mmcif_plat = {
 		  MMC_CAP_NEEDS_POLL,
 };
 
+static struct rcar_vin_platform_data vin_platform_data __initdata = {
+	.flags = RCAR_VIN_BT656,
+};
+
+/* In the default configuration both decoders reside on I2C bus 0 */
+#define BOCKW_CAMERA(idx)						\
+static struct i2c_board_info camera##idx##_info = {			\
+	I2C_BOARD_INFO("ml86v7667", 0x41 + 2 * (idx)),			\
+};									\
+									\
+static struct soc_camera_link iclink##idx##_ml86v7667 __initdata = {	\
+	.bus_id		= idx,						\
+	.i2c_adapter_id	= 0,						\
+	.board_info	= &camera##idx##_info,				\
+}
+
+BOCKW_CAMERA(0);
+BOCKW_CAMERA(1);
+
 static const struct pinctrl_map bockw_pinctrl_map[] = {
 	/* Ether */
 	PIN_MAP_MUX_GROUP_DEFAULT("r8a777x-ether", "pfc-r8a7778",
@@ -167,7 +187,23 @@ static const struct pinctrl_map bockw_pinctrl_map[] = {
 				  "usb1", "usb1"),
 	/* SDHI0 */
 	PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7778",
-				  "sdhi0", "sdhi0"),
+				  "sdhi0_data4", "sdhi0"),
+	PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7778",
+				  "sdhi0_ctrl", "sdhi0"),
+	PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7778",
+				  "sdhi0_cd", "sdhi0"),
+	PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7778",
+				  "sdhi0_wp", "sdhi0"),
+	/* VIN0 */
+	PIN_MAP_MUX_GROUP_DEFAULT("r8a7778-vin.0", "pfc-r8a7778",
+				  "vin0_clk", "vin0"),
+	PIN_MAP_MUX_GROUP_DEFAULT("r8a7778-vin.0", "pfc-r8a7778",
+				  "vin0_data8", "vin0"),
+	/* VIN1 */
+	PIN_MAP_MUX_GROUP_DEFAULT("r8a7778-vin.1", "pfc-r8a7778",
+				  "vin1_clk", "vin1"),
+	PIN_MAP_MUX_GROUP_DEFAULT("r8a7778-vin.1", "pfc-r8a7778",
+				  "vin1_data8", "vin1"),
 };
 
 #define FPGA 0x18200000
@@ -186,6 +222,16 @@ static void __init bockw_init(void)
 	r8a7778_add_i2c_device(0);
 	r8a7778_add_hspi_device(0);
 	r8a7778_add_mmc_device(&sh_mmcif_plat);
+	r8a7778_add_vin_device(0, &vin_platform_data);
+	/* VIN1 has a pin conflict with Ether */
+	if (!IS_ENABLED(CONFIG_SH_ETH))
+		r8a7778_add_vin_device(1, &vin_platform_data);
+	platform_device_register_data(&platform_bus, "soc-camera-pdrv", 0,
+				      &iclink0_ml86v7667,
+				      sizeof(iclink0_ml86v7667));
+	platform_device_register_data(&platform_bus, "soc-camera-pdrv", 1,
+				      &iclink1_ml86v7667,
+				      sizeof(iclink1_ml86v7667));
 
 	i2c_register_board_info(0, i2c0_devices,
 				ARRAY_SIZE(i2c0_devices));
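The BOCKW_CAMERA() helper stamps out one i2c_board_info/soc_camera_link pair per ML86V7667 video decoder. Expanding BOCKW_CAMERA(0) mechanically gives:

	static struct i2c_board_info camera0_info = {
		I2C_BOARD_INFO("ml86v7667", 0x41),	/* 0x41 + 2 * 0 */
	};

	static struct soc_camera_link iclink0_ml86v7667 __initdata = {
		.bus_id		= 0,	/* VIN0 */
		.i2c_adapter_id	= 0,
		.board_info	= &camera0_info,
	};

BOCKW_CAMERA(1) differs only in the I2C address (0x43) and .bus_id (VIN1).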
diff --git a/arch/arm/mach-shmobile/board-lager.c b/arch/arm/mach-shmobile/board-lager.c
index 1e99b17767bb..78d92d34665d 100644
--- a/arch/arm/mach-shmobile/board-lager.c
+++ b/arch/arm/mach-shmobile/board-lager.c
@@ -58,7 +58,7 @@ static __initdata struct gpio_led_platform_data lager_leds_pdata = {
 #define GPIO_KEY(c, g, d, ...) \
 	{ .code = c, .gpio = g, .desc = d, .active_low = 1 }
 
-static __initdata struct gpio_keys_button gpio_buttons[] = {
+static struct gpio_keys_button gpio_buttons[] = {
 	GPIO_KEY(KEY_4, RCAR_GP_PIN(1, 28), "SW2-pin4"),
 	GPIO_KEY(KEY_3, RCAR_GP_PIN(1, 26), "SW2-pin3"),
 	GPIO_KEY(KEY_2, RCAR_GP_PIN(1, 24), "SW2-pin2"),
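The __initdata annotation has to go because gpio-keys does not copy the button table: the platform device keeps pointing at it at runtime, and everything in .init.data is discarded by free_initmem() after boot. Sketch of the lifetime problem (hypothetical wrapper struct, for illustration):

	static struct gpio_keys_platform_data keys_pdata = {
		.buttons	= gpio_buttons,	/* pointer must outlive init */
		.nbuttons	= ARRAY_SIZE(gpio_buttons),
	};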
diff --git a/arch/arm/mach-shmobile/board-marzen.c b/arch/arm/mach-shmobile/board-marzen.c
index a7d1010505bf..ca7fb2e63c60 100644
--- a/arch/arm/mach-shmobile/board-marzen.c
+++ b/arch/arm/mach-shmobile/board-marzen.c
@@ -1,8 +1,9 @@
 /*
  * marzen board support
  *
- * Copyright (C) 2011 Renesas Solutions Corp.
+ * Copyright (C) 2011, 2013 Renesas Solutions Corp.
  * Copyright (C) 2011 Magnus Damm
+ * Copyright (C) 2013 Cogent Embedded, Inc.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -37,6 +38,7 @@
 #include <linux/mmc/host.h>
 #include <linux/mmc/sh_mobile_sdhi.h>
 #include <linux/mfd/tmio.h>
+#include <media/soc_camera.h>
 #include <mach/hardware.h>
 #include <mach/r8a7779.h>
 #include <mach/common.h>
@@ -178,12 +180,40 @@ static struct platform_device leds_device = {
 	},
 };
 
+static struct rcar_vin_platform_data vin_platform_data __initdata = {
+	.flags = RCAR_VIN_BT656,
+};
+
+#define MARZEN_CAMERA(idx)					\
+static struct i2c_board_info camera##idx##_info = {		\
+	I2C_BOARD_INFO("adv7180", 0x20 + (idx)),		\
+};								\
+								\
+static struct soc_camera_link iclink##idx##_adv7180 = {		\
+	.bus_id		= 1 + 2 * (idx),			\
+	.i2c_adapter_id	= 0,					\
+	.board_info	= &camera##idx##_info,			\
+};								\
+								\
+static struct platform_device camera##idx##_device = {		\
+	.name	= "soc-camera-pdrv",				\
+	.id	= idx,						\
+	.dev	= {						\
+		.platform_data = &iclink##idx##_adv7180,	\
+	},							\
+};
+
+MARZEN_CAMERA(0);
+MARZEN_CAMERA(1);
+
 static struct platform_device *marzen_devices[] __initdata = {
 	&eth_device,
 	&sdhi0_device,
 	&thermal_device,
 	&hspi_device,
 	&leds_device,
+	&camera0_device,
+	&camera1_device,
 };
 
 static const struct pinctrl_map marzen_pinctrl_map[] = {
@@ -219,6 +249,16 @@ static const struct pinctrl_map marzen_pinctrl_map[] = {
 	/* USB2 */
 	PIN_MAP_MUX_GROUP_DEFAULT("ehci-platform.1", "pfc-r8a7779",
 				  "usb2", "usb2"),
+	/* VIN1 */
+	PIN_MAP_MUX_GROUP_DEFAULT("r8a7779-vin.1", "pfc-r8a7779",
+				  "vin1_clk", "vin1"),
+	PIN_MAP_MUX_GROUP_DEFAULT("r8a7779-vin.1", "pfc-r8a7779",
+				  "vin1_data8", "vin1"),
+	/* VIN3 */
+	PIN_MAP_MUX_GROUP_DEFAULT("r8a7779-vin.3", "pfc-r8a7779",
+				  "vin3_clk", "vin3"),
+	PIN_MAP_MUX_GROUP_DEFAULT("r8a7779-vin.3", "pfc-r8a7779",
+				  "vin3_data8", "vin3"),
 };
 
 static void __init marzen_init(void)
@@ -235,6 +275,8 @@ static void __init marzen_init(void)
 
 	r8a7779_add_standard_devices();
 	r8a7779_add_usb_phy_device(&usb_phy_platform_data);
+	r8a7779_add_vin_device(1, &vin_platform_data);
+	r8a7779_add_vin_device(3, &vin_platform_data);
 	platform_add_devices(marzen_devices, ARRAY_SIZE(marzen_devices));
 }
 
diff --git a/arch/arm/mach-shmobile/clock-r8a7740.c b/arch/arm/mach-shmobile/clock-r8a7740.c
index de10fd78bf2b..f4265e52432c 100644
--- a/arch/arm/mach-shmobile/clock-r8a7740.c
+++ b/arch/arm/mach-shmobile/clock-r8a7740.c
@@ -596,7 +596,7 @@ static struct clk_lookup lookups[] = {
596 CLKDEV_DEV_ID("e6bd0000.mmcif", &mstp_clks[MSTP312]), 596 CLKDEV_DEV_ID("e6bd0000.mmcif", &mstp_clks[MSTP312]),
597 CLKDEV_DEV_ID("r8a7740-gether", &mstp_clks[MSTP309]), 597 CLKDEV_DEV_ID("r8a7740-gether", &mstp_clks[MSTP309]),
598 CLKDEV_DEV_ID("e9a00000.sh-eth", &mstp_clks[MSTP309]), 598 CLKDEV_DEV_ID("e9a00000.sh-eth", &mstp_clks[MSTP309]),
599 CLKDEV_DEV_ID("renesas_tpu_pwm", &mstp_clks[MSTP304]), 599 CLKDEV_DEV_ID("renesas-tpu-pwm", &mstp_clks[MSTP304]),
600 600
601 CLKDEV_DEV_ID("sh_mobile_sdhi.2", &mstp_clks[MSTP415]), 601 CLKDEV_DEV_ID("sh_mobile_sdhi.2", &mstp_clks[MSTP415]),
602 CLKDEV_DEV_ID("e6870000.sdhi", &mstp_clks[MSTP415]), 602 CLKDEV_DEV_ID("e6870000.sdhi", &mstp_clks[MSTP415]),
diff --git a/arch/arm/mach-shmobile/clock-r8a7778.c b/arch/arm/mach-shmobile/clock-r8a7778.c
index a0e9eb72e46d..c4bf2d8fb111 100644
--- a/arch/arm/mach-shmobile/clock-r8a7778.c
+++ b/arch/arm/mach-shmobile/clock-r8a7778.c
@@ -106,6 +106,7 @@ enum {
 	MSTP331,
 	MSTP323, MSTP322, MSTP321,
 	MSTP114,
+	MSTP110, MSTP109,
 	MSTP100,
 	MSTP030,
 	MSTP029, MSTP028, MSTP027, MSTP026, MSTP025, MSTP024, MSTP023, MSTP022, MSTP021,
@@ -119,6 +120,8 @@ static struct clk mstp_clks[MSTP_NR] = {
 	[MSTP322] = SH_CLK_MSTP32(&p_clk, MSTPCR3, 22, 0), /* SDHI1 */
 	[MSTP321] = SH_CLK_MSTP32(&p_clk, MSTPCR3, 21, 0), /* SDHI2 */
 	[MSTP114] = SH_CLK_MSTP32(&p_clk, MSTPCR1, 14, 0), /* Ether */
+	[MSTP110] = SH_CLK_MSTP32(&s_clk, MSTPCR1, 10, 0), /* VIN0 */
+	[MSTP109] = SH_CLK_MSTP32(&s_clk, MSTPCR1, 9, 0), /* VIN1 */
 	[MSTP100] = SH_CLK_MSTP32(&p_clk, MSTPCR1, 0, 0), /* USB0/1 */
 	[MSTP030] = SH_CLK_MSTP32(&p_clk, MSTPCR0, 30, 0), /* I2C0 */
 	[MSTP029] = SH_CLK_MSTP32(&p_clk, MSTPCR0, 29, 0), /* I2C1 */
@@ -146,6 +149,8 @@ static struct clk_lookup lookups[] = {
 	CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP322]), /* SDHI1 */
 	CLKDEV_DEV_ID("sh_mobile_sdhi.2", &mstp_clks[MSTP321]), /* SDHI2 */
 	CLKDEV_DEV_ID("r8a777x-ether", &mstp_clks[MSTP114]), /* Ether */
+	CLKDEV_DEV_ID("r8a7778-vin.0", &mstp_clks[MSTP110]), /* VIN0 */
+	CLKDEV_DEV_ID("r8a7778-vin.1", &mstp_clks[MSTP109]), /* VIN1 */
 	CLKDEV_DEV_ID("ehci-platform", &mstp_clks[MSTP100]), /* USB EHCI port0/1 */
 	CLKDEV_DEV_ID("ohci-platform", &mstp_clks[MSTP100]), /* USB OHCI port0/1 */
 	CLKDEV_DEV_ID("i2c-rcar.0", &mstp_clks[MSTP030]), /* I2C0 */
diff --git a/arch/arm/mach-shmobile/clock-r8a7779.c b/arch/arm/mach-shmobile/clock-r8a7779.c
index 10340f5becbb..bd6ad922eb7e 100644
--- a/arch/arm/mach-shmobile/clock-r8a7779.c
+++ b/arch/arm/mach-shmobile/clock-r8a7779.c
@@ -112,7 +112,9 @@ static struct clk *main_clks[] = {
 };
 
 enum { MSTP323, MSTP322, MSTP321, MSTP320,
+	MSTP120,
 	MSTP116, MSTP115, MSTP114,
+	MSTP110, MSTP109, MSTP108,
 	MSTP103, MSTP101, MSTP100,
 	MSTP030,
 	MSTP029, MSTP028, MSTP027, MSTP026, MSTP025, MSTP024, MSTP023, MSTP022, MSTP021,
@@ -125,9 +127,13 @@ static struct clk mstp_clks[MSTP_NR] = {
 	[MSTP322] = SH_CLK_MSTP32(&clkp_clk, MSTPCR3, 22, 0), /* SDHI1 */
 	[MSTP321] = SH_CLK_MSTP32(&clkp_clk, MSTPCR3, 21, 0), /* SDHI2 */
 	[MSTP320] = SH_CLK_MSTP32(&clkp_clk, MSTPCR3, 20, 0), /* SDHI3 */
+	[MSTP120] = SH_CLK_MSTP32(&clks_clk, MSTPCR1, 20, 0), /* VIN3 */
 	[MSTP116] = SH_CLK_MSTP32(&clkp_clk, MSTPCR1, 16, 0), /* PCIe */
 	[MSTP115] = SH_CLK_MSTP32(&clkp_clk, MSTPCR1, 15, 0), /* SATA */
 	[MSTP114] = SH_CLK_MSTP32(&clkp_clk, MSTPCR1, 14, 0), /* Ether */
+	[MSTP110] = SH_CLK_MSTP32(&clks_clk, MSTPCR1, 10, 0), /* VIN0 */
+	[MSTP109] = SH_CLK_MSTP32(&clks_clk, MSTPCR1, 9, 0), /* VIN1 */
+	[MSTP108] = SH_CLK_MSTP32(&clks_clk, MSTPCR1, 8, 0), /* VIN2 */
 	[MSTP103] = SH_CLK_MSTP32(&clks_clk, MSTPCR1, 3, 0), /* DU */
 	[MSTP101] = SH_CLK_MSTP32(&clkp_clk, MSTPCR1, 1, 0), /* USB2 */
 	[MSTP100] = SH_CLK_MSTP32(&clkp_clk, MSTPCR1, 0, 0), /* USB0/1 */
@@ -162,10 +168,14 @@ static struct clk_lookup lookups[] = {
 	CLKDEV_CON_ID("peripheral_clk", &clkp_clk),
 
 	/* MSTP32 clocks */
+	CLKDEV_DEV_ID("r8a7779-vin.3", &mstp_clks[MSTP120]), /* VIN3 */
 	CLKDEV_DEV_ID("rcar-pcie", &mstp_clks[MSTP116]), /* PCIe */
 	CLKDEV_DEV_ID("sata_rcar", &mstp_clks[MSTP115]), /* SATA */
 	CLKDEV_DEV_ID("fc600000.sata", &mstp_clks[MSTP115]), /* SATA w/DT */
 	CLKDEV_DEV_ID("r8a777x-ether", &mstp_clks[MSTP114]), /* Ether */
+	CLKDEV_DEV_ID("r8a7779-vin.0", &mstp_clks[MSTP110]), /* VIN0 */
+	CLKDEV_DEV_ID("r8a7779-vin.1", &mstp_clks[MSTP109]), /* VIN1 */
+	CLKDEV_DEV_ID("r8a7779-vin.2", &mstp_clks[MSTP108]), /* VIN2 */
 	CLKDEV_DEV_ID("ehci-platform.1", &mstp_clks[MSTP101]), /* USB EHCI port2 */
 	CLKDEV_DEV_ID("ohci-platform.1", &mstp_clks[MSTP101]), /* USB OHCI port2 */
 	CLKDEV_DEV_ID("ehci-platform.0", &mstp_clks[MSTP100]), /* USB EHCI port0/1 */
diff --git a/arch/arm/mach-shmobile/headsmp-scu.S b/arch/arm/mach-shmobile/headsmp-scu.S
index bfd920083a3b..f45dde701d7b 100644
--- a/arch/arm/mach-shmobile/headsmp-scu.S
+++ b/arch/arm/mach-shmobile/headsmp-scu.S
@@ -37,13 +37,15 @@ ENTRY(shmobile_boot_scu)
37 lsl r1, r1, #3 @ we will shift by cpu_id * 8 bits 37 lsl r1, r1, #3 @ we will shift by cpu_id * 8 bits
38 ldr r2, [r0, #8] @ SCU Power Status Register 38 ldr r2, [r0, #8] @ SCU Power Status Register
39 mov r3, #3 39 mov r3, #3
40 bic r2, r2, r3, lsl r1 @ Clear bits of our CPU (Run Mode) 40 lsl r3, r3, r1
41 bic r2, r2, r3 @ Clear bits of our CPU (Run Mode)
41 str r2, [r0, #8] @ write back 42 str r2, [r0, #8] @ write back
42 43
43 b shmobile_invalidate_start 44 b shmobile_invalidate_start
44ENDPROC(shmobile_boot_scu) 45ENDPROC(shmobile_boot_scu)
45 46
46 .text 47 .text
48 .align 2
47 .globl shmobile_scu_base 49 .globl shmobile_scu_base
48shmobile_scu_base: 50shmobile_scu_base:
49 .space 4 51 .space 4
diff --git a/arch/arm/mach-shmobile/headsmp.S b/arch/arm/mach-shmobile/headsmp.S
index a9d212498987..2667db806c39 100644
--- a/arch/arm/mach-shmobile/headsmp.S
+++ b/arch/arm/mach-shmobile/headsmp.S
@@ -24,12 +24,16 @@ ENDPROC(shmobile_invalidate_start)
24 * This will be mapped at address 0 by SBAR register. 24 * This will be mapped at address 0 by SBAR register.
25 * We need _long_ jump to the physical address. 25 * We need _long_ jump to the physical address.
26 */ 26 */
27 .arm
27 .align 12 28 .align 12
28ENTRY(shmobile_boot_vector) 29ENTRY(shmobile_boot_vector)
29 ldr r0, 2f 30 ldr r0, 2f
30 ldr pc, 1f 31 ldr r1, 1f
32 bx r1
33
31ENDPROC(shmobile_boot_vector) 34ENDPROC(shmobile_boot_vector)
32 35
36 .align 2
33 .globl shmobile_boot_fn 37 .globl shmobile_boot_fn
34shmobile_boot_fn: 38shmobile_boot_fn:
351: .space 4 391: .space 4
diff --git a/arch/arm/mach-shmobile/include/mach/r8a7778.h b/arch/arm/mach-shmobile/include/mach/r8a7778.h
index 9b561bf4229f..2866704e7afd 100644
--- a/arch/arm/mach-shmobile/include/mach/r8a7778.h
+++ b/arch/arm/mach-shmobile/include/mach/r8a7778.h
@@ -22,6 +22,7 @@
22#include <linux/mmc/sh_mobile_sdhi.h> 22#include <linux/mmc/sh_mobile_sdhi.h>
23#include <linux/sh_eth.h> 23#include <linux/sh_eth.h>
24#include <linux/platform_data/usb-rcar-phy.h> 24#include <linux/platform_data/usb-rcar-phy.h>
25#include <linux/platform_data/camera-rcar.h>
25 26
26extern void r8a7778_add_standard_devices(void); 27extern void r8a7778_add_standard_devices(void);
27extern void r8a7778_add_standard_devices_dt(void); 28extern void r8a7778_add_standard_devices_dt(void);
@@ -30,6 +31,8 @@ extern void r8a7778_add_usb_phy_device(struct rcar_phy_platform_data *pdata);
30extern void r8a7778_add_i2c_device(int id); 31extern void r8a7778_add_i2c_device(int id);
31extern void r8a7778_add_hspi_device(int id); 32extern void r8a7778_add_hspi_device(int id);
32extern void r8a7778_add_mmc_device(struct sh_mmcif_plat_data *info); 33extern void r8a7778_add_mmc_device(struct sh_mmcif_plat_data *info);
34extern void r8a7778_add_vin_device(int id,
35 struct rcar_vin_platform_data *pdata);
33 36
34extern void r8a7778_init_late(void); 37extern void r8a7778_init_late(void);
35extern void r8a7778_init_delay(void); 38extern void r8a7778_init_delay(void);
diff --git a/arch/arm/mach-shmobile/include/mach/r8a7779.h b/arch/arm/mach-shmobile/include/mach/r8a7779.h
index fc47073c7ba9..6d2b6417fe2a 100644
--- a/arch/arm/mach-shmobile/include/mach/r8a7779.h
+++ b/arch/arm/mach-shmobile/include/mach/r8a7779.h
@@ -5,6 +5,7 @@
5#include <linux/pm_domain.h> 5#include <linux/pm_domain.h>
6#include <linux/sh_eth.h> 6#include <linux/sh_eth.h>
7#include <linux/platform_data/usb-rcar-phy.h> 7#include <linux/platform_data/usb-rcar-phy.h>
8#include <linux/platform_data/camera-rcar.h>
8 9
9struct platform_device; 10struct platform_device;
10 11
@@ -35,6 +36,8 @@ extern void r8a7779_add_standard_devices(void);
35extern void r8a7779_add_standard_devices_dt(void); 36extern void r8a7779_add_standard_devices_dt(void);
36extern void r8a7779_add_ether_device(struct sh_eth_plat_data *pdata); 37extern void r8a7779_add_ether_device(struct sh_eth_plat_data *pdata);
37extern void r8a7779_add_usb_phy_device(struct rcar_phy_platform_data *pdata); 38extern void r8a7779_add_usb_phy_device(struct rcar_phy_platform_data *pdata);
39extern void r8a7779_add_vin_device(int idx,
40 struct rcar_vin_platform_data *pdata);
38extern void r8a7779_init_late(void); 41extern void r8a7779_init_late(void);
39extern void r8a7779_clock_init(void); 42extern void r8a7779_clock_init(void);
40extern void r8a7779_pinmux_init(void); 43extern void r8a7779_pinmux_init(void);
diff --git a/arch/arm/mach-shmobile/include/mach/zboot.h b/arch/arm/mach-shmobile/include/mach/zboot.h
index f2d8744c1f14..c3c4669a2d72 100644
--- a/arch/arm/mach-shmobile/include/mach/zboot.h
+++ b/arch/arm/mach-shmobile/include/mach/zboot.h
@@ -1,7 +1,6 @@
1#ifndef ZBOOT_H 1#ifndef ZBOOT_H
2#define ZBOOT_H 2#define ZBOOT_H
3 3
4#include <asm/mach-types.h>
5#include <mach/zboot_macros.h> 4#include <mach/zboot_macros.h>
6 5
7/************************************************** 6/**************************************************
@@ -11,7 +10,6 @@
11 **************************************************/ 10 **************************************************/
12 11
13#ifdef CONFIG_MACH_MACKEREL 12#ifdef CONFIG_MACH_MACKEREL
14#define MACH_TYPE MACH_TYPE_MACKEREL
15#define MEMORY_START 0x40000000 13#define MEMORY_START 0x40000000
16#include "mach/head-mackerel.txt" 14#include "mach/head-mackerel.txt"
17#else 15#else
diff --git a/arch/arm/mach-shmobile/setup-r8a7778.c b/arch/arm/mach-shmobile/setup-r8a7778.c
index a3a2e37b03f3..203becfc6e31 100644
--- a/arch/arm/mach-shmobile/setup-r8a7778.c
+++ b/arch/arm/mach-shmobile/setup-r8a7778.c
@@ -333,6 +333,40 @@ void __init r8a7778_add_mmc_device(struct sh_mmcif_plat_data *info)
333 info, sizeof(*info)); 333 info, sizeof(*info));
334} 334}
335 335
336/* VIN */
337#define R8A7778_VIN(idx) \
338static struct resource vin##idx##_resources[] __initdata = { \
339 DEFINE_RES_MEM(0xffc50000 + 0x1000 * (idx), 0x1000), \
340 DEFINE_RES_IRQ(gic_iid(0x5a)), \
341}; \
342 \
343static struct platform_device_info vin##idx##_info __initdata = { \
344 .parent = &platform_bus, \
345 .name = "r8a7778-vin", \
346 .id = idx, \
347 .res = vin##idx##_resources, \
348 .num_res = ARRAY_SIZE(vin##idx##_resources), \
349 .dma_mask = DMA_BIT_MASK(32), \
350}
351
352R8A7778_VIN(0);
353R8A7778_VIN(1);
354
355static struct platform_device_info *vin_info_table[] __initdata = {
356 &vin0_info,
357 &vin1_info,
358};
359
360void __init r8a7778_add_vin_device(int id, struct rcar_vin_platform_data *pdata)
361{
362 BUG_ON(id < 0 || id > 1);
363
364 vin_info_table[id]->data = pdata;
365 vin_info_table[id]->size_data = sizeof(*pdata);
366
367 platform_device_register_full(vin_info_table[id]);
368}
369
336void __init r8a7778_add_standard_devices(void) 370void __init r8a7778_add_standard_devices(void)
337{ 371{
338 int i; 372 int i;
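The new r8a7778_add_vin_device() copies the caller's platform data into the platform_device_info built by the R8A7778_VIN() macro and registers the device; the BUG_ON() rejects anything but VIN0/VIN1. A hypothetical board-file caller could look like the following, where the RCAR_VIN_BT656 flag is assumed to be what <linux/platform_data/camera-rcar.h> provides for BT.656 video input:

	#include <linux/platform_data/camera-rcar.h>
	#include <mach/r8a7778.h>

	static struct rcar_vin_platform_data vin_pdata __initdata = {
		.flags = RCAR_VIN_BT656,	/* assumed flag name */
	};

	static void __init board_add_devices(void)
	{
		r8a7778_add_vin_device(0, &vin_pdata);	/* VIN0; only 0 and 1 are valid */
	}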
diff --git a/arch/arm/mach-shmobile/setup-r8a7779.c b/arch/arm/mach-shmobile/setup-r8a7779.c
index 66d38261ecaa..41bab625341e 100644
--- a/arch/arm/mach-shmobile/setup-r8a7779.c
+++ b/arch/arm/mach-shmobile/setup-r8a7779.c
@@ -559,6 +559,33 @@ static struct resource ether_resources[] = {
559 }, 559 },
560}; 560};
561 561
562#define R8A7779_VIN(idx) \
563static struct resource vin##idx##_resources[] __initdata = { \
564 DEFINE_RES_MEM(0xffc50000 + 0x1000 * (idx), 0x1000), \
565 DEFINE_RES_IRQ(gic_iid(0x5f + (idx))), \
566}; \
567 \
568static struct platform_device_info vin##idx##_info __initdata = { \
569 .parent = &platform_bus, \
570 .name = "r8a7779-vin", \
571 .id = idx, \
572 .res = vin##idx##_resources, \
573 .num_res = ARRAY_SIZE(vin##idx##_resources), \
574 .dma_mask = DMA_BIT_MASK(32), \
575}
576
577R8A7779_VIN(0);
578R8A7779_VIN(1);
579R8A7779_VIN(2);
580R8A7779_VIN(3);
581
582static struct platform_device_info *vin_info_table[] __initdata = {
583 &vin0_info,
584 &vin1_info,
585 &vin2_info,
586 &vin3_info,
587};
588
562static struct platform_device *r8a7779_devices_dt[] __initdata = { 589static struct platform_device *r8a7779_devices_dt[] __initdata = {
563 &scif0_device, 590 &scif0_device,
564 &scif1_device, 591 &scif1_device,
@@ -610,6 +637,16 @@ void __init r8a7779_add_usb_phy_device(struct rcar_phy_platform_data *pdata)
610 pdata, sizeof(*pdata)); 637 pdata, sizeof(*pdata));
611} 638}
612 639
640void __init r8a7779_add_vin_device(int id, struct rcar_vin_platform_data *pdata)
641{
642 BUG_ON(id < 0 || id > 3);
643
644 vin_info_table[id]->data = pdata;
645 vin_info_table[id]->size_data = sizeof(*pdata);
646
647 platform_device_register_full(vin_info_table[id]);
648}
649
613/* do nothing for !CONFIG_SMP or !CONFIG_HAVE_TWD */ 650/* do nothing for !CONFIG_SMP or !CONFIG_HAVE_TWD */
614void __init __weak r8a7779_register_twd(void) { } 651void __init __weak r8a7779_register_twd(void) { }
615 652
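For reference, one instantiation of the macro above, R8A7779_VIN(0), expands to roughly the following; each channel's registers sit 0x1000 apart and the IRQ steps by one per channel:

	static struct resource vin0_resources[] __initdata = {
		DEFINE_RES_MEM(0xffc50000, 0x1000),
		DEFINE_RES_IRQ(gic_iid(0x5f)),
	};

	static struct platform_device_info vin0_info __initdata = {
		.parent		= &platform_bus,
		.name		= "r8a7779-vin",
		.id		= 0,
		.res		= vin0_resources,
		.num_res	= ARRAY_SIZE(vin0_resources),
		.dma_mask	= DMA_BIT_MASK(32),
	};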
diff --git a/arch/arm/mach-shmobile/sleep-sh7372.S b/arch/arm/mach-shmobile/sleep-sh7372.S
index 53f4840e4949..9782862899e8 100644
--- a/arch/arm/mach-shmobile/sleep-sh7372.S
+++ b/arch/arm/mach-shmobile/sleep-sh7372.S
@@ -41,6 +41,7 @@
41sh7372_resume_core_standby_sysc: 41sh7372_resume_core_standby_sysc:
42 ldr pc, 1f 42 ldr pc, 1f
43 43
44 .align 2
44 .globl sh7372_cpu_resume 45 .globl sh7372_cpu_resume
45sh7372_cpu_resume: 46sh7372_cpu_resume:
461: .space 4 471: .space 4
@@ -96,6 +97,7 @@ sh7372_do_idle_sysc:
961: 971:
97 b 1b 98 b 1b
98 99
100 .align 2
99kernel_flush: 101kernel_flush:
100 .word v7_flush_dcache_all 102 .word v7_flush_dcache_all
101#endif 103#endif
diff --git a/arch/arm/mach-spear/include/mach/debug-macro.S b/arch/arm/mach-spear/include/mach/debug-macro.S
deleted file mode 100644
index 75b05ad0fbad..000000000000
--- a/arch/arm/mach-spear/include/mach/debug-macro.S
+++ /dev/null
@@ -1,36 +0,0 @@
1/*
2 * arch/arm/plat-spear/include/plat/debug-macro.S
3 *
4 * Debugging macro include header for spear platform
5 *
6 * Copyright (C) 2009 ST Microelectronics
7 * Viresh Kumar <viresh.linux@gmail.com>
8 *
9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any
11 * warranty of any kind, whether express or implied.
12 */
13
14#include <linux/amba/serial.h>
15#include <mach/spear.h>
16
17 .macro addruart, rp, rv, tmp
18 mov \rp, #SPEAR_DBG_UART_BASE @ Physical base
19 mov \rv, #VA_SPEAR_DBG_UART_BASE @ Virtual base
20 .endm
21
22 .macro senduart, rd, rx
23 strb \rd, [\rx, #UART01x_DR] @ ASC_TX_BUFFER
24 .endm
25
26 .macro waituart, rd, rx
271001: ldr \rd, [\rx, #UART01x_FR] @ FLAG REGISTER
28 tst \rd, #UART01x_FR_TXFF @ TX_FULL
29 bne 1001b
30 .endm
31
32 .macro busyuart, rd, rx
331002: ldr \rd, [\rx, #UART01x_FR] @ FLAG REGISTER
34 tst \rd, #UART011_FR_TXFE @ TX_EMPTY
35 beq 1002b
36 .endm
diff --git a/arch/arm/mach-spear/include/mach/spear.h b/arch/arm/mach-spear/include/mach/spear.h
index cf3a5369eeca..5cdc53d9b653 100644
--- a/arch/arm/mach-spear/include/mach/spear.h
+++ b/arch/arm/mach-spear/include/mach/spear.h
@@ -39,7 +39,6 @@
39 39
40/* Debug uart for linux, will be used for debug and uncompress messages */ 40/* Debug uart for linux, will be used for debug and uncompress messages */
41#define SPEAR_DBG_UART_BASE SPEAR_ICM1_UART_BASE 41#define SPEAR_DBG_UART_BASE SPEAR_ICM1_UART_BASE
42#define VA_SPEAR_DBG_UART_BASE VA_SPEAR_ICM1_UART_BASE
43 42
44/* Sysctl base for spear platform */ 43/* Sysctl base for spear platform */
45#define SPEAR_SYS_CTRL_BASE SPEAR_ICM3_SYS_CTRL_BASE 44#define SPEAR_SYS_CTRL_BASE SPEAR_ICM3_SYS_CTRL_BASE
@@ -86,7 +85,6 @@
86 85
87/* Debug uart for linux, will be used for debug and uncompress messages */ 86/* Debug uart for linux, will be used for debug and uncompress messages */
88#define SPEAR_DBG_UART_BASE UART_BASE 87#define SPEAR_DBG_UART_BASE UART_BASE
89#define VA_SPEAR_DBG_UART_BASE VA_UART_BASE
90 88
91#endif /* SPEAR13XX */ 89#endif /* SPEAR13XX */
92 90
diff --git a/arch/arm/mach-sti/headsmp.S b/arch/arm/mach-sti/headsmp.S
index 78ebc7559f53..4c09bae86edf 100644
--- a/arch/arm/mach-sti/headsmp.S
+++ b/arch/arm/mach-sti/headsmp.S
@@ -16,8 +16,6 @@
16#include <linux/linkage.h> 16#include <linux/linkage.h>
17#include <linux/init.h> 17#include <linux/init.h>
18 18
19 __INIT
20
21/* 19/*
22 * ST specific entry point for secondary CPUs. This provides 20 * ST specific entry point for secondary CPUs. This provides
23 * a "holding pen" into which all secondary cores are held until we're 21 * a "holding pen" into which all secondary cores are held until we're
diff --git a/arch/arm/mach-tegra/tegra.c b/arch/arm/mach-tegra/tegra.c
index 0d1e4128d460..fc97cfd52769 100644
--- a/arch/arm/mach-tegra/tegra.c
+++ b/arch/arm/mach-tegra/tegra.c
@@ -29,7 +29,6 @@
29#include <linux/of_fdt.h> 29#include <linux/of_fdt.h>
30#include <linux/of_platform.h> 30#include <linux/of_platform.h>
31#include <linux/pda_power.h> 31#include <linux/pda_power.h>
32#include <linux/platform_data/tegra_usb.h>
33#include <linux/io.h> 32#include <linux/io.h>
34#include <linux/slab.h> 33#include <linux/slab.h>
35#include <linux/sys_soc.h> 34#include <linux/sys_soc.h>
@@ -46,40 +45,6 @@
46#include "fuse.h" 45#include "fuse.h"
47#include "iomap.h" 46#include "iomap.h"
48 47
49static struct tegra_ehci_platform_data tegra_ehci1_pdata = {
50 .operating_mode = TEGRA_USB_OTG,
51 .power_down_on_bus_suspend = 1,
52 .vbus_gpio = -1,
53};
54
55static struct tegra_ulpi_config tegra_ehci2_ulpi_phy_config = {
56 .reset_gpio = -1,
57 .clk = "cdev2",
58};
59
60static struct tegra_ehci_platform_data tegra_ehci2_pdata = {
61 .phy_config = &tegra_ehci2_ulpi_phy_config,
62 .operating_mode = TEGRA_USB_HOST,
63 .power_down_on_bus_suspend = 1,
64 .vbus_gpio = -1,
65};
66
67static struct tegra_ehci_platform_data tegra_ehci3_pdata = {
68 .operating_mode = TEGRA_USB_HOST,
69 .power_down_on_bus_suspend = 1,
70 .vbus_gpio = -1,
71};
72
73static struct of_dev_auxdata tegra20_auxdata_lookup[] __initdata = {
74 OF_DEV_AUXDATA("nvidia,tegra20-ehci", 0xC5000000, "tegra-ehci.0",
75 &tegra_ehci1_pdata),
76 OF_DEV_AUXDATA("nvidia,tegra20-ehci", 0xC5004000, "tegra-ehci.1",
77 &tegra_ehci2_pdata),
78 OF_DEV_AUXDATA("nvidia,tegra20-ehci", 0xC5008000, "tegra-ehci.2",
79 &tegra_ehci3_pdata),
80 {}
81};
82
83static void __init tegra_dt_init(void) 48static void __init tegra_dt_init(void)
84{ 49{
85 struct soc_device_attribute *soc_dev_attr; 50 struct soc_device_attribute *soc_dev_attr;
@@ -112,8 +77,7 @@ static void __init tegra_dt_init(void)
112 * devices 77 * devices
113 */ 78 */
114out: 79out:
115 of_platform_populate(NULL, of_default_bus_match_table, 80 of_platform_populate(NULL, of_default_bus_match_table, NULL, parent);
116 tegra20_auxdata_lookup, parent);
117} 81}
118 82
119static void __init trimslice_init(void) 83static void __init trimslice_init(void)
diff --git a/arch/arm/mach-ux500/Makefile b/arch/arm/mach-ux500/Makefile
index bf9b6be5b180..fe1f3e26b88b 100644
--- a/arch/arm/mach-ux500/Makefile
+++ b/arch/arm/mach-ux500/Makefile
@@ -4,7 +4,6 @@
4 4
5obj-y := cpu.o devices.o devices-common.o \ 5obj-y := cpu.o devices.o devices-common.o \
6 id.o usb.o timer.o pm.o 6 id.o usb.o timer.o pm.o
7obj-$(CONFIG_CPU_IDLE) += cpuidle.o
8obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o 7obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o
9obj-$(CONFIG_UX500_SOC_DB8500) += cpu-db8500.o devices-db8500.o 8obj-$(CONFIG_UX500_SOC_DB8500) += cpu-db8500.o devices-db8500.o
10obj-$(CONFIG_MACH_MOP500) += board-mop500.o board-mop500-sdi.o \ 9obj-$(CONFIG_MACH_MOP500) += board-mop500.o board-mop500-sdi.o \
diff --git a/arch/arm/mach-ux500/cpuidle.c b/arch/arm/mach-ux500/cpuidle.c
deleted file mode 100644
index a45dd09daed9..000000000000
--- a/arch/arm/mach-ux500/cpuidle.c
+++ /dev/null
@@ -1,128 +0,0 @@
1/*
2 * Copyright (c) 2012 Linaro : Daniel Lezcano <daniel.lezcano@linaro.org> (IBM)
3 *
4 * Based on the work of Rickard Andersson <rickard.andersson@stericsson.com>
5 * and Jonas Aaberg <jonas.aberg@stericsson.com>.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/module.h>
13#include <linux/cpuidle.h>
14#include <linux/spinlock.h>
15#include <linux/atomic.h>
16#include <linux/smp.h>
17#include <linux/mfd/dbx500-prcmu.h>
18#include <linux/platform_data/arm-ux500-pm.h>
19
20#include <asm/cpuidle.h>
21#include <asm/proc-fns.h>
22
23#include "db8500-regs.h"
24#include "id.h"
25
26static atomic_t master = ATOMIC_INIT(0);
27static DEFINE_SPINLOCK(master_lock);
28
29static inline int ux500_enter_idle(struct cpuidle_device *dev,
30 struct cpuidle_driver *drv, int index)
31{
32 int this_cpu = smp_processor_id();
33 bool recouple = false;
34
35 if (atomic_inc_return(&master) == num_online_cpus()) {
36
37 /* With this lock, we prevent the other cpu to exit and enter
38 * this function again and become the master */
39 if (!spin_trylock(&master_lock))
40 goto wfi;
41
42 /* decouple the gic from the A9 cores */
43 if (prcmu_gic_decouple()) {
44 spin_unlock(&master_lock);
45 goto out;
46 }
47
48 /* If an error occur, we will have to recouple the gic
49 * manually */
50 recouple = true;
51
52 /* At this state, as the gic is decoupled, if the other
53 * cpu is in WFI, we have the guarantee it won't be wake
54 * up, so we can safely go to retention */
55 if (!prcmu_is_cpu_in_wfi(this_cpu ? 0 : 1))
56 goto out;
57
58 /* The prcmu will be in charge of watching the interrupts
59 * and wake up the cpus */
60 if (prcmu_copy_gic_settings())
61 goto out;
62
63 /* Check in the meantime an interrupt did
64 * not occur on the gic ... */
65 if (prcmu_gic_pending_irq())
66 goto out;
67
68 /* ... and the prcmu */
69 if (prcmu_pending_irq())
70 goto out;
71
72 /* Go to the retention state, the prcmu will wait for the
73 * cpu to go WFI and this is what happens after exiting this
74 * 'master' critical section */
75 if (prcmu_set_power_state(PRCMU_AP_IDLE, true, true))
76 goto out;
77
78 /* When we switch to retention, the prcmu is in charge
79 * of recoupling the gic automatically */
80 recouple = false;
81
82 spin_unlock(&master_lock);
83 }
84wfi:
85 cpu_do_idle();
86out:
87 atomic_dec(&master);
88
89 if (recouple) {
90 prcmu_gic_recouple();
91 spin_unlock(&master_lock);
92 }
93
94 return index;
95}
96
97static struct cpuidle_driver ux500_idle_driver = {
98 .name = "ux500_idle",
99 .owner = THIS_MODULE,
100 .states = {
101 ARM_CPUIDLE_WFI_STATE,
102 {
103 .enter = ux500_enter_idle,
104 .exit_latency = 70,
105 .target_residency = 260,
106 .flags = CPUIDLE_FLAG_TIME_VALID |
107 CPUIDLE_FLAG_TIMER_STOP,
108 .name = "ApIdle",
109 .desc = "ARM Retention",
110 },
111 },
112 .safe_state_index = 0,
113 .state_count = 2,
114};
115
116int __init ux500_idle_init(void)
117{
118 if (!(cpu_is_u8500_family() || cpu_is_ux540_family()))
119 return -ENODEV;
120
121 /* Configure wake up reasons */
122 prcmu_enable_wakeups(PRCMU_WAKEUP(ARM) | PRCMU_WAKEUP(RTC) |
123 PRCMU_WAKEUP(ABB));
124
125 return cpuidle_register(&ux500_idle_driver, NULL);
126}
127
128device_initcall(ux500_idle_init);
diff --git a/arch/arm/mach-versatile/include/mach/debug-macro.S b/arch/arm/mach-versatile/include/mach/debug-macro.S
deleted file mode 100644
index d0fbd7f1cb00..000000000000
--- a/arch/arm/mach-versatile/include/mach/debug-macro.S
+++ /dev/null
@@ -1,21 +0,0 @@
1/* arch/arm/mach-versatile/include/mach/debug-macro.S
2 *
3 * Debugging macro include header
4 *
5 * Copyright (C) 1994-1999 Russell King
6 * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12*/
13
14 .macro addruart, rp, rv, tmp
15 mov \rp, #0x001F0000
16 orr \rp, \rp, #0x00001000
17 orr \rv, \rp, #0xf1000000 @ virtual base
18 orr \rp, \rp, #0x10000000 @ physical base
19 .endm
20
21#include <asm/hardware/debug-pl01x.S>
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index db5c2cab8fda..cd2c88e7a8f7 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -809,15 +809,18 @@ config KUSER_HELPERS
809 the CPU type fitted to the system. This permits binaries to be 809 the CPU type fitted to the system. This permits binaries to be
810 run on ARMv4 through to ARMv7 without modification. 810 run on ARMv4 through to ARMv7 without modification.
811 811
812 See Documentation/arm/kernel_user_helpers.txt for details.
813
812 However, the fixed address nature of these helpers can be used 814 However, the fixed address nature of these helpers can be used
813 by ROP (return orientated programming) authors when creating 815 by ROP (return orientated programming) authors when creating
814 exploits. 816 exploits.
815 817
816 If all of the binaries and libraries which run on your platform 818 If all of the binaries and libraries which run on your platform
817 are built specifically for your platform, and make no use of 819 are built specifically for your platform, and make no use of
818 these helpers, then you can turn this option off. However, 820 these helpers, then you can turn this option off to hinder
819 when such an binary or library is run, it will receive a SIGILL 821 such exploits. However, in that case, if a binary or library
820 signal, which will terminate the program. 822 relying on those helpers is run, it will receive a SIGILL signal,
823 which will terminate the program.
821 824
822 Say N here only if you are absolutely certain that you do not 825 Say N here only if you are absolutely certain that you do not
823 need these helpers; otherwise, the safe option is to say Y. 826 need these helpers; otherwise, the safe option is to say Y.
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index d70e0aba0c9d..447da6ffadd5 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -290,7 +290,7 @@ static void l2x0_disable(void)
290 raw_spin_lock_irqsave(&l2x0_lock, flags); 290 raw_spin_lock_irqsave(&l2x0_lock, flags);
291 __l2x0_flush_all(); 291 __l2x0_flush_all();
292 writel_relaxed(0, l2x0_base + L2X0_CTRL); 292 writel_relaxed(0, l2x0_base + L2X0_CTRL);
293 dsb(); 293 dsb(st);
294 raw_spin_unlock_irqrestore(&l2x0_lock, flags); 294 raw_spin_unlock_irqrestore(&l2x0_lock, flags);
295} 295}
296 296
@@ -417,9 +417,9 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
417 outer_cache.disable = l2x0_disable; 417 outer_cache.disable = l2x0_disable;
418 } 418 }
419 419
420 printk(KERN_INFO "%s cache controller enabled\n", type); 420 pr_info("%s cache controller enabled\n", type);
421 printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n", 421 pr_info("l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d kB\n",
422 ways, cache_id, aux, l2x0_size); 422 ways, cache_id, aux, l2x0_size >> 10);
423} 423}
424 424
425#ifdef CONFIG_OF 425#ifdef CONFIG_OF
@@ -929,7 +929,9 @@ static const struct of_device_id l2x0_ids[] __initconst = {
929 .data = (void *)&aurora_no_outer_data}, 929 .data = (void *)&aurora_no_outer_data},
930 { .compatible = "marvell,aurora-outer-cache", 930 { .compatible = "marvell,aurora-outer-cache",
931 .data = (void *)&aurora_with_outer_data}, 931 .data = (void *)&aurora_with_outer_data},
932 { .compatible = "bcm,bcm11351-a2-pl310-cache", 932 { .compatible = "brcm,bcm11351-a2-pl310-cache",
933 .data = (void *)&bcm_l2x0_data},
934 { .compatible = "bcm,bcm11351-a2-pl310-cache", /* deprecated name */
933 .data = (void *)&bcm_l2x0_data}, 935 .data = (void *)&bcm_l2x0_data},
934 {} 936 {}
935}; 937};
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 515b00064da8..b5c467a65c27 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -282,7 +282,7 @@ ENTRY(v7_coherent_user_range)
282 add r12, r12, r2 282 add r12, r12, r2
283 cmp r12, r1 283 cmp r12, r1
284 blo 1b 284 blo 1b
285 dsb 285 dsb ishst
286 icache_line_size r2, r3 286 icache_line_size r2, r3
287 sub r3, r2, #1 287 sub r3, r2, #1
288 bic r12, r0, r3 288 bic r12, r0, r3
@@ -294,7 +294,7 @@ ENTRY(v7_coherent_user_range)
294 mov r0, #0 294 mov r0, #0
295 ALT_SMP(mcr p15, 0, r0, c7, c1, 6) @ invalidate BTB Inner Shareable 295 ALT_SMP(mcr p15, 0, r0, c7, c1, 6) @ invalidate BTB Inner Shareable
296 ALT_UP(mcr p15, 0, r0, c7, c5, 6) @ invalidate BTB 296 ALT_UP(mcr p15, 0, r0, c7, c5, 6) @ invalidate BTB
297 dsb 297 dsb ishst
298 isb 298 isb
299 mov pc, lr 299 mov pc, lr
300 300
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 4a0544492f10..84e6f772e204 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -162,10 +162,7 @@ static void flush_context(unsigned int cpu)
162 } 162 }
163 163
164 /* Queue a TLB invalidate and flush the I-cache if necessary. */ 164 /* Queue a TLB invalidate and flush the I-cache if necessary. */
165 if (!tlb_ops_need_broadcast()) 165 cpumask_setall(&tlb_flush_pending);
166 cpumask_set_cpu(cpu, &tlb_flush_pending);
167 else
168 cpumask_setall(&tlb_flush_pending);
169 166
170 if (icache_is_vivt_asid_tagged()) 167 if (icache_is_vivt_asid_tagged())
171 __flush_icache_all(); 168 __flush_icache_all();
@@ -245,8 +242,6 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
245 if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) { 242 if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
246 local_flush_bp_all(); 243 local_flush_bp_all();
247 local_flush_tlb_all(); 244 local_flush_tlb_all();
248 if (erratum_a15_798181())
249 dummy_flush_tlb_a15_erratum();
250 } 245 }
251 246
252 atomic64_set(&per_cpu(active_asids, cpu), asid); 247 atomic64_set(&per_cpu(active_asids, cpu), asid);
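The rollover path above no longer distinguishes CPUs that need a TLB broadcast from those that do not: every CPU is simply marked as having a flush pending, and each one performs it locally before running with a new ASID. The overall pattern, reduced to a sketch of the two halves visible in this hunk:

	#include <linux/cpumask.h>
	#include <asm/tlbflush.h>

	static cpumask_t tlb_flush_pending;

	/* on ASID generation rollover: defer a flush to every CPU */
	static void queue_pending_flushes(void)
	{
		cpumask_setall(&tlb_flush_pending);
	}

	/* on each CPU, before installing the new ASID */
	static void run_pending_flush(unsigned int cpu)
	{
		if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
			local_flush_bp_all();
			local_flush_tlb_all();
		}
	}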
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 7f9b1798c6cf..f5e1a8471714 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -358,7 +358,7 @@ static int __init atomic_pool_init(void)
358 if (!pages) 358 if (!pages)
359 goto no_pages; 359 goto no_pages;
360 360
361 if (IS_ENABLED(CONFIG_CMA)) 361 if (IS_ENABLED(CONFIG_DMA_CMA))
362 ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page, 362 ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page,
363 atomic_pool_init); 363 atomic_pool_init);
364 else 364 else
@@ -455,7 +455,6 @@ static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
455 unsigned end = start + size; 455 unsigned end = start + size;
456 456
457 apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot); 457 apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
458 dsb();
459 flush_tlb_kernel_range(start, end); 458 flush_tlb_kernel_range(start, end);
460} 459}
461 460
@@ -670,7 +669,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
670 addr = __alloc_simple_buffer(dev, size, gfp, &page); 669 addr = __alloc_simple_buffer(dev, size, gfp, &page);
671 else if (!(gfp & __GFP_WAIT)) 670 else if (!(gfp & __GFP_WAIT))
672 addr = __alloc_from_pool(size, &page); 671 addr = __alloc_from_pool(size, &page);
673 else if (!IS_ENABLED(CONFIG_CMA)) 672 else if (!IS_ENABLED(CONFIG_DMA_CMA))
674 addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller); 673 addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
675 else 674 else
676 addr = __alloc_from_contiguous(dev, size, prot, &page, caller); 675 addr = __alloc_from_contiguous(dev, size, prot, &page, caller);
@@ -759,7 +758,7 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
759 __dma_free_buffer(page, size); 758 __dma_free_buffer(page, size);
760 } else if (__free_from_pool(cpu_addr, size)) { 759 } else if (__free_from_pool(cpu_addr, size)) {
761 return; 760 return;
762 } else if (!IS_ENABLED(CONFIG_CMA)) { 761 } else if (!IS_ENABLED(CONFIG_DMA_CMA)) {
763 __dma_free_remap(cpu_addr, size); 762 __dma_free_remap(cpu_addr, size);
764 __dma_free_buffer(page, size); 763 __dma_free_buffer(page, size);
765 } else { 764 } else {
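The CONFIG_CMA to CONFIG_DMA_CMA rename keeps the IS_ENABLED() idiom: the macro evaluates to a compile-time 0 or 1, so the untaken allocation path is discarded entirely while still being parsed and type-checked. A generic sketch with hypothetical helpers:

	#include <linux/kconfig.h>

	static void *alloc_dma_buffer(size_t size)
	{
		if (IS_ENABLED(CONFIG_DMA_CMA))		/* folded at compile time */
			return alloc_from_contiguous(size);	/* hypothetical */
		return alloc_by_remapping(size);		/* hypothetical */
	}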
diff --git a/arch/arm/mm/hugetlbpage.c b/arch/arm/mm/hugetlbpage.c
index 3d1e4a205b0b..66781bf34077 100644
--- a/arch/arm/mm/hugetlbpage.c
+++ b/arch/arm/mm/hugetlbpage.c
@@ -36,22 +36,6 @@
36 * of type casting from pmd_t * to pte_t *. 36 * of type casting from pmd_t * to pte_t *.
37 */ 37 */
38 38
39pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
40{
41 pgd_t *pgd;
42 pud_t *pud;
43 pmd_t *pmd = NULL;
44
45 pgd = pgd_offset(mm, addr);
46 if (pgd_present(*pgd)) {
47 pud = pud_offset(pgd, addr);
48 if (pud_present(*pud))
49 pmd = pmd_offset(pud, addr);
50 }
51
52 return (pte_t *)pmd;
53}
54
55struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address, 39struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
56 int write) 40 int write)
57{ 41{
@@ -68,33 +52,6 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
68 return 0; 52 return 0;
69} 53}
70 54
71pte_t *huge_pte_alloc(struct mm_struct *mm,
72 unsigned long addr, unsigned long sz)
73{
74 pgd_t *pgd;
75 pud_t *pud;
76 pte_t *pte = NULL;
77
78 pgd = pgd_offset(mm, addr);
79 pud = pud_alloc(mm, pgd, addr);
80 if (pud)
81 pte = (pte_t *)pmd_alloc(mm, pud, addr);
82
83 return pte;
84}
85
86struct page *
87follow_huge_pmd(struct mm_struct *mm, unsigned long address,
88 pmd_t *pmd, int write)
89{
90 struct page *page;
91
92 page = pte_page(*(pte_t *)pmd);
93 if (page)
94 page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
95 return page;
96}
97
98int pmd_huge(pmd_t pmd) 55int pmd_huge(pmd_t pmd)
99{ 56{
100 return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT); 57 return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 15225d829d71..2958e74fc42c 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -231,7 +231,7 @@ static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
231} 231}
232#endif 232#endif
233 233
234void __init setup_dma_zone(struct machine_desc *mdesc) 234void __init setup_dma_zone(const struct machine_desc *mdesc)
235{ 235{
236#ifdef CONFIG_ZONE_DMA 236#ifdef CONFIG_ZONE_DMA
237 if (mdesc->dma_zone_size) { 237 if (mdesc->dma_zone_size) {
@@ -335,7 +335,8 @@ phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
335 return phys; 335 return phys;
336} 336}
337 337
338void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc) 338void __init arm_memblock_init(struct meminfo *mi,
339 const struct machine_desc *mdesc)
339{ 340{
340 int i; 341 int i;
341 342
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 53cdbd39ec8e..b1d17eeb59b8 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1186,7 +1186,7 @@ void __init arm_mm_memblock_reserve(void)
1186 * called function. This means you can't use any function or debugging 1186 * called function. This means you can't use any function or debugging
1187 * method which may touch any device, otherwise the kernel _will_ crash. 1187 * method which may touch any device, otherwise the kernel _will_ crash.
1188 */ 1188 */
1189static void __init devicemaps_init(struct machine_desc *mdesc) 1189static void __init devicemaps_init(const struct machine_desc *mdesc)
1190{ 1190{
1191 struct map_desc map; 1191 struct map_desc map;
1192 unsigned long addr; 1192 unsigned long addr;
@@ -1319,7 +1319,7 @@ static void __init map_lowmem(void)
1319 * paging_init() sets up the page tables, initialises the zone memory 1319 * paging_init() sets up the page tables, initialises the zone memory
1320 * maps, and sets up the zero page, bad page and bad page tables. 1320 * maps, and sets up the zero page, bad page and bad page tables.
1321 */ 1321 */
1322void __init paging_init(struct machine_desc *mdesc) 1322void __init paging_init(const struct machine_desc *mdesc)
1323{ 1323{
1324 void *zero_page; 1324 void *zero_page;
1325 1325
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 1fa50100ab6a..34d4ab217bab 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -299,7 +299,7 @@ void __init sanity_check_meminfo(void)
299 * paging_init() sets up the page tables, initialises the zone memory 299 * paging_init() sets up the page tables, initialises the zone memory
300 * maps, and sets up the zero page, bad page and bad page tables. 300 * maps, and sets up the zero page, bad page and bad page tables.
301 */ 301 */
302void __init paging_init(struct machine_desc *mdesc) 302void __init paging_init(const struct machine_desc *mdesc)
303{ 303{
304 early_trap_init((void *)CONFIG_VECTORS_BASE); 304 early_trap_init((void *)CONFIG_VECTORS_BASE);
305 mpu_setup(); 305 mpu_setup();
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index d5146b98c8d1..db79b62c92fb 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -514,6 +514,32 @@ ENTRY(cpu_feroceon_set_pte_ext)
514#endif 514#endif
515 mov pc, lr 515 mov pc, lr
516 516
517/* Suspend/resume support: taken from arch/arm/mm/proc-arm926.S */
518.globl cpu_feroceon_suspend_size
519.equ cpu_feroceon_suspend_size, 4 * 3
520#ifdef CONFIG_ARM_CPU_SUSPEND
521ENTRY(cpu_feroceon_do_suspend)
522 stmfd sp!, {r4 - r6, lr}
523 mrc p15, 0, r4, c13, c0, 0 @ PID
524 mrc p15, 0, r5, c3, c0, 0 @ Domain ID
525 mrc p15, 0, r6, c1, c0, 0 @ Control register
526 stmia r0, {r4 - r6}
527 ldmfd sp!, {r4 - r6, pc}
528ENDPROC(cpu_feroceon_do_suspend)
529
530ENTRY(cpu_feroceon_do_resume)
531 mov ip, #0
532 mcr p15, 0, ip, c8, c7, 0 @ invalidate I+D TLBs
533 mcr p15, 0, ip, c7, c7, 0 @ invalidate I+D caches
534 ldmia r0, {r4 - r6}
535 mcr p15, 0, r4, c13, c0, 0 @ PID
536 mcr p15, 0, r5, c3, c0, 0 @ Domain ID
537 mcr p15, 0, r1, c2, c0, 0 @ TTB address
538 mov r0, r6 @ control register
539 b cpu_resume_mmu
540ENDPROC(cpu_feroceon_do_resume)
541#endif
542
517 .type __feroceon_setup, #function 543 .type __feroceon_setup, #function
518__feroceon_setup: 544__feroceon_setup:
519 mov r0, #0 545 mov r0, #0
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 73398bcf9bd8..c63d9bdee51e 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -83,7 +83,7 @@ ENTRY(cpu_v7_dcache_clean_area)
83 add r0, r0, r2 83 add r0, r0, r2
84 subs r1, r1, r2 84 subs r1, r1, r2
85 bhi 2b 85 bhi 2b
86 dsb 86 dsb ishst
87 mov pc, lr 87 mov pc, lr
88ENDPROC(cpu_v7_dcache_clean_area) 88ENDPROC(cpu_v7_dcache_clean_area)
89 89
@@ -330,7 +330,19 @@ __v7_setup:
3301: 3301:
331#endif 331#endif
332 332
3333: mov r10, #0 333 /* Cortex-A15 Errata */
3343: ldr r10, =0x00000c0f @ Cortex-A15 primary part number
335 teq r0, r10
336 bne 4f
337
338#ifdef CONFIG_ARM_ERRATA_773022
339 cmp r6, #0x4 @ only present up to r0p4
340 mrcle p15, 0, r10, c1, c0, 1 @ read aux control register
341 orrle r10, r10, #1 << 1 @ disable loop buffer
342 mcrle p15, 0, r10, c1, c0, 1 @ write aux control register
343#endif
344
3454: mov r10, #0
334 mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate 346 mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate
335 dsb 347 dsb
336#ifdef CONFIG_MMU 348#ifdef CONFIG_MMU
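The new __v7_setup code applies the ARM_ERRATA_773022 workaround only on Cortex-A15 (primary part number 0xc0f) up to r0p4, by setting bit 1 of the auxiliary control register to disable the loop buffer. A C rendering of the same check, assuming the revision in r6 was extracted from the low bits of MIDR (names here are illustrative):

	#include <asm/cputype.h>

	#define PART_CORTEX_A15	0xc0f

	static void __init erratum_773022_workaround(void)
	{
		u32 midr = read_cpuid_id();
		u32 part = (midr >> 4) & 0xfff;
		u32 rev  = midr & 0xf;		/* sketch: assumes variant r0 */
		u32 actlr;

		if (part != PART_CORTEX_A15 || rev > 4)
			return;

		asm volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (actlr));
		actlr |= 1 << 1;		/* disable loop buffer */
		asm volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (actlr));
	}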
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
index ea94765acf9a..355308767bae 100644
--- a/arch/arm/mm/tlb-v7.S
+++ b/arch/arm/mm/tlb-v7.S
@@ -35,7 +35,7 @@
35ENTRY(v7wbi_flush_user_tlb_range) 35ENTRY(v7wbi_flush_user_tlb_range)
36 vma_vm_mm r3, r2 @ get vma->vm_mm 36 vma_vm_mm r3, r2 @ get vma->vm_mm
37 mmid r3, r3 @ get vm_mm->context.id 37 mmid r3, r3 @ get vm_mm->context.id
38 dsb 38 dsb ish
39 mov r0, r0, lsr #PAGE_SHIFT @ align address 39 mov r0, r0, lsr #PAGE_SHIFT @ align address
40 mov r1, r1, lsr #PAGE_SHIFT 40 mov r1, r1, lsr #PAGE_SHIFT
41 asid r3, r3 @ mask ASID 41 asid r3, r3 @ mask ASID
@@ -56,7 +56,7 @@ ENTRY(v7wbi_flush_user_tlb_range)
56 add r0, r0, #PAGE_SZ 56 add r0, r0, #PAGE_SZ
57 cmp r0, r1 57 cmp r0, r1
58 blo 1b 58 blo 1b
59 dsb 59 dsb ish
60 mov pc, lr 60 mov pc, lr
61ENDPROC(v7wbi_flush_user_tlb_range) 61ENDPROC(v7wbi_flush_user_tlb_range)
62 62
@@ -69,7 +69,7 @@ ENDPROC(v7wbi_flush_user_tlb_range)
69 * - end - end address (exclusive, may not be aligned) 69 * - end - end address (exclusive, may not be aligned)
70 */ 70 */
71ENTRY(v7wbi_flush_kern_tlb_range) 71ENTRY(v7wbi_flush_kern_tlb_range)
72 dsb 72 dsb ish
73 mov r0, r0, lsr #PAGE_SHIFT @ align address 73 mov r0, r0, lsr #PAGE_SHIFT @ align address
74 mov r1, r1, lsr #PAGE_SHIFT 74 mov r1, r1, lsr #PAGE_SHIFT
75 mov r0, r0, lsl #PAGE_SHIFT 75 mov r0, r0, lsl #PAGE_SHIFT
@@ -84,7 +84,7 @@ ENTRY(v7wbi_flush_kern_tlb_range)
84 add r0, r0, #PAGE_SZ 84 add r0, r0, #PAGE_SZ
85 cmp r0, r1 85 cmp r0, r1
86 blo 1b 86 blo 1b
87 dsb 87 dsb ish
88 isb 88 isb
89 mov pc, lr 89 mov pc, lr
90ENDPROC(v7wbi_flush_kern_tlb_range) 90ENDPROC(v7wbi_flush_kern_tlb_range)
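These hunks narrow full-system dsb barriers to inner-shareable (ish) and store-only (ishst) variants, which are cheaper on SMP systems because the maintenance only has to complete within the inner-shareable domain. The same option syntax appears as dsb(st) in the l2x0 hunk above; the underlying macro presumably stringifies its argument along these lines:

	/* sketch of an option-taking barrier macro, as in asm/barrier.h */
	#define dsb(option)	__asm__ __volatile__ ("dsb " #option : : : "memory")

	static inline void tlb_maintenance_complete(void)
	{
		dsb(ish);	/* prior TLB ops visible, inner-shareable scope */
	}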
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index 4d463ca6821f..037660633fa4 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -2083,6 +2083,7 @@ static int omap_system_dma_probe(struct platform_device *pdev)
2083 dma_irq = platform_get_irq_byname(pdev, irq_name); 2083 dma_irq = platform_get_irq_byname(pdev, irq_name);
2084 if (dma_irq < 0) { 2084 if (dma_irq < 0) {
2085 dev_err(&pdev->dev, "failed: request IRQ %d", dma_irq); 2085 dev_err(&pdev->dev, "failed: request IRQ %d", dma_irq);
2086 ret = dma_irq;
2086 goto exit_dma_lch_fail; 2087 goto exit_dma_lch_fail;
2087 } 2088 }
2088 ret = setup_irq(dma_irq, &omap24xx_dma_irq); 2089 ret = setup_irq(dma_irq, &omap24xx_dma_irq);
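A classic error-path bug: the failure branch jumped to the cleanup label with whatever happened to be in ret, so the probe could return success (or a stale error) after platform_get_irq_byname() failed. The fix stores the negative errno first. The general idiom, sketched:

	#include <linux/platform_device.h>

	static int example_probe(struct platform_device *pdev)
	{
		int irq;

		irq = platform_get_irq_byname(pdev, "0");
		if (irq < 0)
			return irq;	/* propagate the real errno, never 0 */

		/* ... */
		return 0;
	}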
diff --git a/arch/arm/plat-pxa/ssp.c b/arch/arm/plat-pxa/ssp.c
index 8e11e96eab5e..c83f27b6bdda 100644
--- a/arch/arm/plat-pxa/ssp.c
+++ b/arch/arm/plat-pxa/ssp.c
@@ -30,6 +30,8 @@
30#include <linux/platform_device.h> 30#include <linux/platform_device.h>
31#include <linux/spi/pxa2xx_spi.h> 31#include <linux/spi/pxa2xx_spi.h>
32#include <linux/io.h> 32#include <linux/io.h>
33#include <linux/of.h>
34#include <linux/of_device.h>
33 35
34#include <asm/irq.h> 36#include <asm/irq.h>
35#include <mach/hardware.h> 37#include <mach/hardware.h>
@@ -60,6 +62,30 @@ struct ssp_device *pxa_ssp_request(int port, const char *label)
60} 62}
61EXPORT_SYMBOL(pxa_ssp_request); 63EXPORT_SYMBOL(pxa_ssp_request);
62 64
65struct ssp_device *pxa_ssp_request_of(const struct device_node *of_node,
66 const char *label)
67{
68 struct ssp_device *ssp = NULL;
69
70 mutex_lock(&ssp_lock);
71
72 list_for_each_entry(ssp, &ssp_list, node) {
73 if (ssp->of_node == of_node && ssp->use_count == 0) {
74 ssp->use_count++;
75 ssp->label = label;
76 break;
77 }
78 }
79
80 mutex_unlock(&ssp_lock);
81
82 if (&ssp->node == &ssp_list)
83 return NULL;
84
85 return ssp;
86}
87EXPORT_SYMBOL(pxa_ssp_request_of);
88
63void pxa_ssp_free(struct ssp_device *ssp) 89void pxa_ssp_free(struct ssp_device *ssp)
64{ 90{
65 mutex_lock(&ssp_lock); 91 mutex_lock(&ssp_lock);
@@ -72,96 +98,126 @@ void pxa_ssp_free(struct ssp_device *ssp)
72} 98}
73EXPORT_SYMBOL(pxa_ssp_free); 99EXPORT_SYMBOL(pxa_ssp_free);
74 100
101#ifdef CONFIG_OF
102static const struct of_device_id pxa_ssp_of_ids[] = {
103 { .compatible = "mrvl,pxa25x-ssp", .data = (void *) PXA25x_SSP },
104 { .compatible = "mvrl,pxa25x-nssp", .data = (void *) PXA25x_NSSP },
105 { .compatible = "mrvl,pxa27x-ssp", .data = (void *) PXA27x_SSP },
106 { .compatible = "mrvl,pxa3xx-ssp", .data = (void *) PXA3xx_SSP },
107 { .compatible = "mvrl,pxa168-ssp", .data = (void *) PXA168_SSP },
108 { .compatible = "mrvl,pxa910-ssp", .data = (void *) PXA910_SSP },
109 { .compatible = "mrvl,ce4100-ssp", .data = (void *) CE4100_SSP },
110 { .compatible = "mrvl,lpss-ssp", .data = (void *) LPSS_SSP },
111 { },
112};
113MODULE_DEVICE_TABLE(of, pxa_ssp_of_ids);
114#endif
115
75static int pxa_ssp_probe(struct platform_device *pdev) 116static int pxa_ssp_probe(struct platform_device *pdev)
76{ 117{
77 const struct platform_device_id *id = platform_get_device_id(pdev);
78 struct resource *res; 118 struct resource *res;
79 struct ssp_device *ssp; 119 struct ssp_device *ssp;
80 int ret = 0; 120 struct device *dev = &pdev->dev;
81 121
82 ssp = kzalloc(sizeof(struct ssp_device), GFP_KERNEL); 122 ssp = devm_kzalloc(dev, sizeof(struct ssp_device), GFP_KERNEL);
83 if (ssp == NULL) { 123 if (ssp == NULL)
84 dev_err(&pdev->dev, "failed to allocate memory");
85 return -ENOMEM; 124 return -ENOMEM;
86 }
87 ssp->pdev = pdev;
88 125
89 ssp->clk = clk_get(&pdev->dev, NULL); 126 ssp->pdev = pdev;
90 if (IS_ERR(ssp->clk)) {
91 ret = PTR_ERR(ssp->clk);
92 goto err_free;
93 }
94 127
95 res = platform_get_resource(pdev, IORESOURCE_DMA, 0); 128 ssp->clk = devm_clk_get(dev, NULL);
96 if (res == NULL) { 129 if (IS_ERR(ssp->clk))
97 dev_err(&pdev->dev, "no SSP RX DRCMR defined\n"); 130 return PTR_ERR(ssp->clk);
98 ret = -ENODEV; 131
99 goto err_free_clk; 132 if (dev->of_node) {
100 } 133 struct of_phandle_args dma_spec;
101 ssp->drcmr_rx = res->start; 134 struct device_node *np = dev->of_node;
135
136 /*
137 * FIXME: we should allocate the DMA channel from this
138 * context and pass the channel down to the ssp users.
139 * For now, we lookup the rx and tx indices manually
140 */
141
142 /* rx */
143 of_parse_phandle_with_args(np, "dmas", "#dma-cells",
144 0, &dma_spec);
145 ssp->drcmr_rx = dma_spec.args[0];
146 of_node_put(dma_spec.np);
147
148 /* tx */
149 of_parse_phandle_with_args(np, "dmas", "#dma-cells",
150 1, &dma_spec);
151 ssp->drcmr_tx = dma_spec.args[0];
152 of_node_put(dma_spec.np);
153 } else {
154 res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
155 if (res == NULL) {
156 dev_err(dev, "no SSP RX DRCMR defined\n");
157 return -ENODEV;
158 }
159 ssp->drcmr_rx = res->start;
102 160
103 res = platform_get_resource(pdev, IORESOURCE_DMA, 1); 161 res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
104 if (res == NULL) { 162 if (res == NULL) {
105 dev_err(&pdev->dev, "no SSP TX DRCMR defined\n"); 163 dev_err(dev, "no SSP TX DRCMR defined\n");
106 ret = -ENODEV; 164 return -ENODEV;
107 goto err_free_clk; 165 }
166 ssp->drcmr_tx = res->start;
108 } 167 }
109 ssp->drcmr_tx = res->start;
110 168
111 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 169 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
112 if (res == NULL) { 170 if (res == NULL) {
113 dev_err(&pdev->dev, "no memory resource defined\n"); 171 dev_err(dev, "no memory resource defined\n");
114 ret = -ENODEV; 172 return -ENODEV;
115 goto err_free_clk;
116 } 173 }
117 174
118 res = request_mem_region(res->start, resource_size(res), 175 res = devm_request_mem_region(dev, res->start, resource_size(res),
119 pdev->name); 176 pdev->name);
120 if (res == NULL) { 177 if (res == NULL) {
121 dev_err(&pdev->dev, "failed to request memory resource\n"); 178 dev_err(dev, "failed to request memory resource\n");
122 ret = -EBUSY; 179 return -EBUSY;
123 goto err_free_clk;
124 } 180 }
125 181
126 ssp->phys_base = res->start; 182 ssp->phys_base = res->start;
127 183
128 ssp->mmio_base = ioremap(res->start, resource_size(res)); 184 ssp->mmio_base = devm_ioremap(dev, res->start, resource_size(res));
129 if (ssp->mmio_base == NULL) { 185 if (ssp->mmio_base == NULL) {
130 dev_err(&pdev->dev, "failed to ioremap() registers\n"); 186 dev_err(dev, "failed to ioremap() registers\n");
131 ret = -ENODEV; 187 return -ENODEV;
132 goto err_free_mem;
133 } 188 }
134 189
135 ssp->irq = platform_get_irq(pdev, 0); 190 ssp->irq = platform_get_irq(pdev, 0);
136 if (ssp->irq < 0) { 191 if (ssp->irq < 0) {
137 dev_err(&pdev->dev, "no IRQ resource defined\n"); 192 dev_err(dev, "no IRQ resource defined\n");
138 ret = -ENODEV; 193 return -ENODEV;
139 goto err_free_io; 194 }
195
196 if (dev->of_node) {
197 const struct of_device_id *id =
198 of_match_device(of_match_ptr(pxa_ssp_of_ids), dev);
199 ssp->type = (int) id->data;
200 } else {
201 const struct platform_device_id *id =
202 platform_get_device_id(pdev);
203 ssp->type = (int) id->driver_data;
204
205 /* PXA2xx/3xx SSP ports starts from 1 and the internal pdev->id
206 * starts from 0, do a translation here
207 */
208 ssp->port_id = pdev->id + 1;
140 } 209 }
141 210
142 /* PXA2xx/3xx SSP ports starts from 1 and the internal pdev->id
143 * starts from 0, do a translation here
144 */
145 ssp->port_id = pdev->id + 1;
146 ssp->use_count = 0; 211 ssp->use_count = 0;
147 ssp->type = (int)id->driver_data; 212 ssp->of_node = dev->of_node;
148 213
149 mutex_lock(&ssp_lock); 214 mutex_lock(&ssp_lock);
150 list_add(&ssp->node, &ssp_list); 215 list_add(&ssp->node, &ssp_list);
151 mutex_unlock(&ssp_lock); 216 mutex_unlock(&ssp_lock);
152 217
153 platform_set_drvdata(pdev, ssp); 218 platform_set_drvdata(pdev, ssp);
154 return 0;
155 219
156err_free_io: 220 return 0;
157 iounmap(ssp->mmio_base);
158err_free_mem:
159 release_mem_region(res->start, resource_size(res));
160err_free_clk:
161 clk_put(ssp->clk);
162err_free:
163 kfree(ssp);
164 return ret;
165} 221}
166 222
167static int pxa_ssp_remove(struct platform_device *pdev) 223static int pxa_ssp_remove(struct platform_device *pdev)
@@ -201,8 +257,9 @@ static struct platform_driver pxa_ssp_driver = {
201 .probe = pxa_ssp_probe, 257 .probe = pxa_ssp_probe,
202 .remove = pxa_ssp_remove, 258 .remove = pxa_ssp_remove,
203 .driver = { 259 .driver = {
204 .owner = THIS_MODULE, 260 .owner = THIS_MODULE,
205 .name = "pxa2xx-ssp", 261 .name = "pxa2xx-ssp",
262 .of_match_table = of_match_ptr(pxa_ssp_of_ids),
206 }, 263 },
207 .id_table = ssp_id_table, 264 .id_table = ssp_id_table,
208}; 265};
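The probe conversion above replaces manual kzalloc/clk_get/request_mem_region/ioremap and its four-label unwind chain with devm_* managed equivalents, so every early return is safe and the explicit error paths disappear. A condensed sketch of the resulting shape, with a made-up driver name:

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/io.h>
	#include <linux/platform_device.h>

	static int foo_probe(struct platform_device *pdev)
	{
		struct device *dev = &pdev->dev;
		struct resource *res;
		void __iomem *base;
		struct clk *clk;

		clk = devm_clk_get(dev, NULL);
		if (IS_ERR(clk))
			return PTR_ERR(clk);	/* nothing to unwind */

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (!res)
			return -ENODEV;

		if (!devm_request_mem_region(dev, res->start,
					     resource_size(res), pdev->name))
			return -EBUSY;

		base = devm_ioremap(dev, res->start, resource_size(res));
		if (!base)
			return -ENOMEM;

		/* everything above is released automatically on detach */
		return 0;
	}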
diff --git a/arch/arm/plat-samsung/init.c b/arch/arm/plat-samsung/init.c
index 3e5c4619caa5..50a3ea0037db 100644
--- a/arch/arm/plat-samsung/init.c
+++ b/arch/arm/plat-samsung/init.c
@@ -55,12 +55,13 @@ void __init s3c_init_cpu(unsigned long idcode,
55 55
56 printk("CPU %s (id 0x%08lx)\n", cpu->name, idcode); 56 printk("CPU %s (id 0x%08lx)\n", cpu->name, idcode);
57 57
58 if (cpu->map_io == NULL || cpu->init == NULL) { 58 if (cpu->init == NULL) {
59 printk(KERN_ERR "CPU %s support not enabled\n", cpu->name); 59 printk(KERN_ERR "CPU %s support not enabled\n", cpu->name);
60 panic("Unsupported Samsung CPU"); 60 panic("Unsupported Samsung CPU");
61 } 61 }
62 62
63 cpu->map_io(); 63 if (cpu->map_io)
64 cpu->map_io();
64} 65}
65 66
66/* s3c24xx_init_clocks 67/* s3c24xx_init_clocks
diff --git a/arch/arm/plat-samsung/s3c-dma-ops.c b/arch/arm/plat-samsung/s3c-dma-ops.c
index 0cc40aea3f5a..98b10ba67dc7 100644
--- a/arch/arm/plat-samsung/s3c-dma-ops.c
+++ b/arch/arm/plat-samsung/s3c-dma-ops.c
@@ -82,7 +82,8 @@ static int s3c_dma_config(unsigned ch, struct samsung_dma_config *param)
82static int s3c_dma_prepare(unsigned ch, struct samsung_dma_prep *param) 82static int s3c_dma_prepare(unsigned ch, struct samsung_dma_prep *param)
83{ 83{
84 struct cb_data *data; 84 struct cb_data *data;
85 int len = (param->cap == DMA_CYCLIC) ? param->period : param->len; 85 dma_addr_t pos = param->buf;
86 dma_addr_t end = param->buf + param->len;
86 87
87 list_for_each_entry(data, &dma_list, node) 88 list_for_each_entry(data, &dma_list, node)
88 if (data->ch == ch) 89 if (data->ch == ch)
@@ -94,7 +95,15 @@ static int s3c_dma_prepare(unsigned ch, struct samsung_dma_prep *param)
94 data->fp_param = param->fp_param; 95 data->fp_param = param->fp_param;
95 } 96 }
96 97
97 s3c2410_dma_enqueue(ch, (void *)data, param->buf, len); 98 if (param->cap != DMA_CYCLIC) {
99 s3c2410_dma_enqueue(ch, (void *)data, param->buf, param->len);
100 return 0;
101 }
102
103 while (pos < end) {
104 s3c2410_dma_enqueue(ch, (void *)data, pos, param->period);
105 pos += param->period;
106 }
98 107
99 return 0; 108 return 0;
100} 109}
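Before this fix, a DMA_CYCLIC request enqueued only the first period; the loop now queues one transfer per period so the channel keeps cycling, which is what cyclic users such as audio expect. As a hypothetical caller, a four-period ring now results in four enqueued transfers (field names taken from the code above, sizes invented):

	#define PERIOD_BYTES	4096			/* hypothetical period size */

	struct samsung_dma_prep prep = {
		.cap	= DMA_CYCLIC,
		.buf	= dma_buf,			/* dma_addr_t of the ring */
		.len	= 4 * PERIOD_BYTES,		/* whole ring */
		.period	= PERIOD_BYTES,
		.fp	= period_done,			/* hypothetical callback */
	};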
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index 8d10dc8a1e17..3e5d3115a2a6 100644
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -78,6 +78,11 @@
78ENTRY(vfp_support_entry) 78ENTRY(vfp_support_entry)
79 DBGSTR3 "instr %08x pc %08x state %p", r0, r2, r10 79 DBGSTR3 "instr %08x pc %08x state %p", r0, r2, r10
80 80
81 ldr r3, [sp, #S_PSR] @ Neither lazy restore nor FP exceptions
82 and r3, r3, #MODE_MASK @ are supported in kernel mode
83 teq r3, #USR_MODE
84 bne vfp_kmode_exception @ Returns through lr
85
81 VFPFMRX r1, FPEXC @ Is the VFP enabled? 86 VFPFMRX r1, FPEXC @ Is the VFP enabled?
82 DBGSTR1 "fpexc %08x", r1 87 DBGSTR1 "fpexc %08x", r1
83 tst r1, #FPEXC_EN 88 tst r1, #FPEXC_EN
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 5dfbb0b8e7f4..52b8f40b1c73 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -20,6 +20,7 @@
20#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/uaccess.h> 21#include <linux/uaccess.h>
22#include <linux/user.h> 22#include <linux/user.h>
23#include <linux/export.h>
23 24
24#include <asm/cp15.h> 25#include <asm/cp15.h>
25#include <asm/cputype.h> 26#include <asm/cputype.h>
@@ -648,6 +649,72 @@ static int vfp_hotplug(struct notifier_block *b, unsigned long action,
648 return NOTIFY_OK; 649 return NOTIFY_OK;
649} 650}
650 651
652void vfp_kmode_exception(void)
653{
654 /*
655 * If we reach this point, a floating point exception has been raised
656 * while running in kernel mode. If the NEON/VFP unit was enabled at the
657 * time, it means a VFP instruction has been issued that requires
658 * software assistance to complete, something which is not currently
659 * supported in kernel mode.
660 * If the NEON/VFP unit was disabled, and the location pointed to below
661 * is properly preceded by a call to kernel_neon_begin(), something has
662 * caused the task to be scheduled out and back in again. In this case,
663 * rebuilding and running with CONFIG_DEBUG_ATOMIC_SLEEP enabled should
664 * be helpful in localizing the problem.
665 */
666 if (fmrx(FPEXC) & FPEXC_EN)
667 pr_crit("BUG: unsupported FP instruction in kernel mode\n");
668 else
669 pr_crit("BUG: FP instruction issued in kernel mode with FP unit disabled\n");
670}
671
672#ifdef CONFIG_KERNEL_MODE_NEON
673
674/*
675 * Kernel-side NEON support functions
676 */
677void kernel_neon_begin(void)
678{
679 struct thread_info *thread = current_thread_info();
680 unsigned int cpu;
681 u32 fpexc;
682
683 /*
684 * Kernel mode NEON is only allowed outside of interrupt context
685 * with preemption disabled. This will make sure that the kernel
686 * mode NEON register contents never need to be preserved.
687 */
688 BUG_ON(in_interrupt());
689 cpu = get_cpu();
690
691 fpexc = fmrx(FPEXC) | FPEXC_EN;
692 fmxr(FPEXC, fpexc);
693
694 /*
695 * Save the userland NEON/VFP state. Under UP,
696 * the owner could be a task other than 'current'
697 */
698 if (vfp_state_in_hw(cpu, thread))
699 vfp_save_state(&thread->vfpstate, fpexc);
700#ifndef CONFIG_SMP
701 else if (vfp_current_hw_state[cpu] != NULL)
702 vfp_save_state(vfp_current_hw_state[cpu], fpexc);
703#endif
704 vfp_current_hw_state[cpu] = NULL;
705}
706EXPORT_SYMBOL(kernel_neon_begin);
707
708void kernel_neon_end(void)
709{
710 /* Disable the NEON/VFP unit. */
711 fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
712 put_cpu();
713}
714EXPORT_SYMBOL(kernel_neon_end);
715
716#endif /* CONFIG_KERNEL_MODE_NEON */
717
651/* 718/*
652 * VFP support code initialisation. 719 * VFP support code initialisation.
653 */ 720 */
@@ -731,4 +798,4 @@ static int __init vfp_init(void)
731 return 0; 798 return 0;
732} 799}
733 800
734late_initcall(vfp_init); 801core_initcall(vfp_init);
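kernel_neon_begin() and kernel_neon_end() bracket any kernel-mode NEON use: begin pins the CPU via get_cpu(), enables the unit, and saves live userland VFP/NEON state; end disables the unit and drops the CPU reference. Since begin BUG()s in interrupt context, callers need a fallback path. Typical usage, with hypothetical worker functions and assuming the cpu_has_neon() helper (as in the arm64 asm/neon.h added below):

	#include <linux/hardirq.h>
	#include <asm/neon.h>

	void xor_region(void *dst, const void *src, size_t bytes)
	{
		if (cpu_has_neon() && !in_interrupt()) {
			kernel_neon_begin();
			xor_region_neon(dst, src, bytes);	/* hypothetical NEON path */
			kernel_neon_end();
		} else {
			xor_region_scalar(dst, src, bytes);	/* hypothetical fallback */
		}
	}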
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index c9770ba5c7df..8a6295c86209 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -170,6 +170,7 @@ static void __init xen_percpu_init(void *unused)
170 per_cpu(xen_vcpu, cpu) = vcpup; 170 per_cpu(xen_vcpu, cpu) = vcpup;
171 171
172 enable_percpu_irq(xen_events_irq, 0); 172 enable_percpu_irq(xen_events_irq, 0);
173 put_cpu();
173} 174}
174 175
175static void xen_restart(enum reboot_mode reboot_mode, const char *cmd) 176static void xen_restart(enum reboot_mode reboot_mode, const char *cmd)
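The added put_cpu() balances a get_cpu() taken earlier in xen_percpu_init() (outside this hunk); without it the function returned with preemption permanently disabled. The idiom:

	#include <linux/smp.h>

	static void touch_percpu_state(void)
	{
		int cpu = get_cpu();	/* disables preemption, returns this CPU id */

		/* ... safely use per-CPU data for 'cpu' ... */

		put_cpu();		/* must balance get_cpu() on every path */
	}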
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 9737e97f9f38..ae323a45c28c 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -96,6 +96,9 @@ config SWIOTLB
96config IOMMU_HELPER 96config IOMMU_HELPER
97 def_bool SWIOTLB 97 def_bool SWIOTLB
98 98
99config KERNEL_MODE_NEON
100 def_bool y
101
99source "init/Kconfig" 102source "init/Kconfig"
100 103
101source "kernel/Kconfig.freezer" 104source "kernel/Kconfig.freezer"
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
index 98abd476992d..c9f1d2816c2b 100644
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -26,7 +26,13 @@
26 26
27#include <clocksource/arm_arch_timer.h> 27#include <clocksource/arm_arch_timer.h>
28 28
29static inline void arch_timer_reg_write(int access, int reg, u32 val) 29/*
30 * These register accessors are marked inline so the compiler can
31 * nicely work out which register we want, and chuck away the rest of
32 * the code.
33 */
34static __always_inline
35void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u32 val)
30{ 36{
31 if (access == ARCH_TIMER_PHYS_ACCESS) { 37 if (access == ARCH_TIMER_PHYS_ACCESS) {
32 switch (reg) { 38 switch (reg) {
@@ -36,8 +42,6 @@ static inline void arch_timer_reg_write(int access, int reg, u32 val)
36 case ARCH_TIMER_REG_TVAL: 42 case ARCH_TIMER_REG_TVAL:
37 asm volatile("msr cntp_tval_el0, %0" : : "r" (val)); 43 asm volatile("msr cntp_tval_el0, %0" : : "r" (val));
38 break; 44 break;
39 default:
40 BUILD_BUG();
41 } 45 }
42 } else if (access == ARCH_TIMER_VIRT_ACCESS) { 46 } else if (access == ARCH_TIMER_VIRT_ACCESS) {
43 switch (reg) { 47 switch (reg) {
@@ -47,17 +51,14 @@ static inline void arch_timer_reg_write(int access, int reg, u32 val)
47 case ARCH_TIMER_REG_TVAL: 51 case ARCH_TIMER_REG_TVAL:
48 asm volatile("msr cntv_tval_el0, %0" : : "r" (val)); 52 asm volatile("msr cntv_tval_el0, %0" : : "r" (val));
49 break; 53 break;
50 default:
51 BUILD_BUG();
52 } 54 }
53 } else {
54 BUILD_BUG();
55 } 55 }
56 56
57 isb(); 57 isb();
58} 58}
59 59
60static inline u32 arch_timer_reg_read(int access, int reg) 60static __always_inline
61u32 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg)
61{ 62{
62 u32 val; 63 u32 val;
63 64
@@ -69,8 +70,6 @@ static inline u32 arch_timer_reg_read(int access, int reg)
69 case ARCH_TIMER_REG_TVAL: 70 case ARCH_TIMER_REG_TVAL:
70 asm volatile("mrs %0, cntp_tval_el0" : "=r" (val)); 71 asm volatile("mrs %0, cntp_tval_el0" : "=r" (val));
71 break; 72 break;
72 default:
73 BUILD_BUG();
74 } 73 }
75 } else if (access == ARCH_TIMER_VIRT_ACCESS) { 74 } else if (access == ARCH_TIMER_VIRT_ACCESS) {
76 switch (reg) { 75 switch (reg) {
@@ -80,11 +79,7 @@ static inline u32 arch_timer_reg_read(int access, int reg)
80 case ARCH_TIMER_REG_TVAL: 79 case ARCH_TIMER_REG_TVAL:
81 asm volatile("mrs %0, cntv_tval_el0" : "=r" (val)); 80 asm volatile("mrs %0, cntv_tval_el0" : "=r" (val));
82 break; 81 break;
83 default:
84 BUILD_BUG();
85 } 82 }
86 } else {
87 BUILD_BUG();
88 } 83 }
89 84
90 return val; 85 return val;
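Marking the accessors __always_inline and dropping the BUILD_BUG() defaults relies on every caller passing compile-time-constant access/reg arguments, so each switch collapses to a single mrs or msr. A reduced sketch of the pattern:

	static __always_inline u32 timer_reg_read(int reg)
	{
		u32 val = 0;

		switch (reg) {
		case 0:
			asm volatile("mrs %0, cntv_ctl_el0" : "=r" (val));
			break;
		case 1:
			asm volatile("mrs %0, cntv_tval_el0" : "=r" (val));
			break;
		}
		return val;
	}

	static inline u32 read_tval(void)
	{
		return timer_reg_read(1);	/* constant arg: folds to one mrs */
	}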
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index fe32c0e4ac01..e7fa87f9201b 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -33,8 +33,6 @@ typedef unsigned long elf_greg_t;
33typedef elf_greg_t elf_gregset_t[ELF_NGREG]; 33typedef elf_greg_t elf_gregset_t[ELF_NGREG];
34typedef struct user_fpsimd_state elf_fpregset_t; 34typedef struct user_fpsimd_state elf_fpregset_t;
35 35
36#define EM_AARCH64 183
37
38/* 36/*
39 * AArch64 static relocation types. 37 * AArch64 static relocation types.
40 */ 38 */
@@ -151,7 +149,6 @@ extern unsigned long arch_randomize_brk(struct mm_struct *mm);
151#define arch_randomize_brk arch_randomize_brk 149#define arch_randomize_brk arch_randomize_brk
152 150
153#ifdef CONFIG_COMPAT 151#ifdef CONFIG_COMPAT
154#define EM_ARM 40
155#define COMPAT_ELF_PLATFORM ("v8l") 152#define COMPAT_ELF_PLATFORM ("v8l")
156 153
157#define COMPAT_ELF_ET_DYN_BASE (randomize_et_dyn(2 * TASK_SIZE_32 / 3)) 154#define COMPAT_ELF_ET_DYN_BASE (randomize_et_dyn(2 * TASK_SIZE_32 / 3))
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index c92de4163eba..b25763bc0ec4 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -42,14 +42,15 @@
42#define TPIDR_EL1 18 /* Thread ID, Privileged */ 42#define TPIDR_EL1 18 /* Thread ID, Privileged */
43#define AMAIR_EL1 19 /* Aux Memory Attribute Indirection Register */ 43#define AMAIR_EL1 19 /* Aux Memory Attribute Indirection Register */
44#define CNTKCTL_EL1 20 /* Timer Control Register (EL1) */ 44#define CNTKCTL_EL1 20 /* Timer Control Register (EL1) */
45#define PAR_EL1 21 /* Physical Address Register */
45/* 32bit specific registers. Keep them at the end of the range */ 46/* 32bit specific registers. Keep them at the end of the range */
46#define DACR32_EL2 21 /* Domain Access Control Register */ 47#define DACR32_EL2 22 /* Domain Access Control Register */
47#define IFSR32_EL2 22 /* Instruction Fault Status Register */ 48#define IFSR32_EL2 23 /* Instruction Fault Status Register */
48#define FPEXC32_EL2 23 /* Floating-Point Exception Control Register */ 49#define FPEXC32_EL2 24 /* Floating-Point Exception Control Register */
49#define DBGVCR32_EL2 24 /* Debug Vector Catch Register */ 50#define DBGVCR32_EL2 25 /* Debug Vector Catch Register */
50#define TEECR32_EL1 25 /* ThumbEE Configuration Register */ 51#define TEECR32_EL1 26 /* ThumbEE Configuration Register */
51#define TEEHBR32_EL1 26 /* ThumbEE Handler Base Register */ 52#define TEEHBR32_EL1 27 /* ThumbEE Handler Base Register */
52#define NR_SYS_REGS 27 53#define NR_SYS_REGS 28
53 54
54/* 32bit mapping */ 55/* 32bit mapping */
55#define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */ 56#define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */
@@ -69,6 +70,8 @@
69#define c5_AIFSR (AFSR1_EL1 * 2) /* Auxiliary Instr Fault Status R */ 70#define c5_AIFSR (AFSR1_EL1 * 2) /* Auxiliary Instr Fault Status R */
70#define c6_DFAR (FAR_EL1 * 2) /* Data Fault Address Register */ 71#define c6_DFAR (FAR_EL1 * 2) /* Data Fault Address Register */
71#define c6_IFAR (c6_DFAR + 1) /* Instruction Fault Address Register */ 72#define c6_IFAR (c6_DFAR + 1) /* Instruction Fault Address Register */
73#define c7_PAR (PAR_EL1 * 2) /* Physical Address Register */
74#define c7_PAR_high (c7_PAR + 1) /* PAR top 32 bits */
72#define c10_PRRR (MAIR_EL1 * 2) /* Primary Region Remap Register */ 75#define c10_PRRR (MAIR_EL1 * 2) /* Primary Region Remap Register */
73#define c10_NMRR (c10_PRRR + 1) /* Normal Memory Remap Register */ 76#define c10_NMRR (c10_PRRR + 1) /* Normal Memory Remap Register */
74#define c12_VBAR (VBAR_EL1 * 2) /* Vector Base Address Register */ 77#define c12_VBAR (VBAR_EL1 * 2) /* Vector Base Address Register */
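PAR_EL1 joins the saved system-register file here, shifting every 32-bit-only index up by one and growing NR_SYS_REGS to 28. The c7_PAR/c7_PAR_high pair follows the existing convention that one 64-bit EL1 register spans two consecutive 32-bit slots; a hedged sketch (helper name hypothetical) of reassembling the AArch32 view:

static u64 read_guest_par(const u32 *cp15)
{
	/* c7_PAR == PAR_EL1 * 2 and c7_PAR_high == c7_PAR + 1, per the defines above */
	return ((u64)cp15[c7_PAR_high] << 32) | cp15[c7_PAR];
}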
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 644d73956864..0859a4ddd1e7 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -129,7 +129,7 @@ struct kvm_vcpu_arch {
129 struct kvm_mmu_memory_cache mmu_page_cache; 129 struct kvm_mmu_memory_cache mmu_page_cache;
130 130
131 /* Target CPU and feature flags */ 131 /* Target CPU and feature flags */
132 u32 target; 132 int target;
133 DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES); 133 DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
134 134
135 /* Detect first run of a vcpu */ 135 /* Detect first run of a vcpu */
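Turning target from u32 into a signed int presumably makes a negative sentinel representable, so "no target configured yet" can be expressed directly. A hedged sketch of the idiom this enables (helper name hypothetical):

static bool vcpu_target_set(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.target >= 0;	/* -1: never initialised; impossible as u32 */
}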
diff --git a/arch/arm64/include/asm/neon.h b/arch/arm64/include/asm/neon.h
new file mode 100644
index 000000000000..b0cc58a97780
--- /dev/null
+++ b/arch/arm64/include/asm/neon.h
@@ -0,0 +1,14 @@
1/*
2 * linux/arch/arm64/include/asm/neon.h
3 *
4 * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#define cpu_has_neon() (1)
12
13void kernel_neon_begin(void);
14void kernel_neon_end(void);
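This new header is the entire kernel-mode NEON contract for arm64: bracket any NEON use with the begin/end pair, and never from interrupt context (see the fpsimd.c hunk below). A minimal usage sketch; the transform routine is hypothetical and would be compiled with NEON enabled:

#include <asm/neon.h>

static void xor_blocks(u8 *dst, const u8 *src, size_t len)
{
	kernel_neon_begin();		/* saves user FPSIMD state, disables preemption */
	neon_xor_transform(dst, src, len);	/* hypothetical NEON routine */
	kernel_neon_end();		/* restores user state, re-enables preemption */
}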
diff --git a/arch/arm64/include/asm/pgtable-2level-types.h b/arch/arm64/include/asm/pgtable-2level-types.h
index 3c3ca7d361e4..5f101e63dfc1 100644
--- a/arch/arm64/include/asm/pgtable-2level-types.h
+++ b/arch/arm64/include/asm/pgtable-2level-types.h
@@ -16,6 +16,8 @@
16#ifndef __ASM_PGTABLE_2LEVEL_TYPES_H 16#ifndef __ASM_PGTABLE_2LEVEL_TYPES_H
17#define __ASM_PGTABLE_2LEVEL_TYPES_H 17#define __ASM_PGTABLE_2LEVEL_TYPES_H
18 18
19#include <asm/types.h>
20
19typedef u64 pteval_t; 21typedef u64 pteval_t;
20typedef u64 pgdval_t; 22typedef u64 pgdval_t;
21typedef pgdval_t pmdval_t; 23typedef pgdval_t pmdval_t;
diff --git a/arch/arm64/include/asm/pgtable-3level-types.h b/arch/arm64/include/asm/pgtable-3level-types.h
index 4489615f14a9..4e94424938a4 100644
--- a/arch/arm64/include/asm/pgtable-3level-types.h
+++ b/arch/arm64/include/asm/pgtable-3level-types.h
@@ -16,6 +16,8 @@
16#ifndef __ASM_PGTABLE_3LEVEL_TYPES_H 16#ifndef __ASM_PGTABLE_3LEVEL_TYPES_H
17#define __ASM_PGTABLE_3LEVEL_TYPES_H 17#define __ASM_PGTABLE_3LEVEL_TYPES_H
18 18
19#include <asm/types.h>
20
19typedef u64 pteval_t; 21typedef u64 pteval_t;
20typedef u64 pmdval_t; 22typedef u64 pmdval_t;
21typedef u64 pgdval_t; 23typedef u64 pgdval_t;
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index e182a356c979..d57e66845c86 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -122,5 +122,6 @@
122#define TCR_TG1_64K (UL(1) << 30) 122#define TCR_TG1_64K (UL(1) << 30)
123#define TCR_IPS_40BIT (UL(2) << 32) 123#define TCR_IPS_40BIT (UL(2) << 32)
124#define TCR_ASID16 (UL(1) << 36) 124#define TCR_ASID16 (UL(1) << 36)
125#define TCR_TBI0 (UL(1) << 37)
125 126
126#endif 127#endif
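TCR_TBI0 is TCR_EL1 bit 37, "top byte ignored" for TTBR0 (user) addresses; once set (see the proc.S hunk further down), bits 63:56 of a user pointer are excluded from translation, which is what makes tagged user pointers workable. A hedged illustration:

/* With TBI0 enabled, the tagged and untagged pointers reach the same mapping. */
static void *tag_ptr(void *p, unsigned char tag)
{
	return (void *)(((unsigned long)p & ~(0xffUL << 56)) |
			((unsigned long)tag << 56));
}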
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 46b3beb4b773..717031a762c2 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -35,6 +35,7 @@ struct mmu_gather {
35 struct mm_struct *mm; 35 struct mm_struct *mm;
36 unsigned int fullmm; 36 unsigned int fullmm;
37 struct vm_area_struct *vma; 37 struct vm_area_struct *vma;
38 unsigned long start, end;
38 unsigned long range_start; 39 unsigned long range_start;
39 unsigned long range_end; 40 unsigned long range_end;
40 unsigned int nr; 41 unsigned int nr;
@@ -97,10 +98,12 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
97} 98}
98 99
99static inline void 100static inline void
100tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm) 101tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
101{ 102{
102 tlb->mm = mm; 103 tlb->mm = mm;
103 tlb->fullmm = fullmm; 104 tlb->fullmm = !(start | (end+1));
105 tlb->start = start;
106 tlb->end = end;
104 tlb->vma = NULL; 107 tlb->vma = NULL;
105 tlb->max = ARRAY_SIZE(tlb->local); 108 tlb->max = ARRAY_SIZE(tlb->local);
106 tlb->pages = tlb->local; 109 tlb->pages = tlb->local;
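tlb_gather_mmu() now receives the unmapped address range instead of a full_mm flag; fullmm is recovered from the encoding, since !(start | (end + 1)) is non-zero only for start == 0, end == ~0UL. Hedged call-site sketch:

struct mmu_gather tlb;

tlb_gather_mmu(&tlb, mm, 0, -1);	/* whole-mm teardown: tlb.fullmm == 1 */
tlb_gather_mmu(&tlb, mm, start, end);	/* ranged unmap: tlb.fullmm == 0 */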
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 6ad781b21c08..3881fd115ebb 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -423,6 +423,7 @@ el0_da:
423 * Data abort handling 423 * Data abort handling
424 */ 424 */
425 mrs x0, far_el1 425 mrs x0, far_el1
426 bic x0, x0, #(0xff << 56)
426 disable_step x1 427 disable_step x1
427 isb 428 isb
428 enable_dbg 429 enable_dbg
@@ -476,6 +477,8 @@ el0_undef:
476 * Undefined instruction 477 * Undefined instruction
477 */ 478 */
478 mov x0, sp 479 mov x0, sp
480 // enable interrupts before calling the main handler
481 enable_irq
479 b do_undefinstr 482 b do_undefinstr
480el0_dbg: 483el0_dbg:
481 /* 484 /*
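The new bic in el0_da strips the top byte from the reported fault address, so a tagged user pointer (TBI0 above) faults with its untagged address; the enable_irq ahead of do_undefinstr lets the undefined-instruction handler run with interrupts on. A hedged C rendering of the mask:

static unsigned long untag_far(unsigned long far)
{
	return far & ~(0xffUL << 56);	/* same effect as bic x0, x0, #(0xff << 56) */
}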
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index e8b8357aedb4..1f2e4d5a5c0f 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -21,6 +21,7 @@
21#include <linux/init.h> 21#include <linux/init.h>
22#include <linux/sched.h> 22#include <linux/sched.h>
23#include <linux/signal.h> 23#include <linux/signal.h>
24#include <linux/hardirq.h>
24 25
25#include <asm/fpsimd.h> 26#include <asm/fpsimd.h>
26#include <asm/cputype.h> 27#include <asm/cputype.h>
@@ -83,6 +84,33 @@ void fpsimd_flush_thread(void)
83 fpsimd_load_state(&current->thread.fpsimd_state); 84 fpsimd_load_state(&current->thread.fpsimd_state);
84} 85}
85 86
87#ifdef CONFIG_KERNEL_MODE_NEON
88
89/*
90 * Kernel-side NEON support functions
91 */
92void kernel_neon_begin(void)
93{
94 /* Avoid using the NEON in interrupt context */
95 BUG_ON(in_interrupt());
96 preempt_disable();
97
98 if (current->mm)
99 fpsimd_save_state(&current->thread.fpsimd_state);
100}
101EXPORT_SYMBOL(kernel_neon_begin);
102
103void kernel_neon_end(void)
104{
105 if (current->mm)
106 fpsimd_load_state(&current->thread.fpsimd_state);
107
108 preempt_enable();
109}
110EXPORT_SYMBOL(kernel_neon_end);
111
112#endif /* CONFIG_KERNEL_MODE_NEON */
113
86/* 114/*
87 * FP/SIMD support code initialisation. 115 * FP/SIMD support code initialisation.
88 */ 116 */
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 53dcae49e729..7090c126797c 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -112,6 +112,14 @@
112 .quad TEXT_OFFSET // Image load offset from start of RAM 112 .quad TEXT_OFFSET // Image load offset from start of RAM
113 .quad 0 // reserved 113 .quad 0 // reserved
114 .quad 0 // reserved 114 .quad 0 // reserved
115 .quad 0 // reserved
116 .quad 0 // reserved
117 .quad 0 // reserved
118 .byte 0x41 // Magic number, "ARM\x64"
119 .byte 0x52
120 .byte 0x4d
121 .byte 0x64
122 .word 0 // reserved
115 123
116ENTRY(stext) 124ENTRY(stext)
117 mov x21, x0 // x21=FDT 125 mov x21, x0 // x21=FDT
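The image header grows to its documented layout: counting the two instruction words ahead of text_offset, the three extra reserved quads put the "ARM\x64" magic (bytes 0x41 0x52 0x4d 0x64) at byte offset 56. A hedged loader-side check:

static int is_arm64_image(const unsigned char *img)
{
	return img[56] == 0x41 && img[57] == 0x52 &&	/* "AR" */
	       img[58] == 0x4d && img[59] == 0x64;	/* "M", 0x64 */
}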
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 9ba33c40cdf8..cea1594ff933 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -107,7 +107,12 @@ armpmu_map_cache_event(const unsigned (*cache_map)
107static int 107static int
108armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config) 108armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
109{ 109{
110 int mapping = (*event_map)[config]; 110 int mapping;
111
112 if (config >= PERF_COUNT_HW_MAX)
113 return -EINVAL;
114
115 mapping = (*event_map)[config];
111 return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping; 116 return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
112} 117}
113 118
@@ -317,7 +322,13 @@ validate_event(struct pmu_hw_events *hw_events,
317 struct hw_perf_event fake_event = event->hw; 322 struct hw_perf_event fake_event = event->hw;
318 struct pmu *leader_pmu = event->group_leader->pmu; 323 struct pmu *leader_pmu = event->group_leader->pmu;
319 324
320 if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF) 325 if (is_software_event(event))
326 return 1;
327
328 if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
329 return 1;
330
331 if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
321 return 1; 332 return 1;
322 333
323 return armpmu->get_event_idx(hw_events, &fake_event) >= 0; 334 return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
@@ -773,7 +784,7 @@ static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
773/* 784/*
774 * PMXEVTYPER: Event selection reg 785 * PMXEVTYPER: Event selection reg
775 */ 786 */
776#define ARMV8_EVTYPE_MASK 0xc00000ff /* Mask for writable bits */ 787#define ARMV8_EVTYPE_MASK 0xc80000ff /* Mask for writable bits */
777#define ARMV8_EVTYPE_EVENT 0xff /* Mask for EVENT bits */ 788#define ARMV8_EVTYPE_EVENT 0xff /* Mask for EVENT bits */
778 789
779/* 790/*
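attr.config is user-controlled, so indexing the event map without the new range check reads out of bounds; the widened ARMV8_EVTYPE_MASK additionally lets bit 27 (presumably the EL2/NSH count filter) be written. Hedged illustration of the hole the bounds check closes:

struct perf_event_attr attr = {
	.type	= PERF_TYPE_HARDWARE,
	.config	= ~0ULL,	/* >= PERF_COUNT_HW_MAX: now rejected with -EINVAL
				 * instead of indexing past the map */
};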
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index add6ea616843..bca4c1c2052a 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -328,9 +328,6 @@ static int c_show(struct seq_file *m, void *v)
328#ifdef CONFIG_SMP 328#ifdef CONFIG_SMP
329 seq_printf(m, "processor\t: %d\n", i); 329 seq_printf(m, "processor\t: %d\n", i);
330#endif 330#endif
331 seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
332 loops_per_jiffy / (500000UL/HZ),
333 loops_per_jiffy / (5000UL/HZ) % 100);
334 } 331 }
335 332
336 /* dump out the processor features */ 333 /* dump out the processor features */
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index fee5cce83450..78db90dcc910 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -223,11 +223,7 @@ asmlinkage void secondary_start_kernel(void)
223 223
224void __init smp_cpus_done(unsigned int max_cpus) 224void __init smp_cpus_done(unsigned int max_cpus)
225{ 225{
226 unsigned long bogosum = loops_per_jiffy * num_online_cpus(); 226 pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
227
228 pr_info("SMP: Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
229 num_online_cpus(), bogosum / (500000/HZ),
230 (bogosum / (5000/HZ)) % 100);
231} 227}
232 228
233void __init smp_prepare_boot_cpu(void) 229void __init smp_prepare_boot_cpu(void)
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index f5e55747242f..f8ab9d8e2ea3 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -71,6 +71,7 @@ SECTIONS
71 71
72 RO_DATA(PAGE_SIZE) 72 RO_DATA(PAGE_SIZE)
73 EXCEPTION_TABLE(8) 73 EXCEPTION_TABLE(8)
74 NOTES
74 _etext = .; /* End of text and rodata section */ 75 _etext = .; /* End of text and rodata section */
75 76
76 . = ALIGN(PAGE_SIZE); 77 . = ALIGN(PAGE_SIZE);
@@ -122,8 +123,6 @@ SECTIONS
122 } 123 }
123 _edata_loc = __data_loc + SIZEOF(.data); 124 _edata_loc = __data_loc + SIZEOF(.data);
124 125
125 NOTES
126
127 BSS_SECTION(0, 0, 0) 126 BSS_SECTION(0, 0, 0)
128 _end = .; 127 _end = .;
129 128
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index ff985e3d8b72..1ac0bbbdddb2 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -214,6 +214,7 @@ __kvm_hyp_code_start:
214 mrs x21, tpidr_el1 214 mrs x21, tpidr_el1
215 mrs x22, amair_el1 215 mrs x22, amair_el1
216 mrs x23, cntkctl_el1 216 mrs x23, cntkctl_el1
217 mrs x24, par_el1
217 218
218 stp x4, x5, [x3] 219 stp x4, x5, [x3]
219 stp x6, x7, [x3, #16] 220 stp x6, x7, [x3, #16]
@@ -225,6 +226,7 @@ __kvm_hyp_code_start:
225 stp x18, x19, [x3, #112] 226 stp x18, x19, [x3, #112]
226 stp x20, x21, [x3, #128] 227 stp x20, x21, [x3, #128]
227 stp x22, x23, [x3, #144] 228 stp x22, x23, [x3, #144]
229 str x24, [x3, #160]
228.endm 230.endm
229 231
230.macro restore_sysregs 232.macro restore_sysregs
@@ -243,6 +245,7 @@ __kvm_hyp_code_start:
243 ldp x18, x19, [x3, #112] 245 ldp x18, x19, [x3, #112]
244 ldp x20, x21, [x3, #128] 246 ldp x20, x21, [x3, #128]
245 ldp x22, x23, [x3, #144] 247 ldp x22, x23, [x3, #144]
248 ldr x24, [x3, #160]
246 249
247 msr vmpidr_el2, x4 250 msr vmpidr_el2, x4
248 msr csselr_el1, x5 251 msr csselr_el1, x5
@@ -264,6 +267,7 @@ __kvm_hyp_code_start:
264 msr tpidr_el1, x21 267 msr tpidr_el1, x21
265 msr amair_el1, x22 268 msr amair_el1, x22
266 msr cntkctl_el1, x23 269 msr cntkctl_el1, x23
270 msr par_el1, x24
267.endm 271.endm
268 272
269.macro skip_32bit_state tmp, target 273.macro skip_32bit_state tmp, target
@@ -600,6 +604,8 @@ END(__kvm_vcpu_run)
600 604
601// void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); 605// void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
602ENTRY(__kvm_tlb_flush_vmid_ipa) 606ENTRY(__kvm_tlb_flush_vmid_ipa)
607 dsb ishst
608
603 kern_hyp_va x0 609 kern_hyp_va x0
604 ldr x2, [x0, #KVM_VTTBR] 610 ldr x2, [x0, #KVM_VTTBR]
605 msr vttbr_el2, x2 611 msr vttbr_el2, x2
@@ -621,6 +627,7 @@ ENTRY(__kvm_tlb_flush_vmid_ipa)
621ENDPROC(__kvm_tlb_flush_vmid_ipa) 627ENDPROC(__kvm_tlb_flush_vmid_ipa)
622 628
623ENTRY(__kvm_flush_vm_context) 629ENTRY(__kvm_flush_vm_context)
630 dsb ishst
624 tlbi alle1is 631 tlbi alle1is
625 ic ialluis 632 ic ialluis
626 dsb sy 633 dsb sy
@@ -753,6 +760,10 @@ el1_trap:
753 */ 760 */
754 tbnz x1, #7, 1f // S1PTW is set 761 tbnz x1, #7, 1f // S1PTW is set
755 762
763 /* Preserve PAR_EL1 */
764 mrs x3, par_el1
765 push x3, xzr
766
756 /* 767 /*
757 * Permission fault, HPFAR_EL2 is invalid. 768 * Permission fault, HPFAR_EL2 is invalid.
758 * Resolve the IPA the hard way using the guest VA. 769 * Resolve the IPA the hard way using the guest VA.
@@ -766,6 +777,8 @@ el1_trap:
766 777
767 /* Read result */ 778 /* Read result */
768 mrs x3, par_el1 779 mrs x3, par_el1
780 pop x0, xzr // Restore PAR_EL1 from the stack
781 msr par_el1, x0
769 tbnz x3, #0, 3f // Bail out if we failed the translation 782 tbnz x3, #0, 3f // Bail out if we failed the translation
770 ubfx x3, x3, #12, #36 // Extract IPA 783 ubfx x3, x3, #12, #36 // Extract IPA
771 lsl x3, x3, #4 // and present it like HPFAR 784 lsl x3, x3, #4 // and present it like HPFAR
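Two fixes meet in this file: a dsb ishst ahead of each TLB invalidate, so prior page-table stores are visible before the invalidation is broadcast, and a save/restore of PAR_EL1 around the AT-based address resolution, since the at instruction clobbers the guest's PAR_EL1. A hedged sketch of the ordering the barrier enforces:

static void set_pte_then_invalidate(pte_t *ptep, pte_t pte)
{
	*ptep = pte;				/* page-table store */
	asm volatile("dsb ishst" ::: "memory");	/* complete it before any tlbi */
	/* ...a tlbi issued now cannot overtake the store */
}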
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 94923609753b..02e9d09e1d80 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -211,6 +211,9 @@ static const struct sys_reg_desc sys_reg_descs[] = {
211 /* FAR_EL1 */ 211 /* FAR_EL1 */
212 { Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000), 212 { Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000),
213 NULL, reset_unknown, FAR_EL1 }, 213 NULL, reset_unknown, FAR_EL1 },
214 /* PAR_EL1 */
215 { Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000),
216 NULL, reset_unknown, PAR_EL1 },
214 217
215 /* PMINTENSET_EL1 */ 218 /* PMINTENSET_EL1 */
216 { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001), 219 { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001),
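The new descriptor's fields (Op0=3, Op1=0, CRn=7, CRm=4, Op2=0) are exactly the system-register encoding of PAR_EL1, so trapped guest accesses land in the PAR_EL1 slot added in kvm_asm.h. A hedged encoding check, assuming an assembler that accepts the generic S-name form:

static u64 read_par_by_encoding(void)
{
	u64 val;

	asm volatile("mrs %0, s3_0_c7_c4_0" : "=r" (val));	/* same register as par_el1 */
	return val;
}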
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index a8d1059b91b2..f557ebbe7013 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -296,6 +296,7 @@ void __iomem * __init early_io_map(phys_addr_t phys, unsigned long virt)
296static void __init map_mem(void) 296static void __init map_mem(void)
297{ 297{
298 struct memblock_region *reg; 298 struct memblock_region *reg;
299 phys_addr_t limit;
299 300
300 /* 301 /*
301 * Temporarily limit the memblock range. We need to do this as 302 * Temporarily limit the memblock range. We need to do this as
@@ -303,9 +304,11 @@ static void __init map_mem(void)
303 * memory addressable from the initial direct kernel mapping. 304 * memory addressable from the initial direct kernel mapping.
304 * 305 *
305 * The initial direct kernel mapping, located at swapper_pg_dir, 306 * The initial direct kernel mapping, located at swapper_pg_dir,
306 * gives us PGDIR_SIZE memory starting from PHYS_OFFSET (aligned). 307 * gives us PGDIR_SIZE memory starting from PHYS_OFFSET (which must be
308 * aligned to 2MB as per Documentation/arm64/booting.txt).
307 */ 309 */
308 memblock_set_current_limit((PHYS_OFFSET & PGDIR_MASK) + PGDIR_SIZE); 310 limit = PHYS_OFFSET + PGDIR_SIZE;
311 memblock_set_current_limit(limit);
309 312
310 /* map all the memory banks */ 313 /* map all the memory banks */
311 for_each_memblock(memory, reg) { 314 for_each_memblock(memory, reg) {
@@ -315,6 +318,22 @@ static void __init map_mem(void)
315 if (start >= end) 318 if (start >= end)
316 break; 319 break;
317 320
321#ifndef CONFIG_ARM64_64K_PAGES
322 /*
323 * For the first memory bank align the start address and
324 * current memblock limit to prevent create_mapping() from
325 * allocating pte page tables from unmapped memory.
326 * When 64K pages are enabled, the pte page table for the
327 * first PGDIR_SIZE is already present in swapper_pg_dir.
328 */
329 if (start < limit)
330 start = ALIGN(start, PMD_SIZE);
331 if (end < limit) {
332 limit = end & PMD_MASK;
333 memblock_set_current_limit(limit);
334 }
335#endif
336
318 create_mapping(start, __phys_to_virt(start), end - start); 337 create_mapping(start, __phys_to_virt(start), end - start);
319 } 338 }
320 339
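Without 64K pages, the first pte table must be allocated from memory that is already mapped, hence the PMD_SIZE alignment of the first bank and the clamping of the memblock limit. Worked example, assuming 4K pages so PMD_SIZE is 2MB:

unsigned long start = 0x80100000UL;	/* bank starts mid-section */
start = ALIGN(start, 2UL << 20);	/* -> 0x80200000, a 2MB boundary */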
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index a82ae8868077..b1b31bbc967b 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -95,10 +95,6 @@ ENTRY(cpu_do_switch_mm)
95 ret 95 ret
96ENDPROC(cpu_do_switch_mm) 96ENDPROC(cpu_do_switch_mm)
97 97
98cpu_name:
99 .ascii "AArch64 Processor"
100 .align
101
102 .section ".text.init", #alloc, #execinstr 98 .section ".text.init", #alloc, #execinstr
103 99
104/* 100/*
@@ -151,7 +147,7 @@ ENTRY(__cpu_setup)
151 * both user and kernel. 147 * both user and kernel.
152 */ 148 */
153 ldr x10, =TCR_TxSZ(VA_BITS) | TCR_FLAGS | TCR_IPS_40BIT | \ 149 ldr x10, =TCR_TxSZ(VA_BITS) | TCR_FLAGS | TCR_IPS_40BIT | \
154 TCR_ASID16 | (1 << 31) 150 TCR_ASID16 | TCR_TBI0 | (1 << 31)
155#ifdef CONFIG_ARM64_64K_PAGES 151#ifdef CONFIG_ARM64_64K_PAGES
156 orr x10, x10, TCR_TG0_64K 152 orr x10, x10, TCR_TG0_64K
157 orr x10, x10, TCR_TG1_64K 153 orr x10, x10, TCR_TG1_64K
diff --git a/arch/avr32/boards/atngw100/mrmt.c b/arch/avr32/boards/atngw100/mrmt.c
index f91431963452..7de083d19b7e 100644
--- a/arch/avr32/boards/atngw100/mrmt.c
+++ b/arch/avr32/boards/atngw100/mrmt.c
@@ -150,7 +150,6 @@ static struct ac97c_platform_data __initdata ac97c0_data = {
150static struct platform_device rmt_ts_device = { 150static struct platform_device rmt_ts_device = {
151 .name = "ucb1400_ts", 151 .name = "ucb1400_ts",
152 .id = -1, 152 .id = -1,
153 }
154}; 153};
155#endif 154#endif
156 155
diff --git a/arch/avr32/oprofile/op_model_avr32.c b/arch/avr32/oprofile/op_model_avr32.c
index f74b7809e089..08308be2c02c 100644
--- a/arch/avr32/oprofile/op_model_avr32.c
+++ b/arch/avr32/oprofile/op_model_avr32.c
@@ -97,8 +97,7 @@ static irqreturn_t avr32_perf_counter_interrupt(int irq, void *dev_id)
97 return IRQ_HANDLED; 97 return IRQ_HANDLED;
98} 98}
99 99
100static int avr32_perf_counter_create_files(struct super_block *sb, 100static int avr32_perf_counter_create_files(struct dentry *root)
101 struct dentry *root)
102{ 101{
103 struct dentry *dir; 102 struct dentry *dir;
104 unsigned int i; 103 unsigned int i;
@@ -106,21 +105,21 @@ static int avr32_perf_counter_create_files(struct super_block *sb,
106 105
107 for (i = 0; i < NR_counter; i++) { 106 for (i = 0; i < NR_counter; i++) {
108 snprintf(filename, sizeof(filename), "%u", i); 107 snprintf(filename, sizeof(filename), "%u", i);
109 dir = oprofilefs_mkdir(sb, root, filename); 108 dir = oprofilefs_mkdir(root, filename);
110 109
111 oprofilefs_create_ulong(sb, dir, "enabled", 110 oprofilefs_create_ulong(dir, "enabled",
112 &counter[i].enabled); 111 &counter[i].enabled);
113 oprofilefs_create_ulong(sb, dir, "event", 112 oprofilefs_create_ulong(dir, "event",
114 &counter[i].event); 113 &counter[i].event);
115 oprofilefs_create_ulong(sb, dir, "count", 114 oprofilefs_create_ulong(dir, "count",
116 &counter[i].count); 115 &counter[i].count);
117 116
118 /* Dummy entries */ 117 /* Dummy entries */
119 oprofilefs_create_ulong(sb, dir, "kernel", 118 oprofilefs_create_ulong(dir, "kernel",
120 &counter[i].kernel); 119 &counter[i].kernel);
121 oprofilefs_create_ulong(sb, dir, "user", 120 oprofilefs_create_ulong(dir, "user",
122 &counter[i].user); 121 &counter[i].user);
123 oprofilefs_create_ulong(sb, dir, "unit_mask", 122 oprofilefs_create_ulong(dir, "unit_mask",
124 &counter[i].unit_mask); 123 &counter[i].unit_mask);
125 } 124 }
126 125
diff --git a/arch/frv/mb93090-mb00/pci-vdk.c b/arch/frv/mb93090-mb00/pci-vdk.c
index 0aa35f0eb0db..deb67843693c 100644
--- a/arch/frv/mb93090-mb00/pci-vdk.c
+++ b/arch/frv/mb93090-mb00/pci-vdk.c
@@ -320,7 +320,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_ide_bases);
320 * are examined. 320 * are examined.
321 */ 321 */
322 322
323void __init pcibios_fixup_bus(struct pci_bus *bus) 323void pcibios_fixup_bus(struct pci_bus *bus)
324{ 324{
325#if 0 325#if 0
326 printk("### PCIBIOS_FIXUP_BUS(%d)\n",bus->number); 326 printk("### PCIBIOS_FIXUP_BUS(%d)\n",bus->number);
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
index 33a97929d055..77d442ab28c8 100644
--- a/arch/hexagon/Kconfig
+++ b/arch/hexagon/Kconfig
@@ -158,6 +158,7 @@ source "kernel/Kconfig.hz"
158endmenu 158endmenu
159 159
160source "init/Kconfig" 160source "init/Kconfig"
161source "kernel/Kconfig.freezer"
161source "drivers/Kconfig" 162source "drivers/Kconfig"
162source "fs/Kconfig" 163source "fs/Kconfig"
163 164
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 5a768ad8e893..566642266324 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -43,6 +43,7 @@ config IA64
43 select SYSCTL_ARCH_UNALIGN_NO_WARN 43 select SYSCTL_ARCH_UNALIGN_NO_WARN
44 select HAVE_MOD_ARCH_SPECIFIC 44 select HAVE_MOD_ARCH_SPECIFIC
45 select MODULES_USE_ELF_RELA 45 select MODULES_USE_ELF_RELA
46 select ARCH_USE_CMPXCHG_LOCKREF
46 default y 47 default y
47 help 48 help
48 The Itanium Processor Family is Intel's 64-bit successor to 49 The Itanium Processor Family is Intel's 64-bit successor to
@@ -565,9 +566,9 @@ config KEXEC
565 566
566 It is an ongoing process to be certain the hardware in a machine 567 It is an ongoing process to be certain the hardware in a machine
567 is properly shutdown, so do not be surprised if this code does not 568 is properly shutdown, so do not be surprised if this code does not
568 initially work for you. It may help to enable device hotplugging 569 initially work for you. As of this writing the exact hardware
569 support. As of this writing the exact hardware interface is 570 interface is strongly in flux, so no good recommendation can be
570 strongly in flux, so no good recommendation can be made. 571 made.
571 572
572config CRASH_DUMP 573config CRASH_DUMP
573 bool "kernel crash dumps" 574 bool "kernel crash dumps"
diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild
index 05b03ecd7933..a3456f34f672 100644
--- a/arch/ia64/include/asm/Kbuild
+++ b/arch/ia64/include/asm/Kbuild
@@ -3,3 +3,4 @@ generic-y += clkdev.h
3generic-y += exec.h 3generic-y += exec.h
4generic-y += kvm_para.h 4generic-y += kvm_para.h
5generic-y += trace_clock.h 5generic-y += trace_clock.h
6generic-y += vtime.h
\ No newline at end of file
diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h
index 8e20bff39f79..c27eccd33349 100644
--- a/arch/ia64/include/asm/bitops.h
+++ b/arch/ia64/include/asm/bitops.h
@@ -425,13 +425,7 @@ __fls (unsigned long x)
425 425
426#include <asm-generic/bitops/fls64.h> 426#include <asm-generic/bitops/fls64.h>
427 427
428/* 428#include <asm-generic/bitops/builtin-ffs.h>
429 * ffs: find first bit set. This is defined the same way as the libc and
430 * compiler builtin ffs routines, therefore differs in spirit from the above
431 * ffz (man ffs): it operates on "int" values only and the result value is the
432 * bit number + 1. ffs(0) is defined to return zero.
433 */
434#define ffs(x) __builtin_ffs(x)
435 429
436/* 430/*
437 * hweightN: returns the hamming weight (i.e. the number 431 * hweightN: returns the hamming weight (i.e. the number
diff --git a/arch/ia64/include/asm/dmi.h b/arch/ia64/include/asm/dmi.h
index 1ed4c8fedb83..185d3d18d0ec 100644
--- a/arch/ia64/include/asm/dmi.h
+++ b/arch/ia64/include/asm/dmi.h
@@ -7,6 +7,6 @@
7/* Use normal IO mappings for DMI */ 7/* Use normal IO mappings for DMI */
8#define dmi_ioremap ioremap 8#define dmi_ioremap ioremap
9#define dmi_iounmap(x,l) iounmap(x) 9#define dmi_iounmap(x,l) iounmap(x)
10#define dmi_alloc(l) kmalloc(l, GFP_ATOMIC) 10#define dmi_alloc(l) kzalloc(l, GFP_ATOMIC)
11 11
12#endif 12#endif
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index 54ff557d474e..45698cd15b7b 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -102,6 +102,11 @@ static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
102 return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1; 102 return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
103} 103}
104 104
105static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
106{
107 return !(((lock.lock >> TICKET_SHIFT) ^ lock.lock) & TICKET_MASK);
108}
109
105static inline int arch_spin_is_locked(arch_spinlock_t *lock) 110static inline int arch_spin_is_locked(arch_spinlock_t *lock)
106{ 111{
107 return __ticket_spin_is_locked(lock); 112 return __ticket_spin_is_locked(lock);
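arch_spin_value_unlocked() tests a lock value rather than a pointer, which is what the lockref code behind the new ARCH_USE_CMPXCHG_LOCKREF select (ia64 Kconfig hunk above) needs: it speculates on a snapshot of the lock word. For the ia64 ticket lock the predicate is simply serving ticket == next ticket; hedged example:

arch_spinlock_t snap = { .lock = (7u << TICKET_SHIFT) | 7u };
/* head == tail: arch_spin_value_unlocked(snap) is true; any mismatch -> false */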
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index ef3a9de01954..bc5efc7c3f3f 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -22,7 +22,7 @@
22 * unmapping a portion of the virtual address space, these hooks are called according to 22 * unmapping a portion of the virtual address space, these hooks are called according to
23 * the following template: 23 * the following template:
24 * 24 *
25 * tlb <- tlb_gather_mmu(mm, full_mm_flush); // start unmap for address space MM 25 * tlb <- tlb_gather_mmu(mm, start, end); // start unmap for address space MM
26 * { 26 * {
27 * for each vma that needs a shootdown do { 27 * for each vma that needs a shootdown do {
28 * tlb_start_vma(tlb, vma); 28 * tlb_start_vma(tlb, vma);
@@ -58,6 +58,7 @@ struct mmu_gather {
58 unsigned int max; 58 unsigned int max;
59 unsigned char fullmm; /* non-zero means full mm flush */ 59 unsigned char fullmm; /* non-zero means full mm flush */
60 unsigned char need_flush; /* really unmapped some PTEs? */ 60 unsigned char need_flush; /* really unmapped some PTEs? */
61 unsigned long start, end;
61 unsigned long start_addr; 62 unsigned long start_addr;
62 unsigned long end_addr; 63 unsigned long end_addr;
63 struct page **pages; 64 struct page **pages;
@@ -155,13 +156,15 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
155 156
156 157
157static inline void 158static inline void
158tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush) 159tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
159{ 160{
160 tlb->mm = mm; 161 tlb->mm = mm;
161 tlb->max = ARRAY_SIZE(tlb->local); 162 tlb->max = ARRAY_SIZE(tlb->local);
162 tlb->pages = tlb->local; 163 tlb->pages = tlb->local;
163 tlb->nr = 0; 164 tlb->nr = 0;
164 tlb->fullmm = full_mm_flush; 165 tlb->fullmm = !(start | (end+1));
166 tlb->start = start;
167 tlb->end = end;
165 tlb->start_addr = ~0UL; 168 tlb->start_addr = ~0UL;
166} 169}
167 170
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 5b2dc0d10c8f..bdfd8789b376 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1560,6 +1560,10 @@ int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
1560 return 0; 1560 return 0;
1561} 1561}
1562 1562
1563void kvm_arch_memslots_updated(struct kvm *kvm)
1564{
1565}
1566
1563int kvm_arch_prepare_memory_region(struct kvm *kvm, 1567int kvm_arch_prepare_memory_region(struct kvm *kvm,
1564 struct kvm_memory_slot *memslot, 1568 struct kvm_memory_slot *memslot,
1565 struct kvm_userspace_memory_region *mem, 1569 struct kvm_userspace_memory_region *mem,
diff --git a/arch/m68k/amiga/platform.c b/arch/m68k/amiga/platform.c
index 6083088c0cca..dacd9f911f71 100644
--- a/arch/m68k/amiga/platform.c
+++ b/arch/m68k/amiga/platform.c
@@ -56,7 +56,7 @@ static int __init amiga_init_bus(void)
56 n = AMIGAHW_PRESENT(ZORRO3) ? 4 : 2; 56 n = AMIGAHW_PRESENT(ZORRO3) ? 4 : 2;
57 pdev = platform_device_register_simple("amiga-zorro", -1, 57 pdev = platform_device_register_simple("amiga-zorro", -1,
58 zorro_resources, n); 58 zorro_resources, n);
59 return PTR_RET(pdev); 59 return PTR_ERR_OR_ZERO(pdev);
60} 60}
61 61
62subsys_initcall(amiga_init_bus); 62subsys_initcall(amiga_init_bus);
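PTR_RET() gives way to the better-named PTR_ERR_OR_ZERO() here and in the m68k time.c and q40 hunks below; behaviour is identical. A sketch of the helper's semantics, as in include/linux/err.h:

static inline int ptr_err_or_zero_sketch(const void *ptr)
{
	return IS_ERR(ptr) ? PTR_ERR(ptr) : 0;	/* errno from an ERR_PTR, else 0 */
}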
diff --git a/arch/m68k/emu/natfeat.c b/arch/m68k/emu/natfeat.c
index 2291a7d69d49..121a6660ad4e 100644
--- a/arch/m68k/emu/natfeat.c
+++ b/arch/m68k/emu/natfeat.c
@@ -18,9 +18,11 @@
18#include <asm/machdep.h> 18#include <asm/machdep.h>
19#include <asm/natfeat.h> 19#include <asm/natfeat.h>
20 20
21extern long nf_get_id_phys(unsigned long feature_name);
22
21asm("\n" 23asm("\n"
22" .global nf_get_id,nf_call\n" 24" .global nf_get_id_phys,nf_call\n"
23"nf_get_id:\n" 25"nf_get_id_phys:\n"
24" .short 0x7300\n" 26" .short 0x7300\n"
25" rts\n" 27" rts\n"
26"nf_call:\n" 28"nf_call:\n"
@@ -29,12 +31,25 @@ asm("\n"
29"1: moveq.l #0,%d0\n" 31"1: moveq.l #0,%d0\n"
30" rts\n" 32" rts\n"
31" .section __ex_table,\"a\"\n" 33" .section __ex_table,\"a\"\n"
32" .long nf_get_id,1b\n" 34" .long nf_get_id_phys,1b\n"
33" .long nf_call,1b\n" 35" .long nf_call,1b\n"
34" .previous"); 36" .previous");
35EXPORT_SYMBOL_GPL(nf_get_id);
36EXPORT_SYMBOL_GPL(nf_call); 37EXPORT_SYMBOL_GPL(nf_call);
37 38
39long nf_get_id(const char *feature_name)
40{
41 /* feature_name may be in vmalloc()ed memory, so make a copy */
42 char name_copy[32];
43 size_t n;
44
45 n = strlcpy(name_copy, feature_name, sizeof(name_copy));
46 if (n >= sizeof(name_copy))
47 return 0;
48
49 return nf_get_id_phys(virt_to_phys(name_copy));
50}
51EXPORT_SYMBOL_GPL(nf_get_id);
52
38void nfprint(const char *fmt, ...) 53void nfprint(const char *fmt, ...)
39{ 54{
40 static char buf[256]; 55 static char buf[256];
@@ -43,7 +58,7 @@ void nfprint(const char *fmt, ...)
43 58
44 va_start(ap, fmt); 59 va_start(ap, fmt);
45 n = vsnprintf(buf, 256, fmt, ap); 60 n = vsnprintf(buf, 256, fmt, ap);
46 nf_call(nf_get_id("NF_STDERR"), buf); 61 nf_call(nf_get_id("NF_STDERR"), virt_to_phys(buf));
47 va_end(ap); 62 va_end(ap);
48} 63}
49 64
@@ -68,7 +83,7 @@ void nf_init(void)
68 id = nf_get_id("NF_NAME"); 83 id = nf_get_id("NF_NAME");
69 if (!id) 84 if (!id)
70 return; 85 return;
71 nf_call(id, buf, 256); 86 nf_call(id, virt_to_phys(buf), 256);
72 buf[255] = 0; 87 buf[255] = 0;
73 88
74 pr_info("NatFeats found (%s, %lu.%lu)\n", buf, version >> 16, 89 pr_info("NatFeats found (%s, %lu.%lu)\n", buf, version >> 16,
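The NatFeat hypercalls now take physical addresses: nf_get_id() becomes a wrapper that bounces the (possibly vmalloc'ed) feature name through a small stack copy so a contiguous physical address exists, and every buffer handed to nf_call() goes through virt_to_phys(). Hedged caller-side sketch; the buffer is assumed to live in the directly mapped region:

static void emit_to_host(const char *msg)
{
	long id = nf_get_id("NF_STDERR");	/* still takes a virtual string */

	if (id)
		nf_call(id, virt_to_phys((void *)msg));
}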
diff --git a/arch/m68k/emu/nfblock.c b/arch/m68k/emu/nfblock.c
index e3011338ab40..0721858fbd1e 100644
--- a/arch/m68k/emu/nfblock.c
+++ b/arch/m68k/emu/nfblock.c
@@ -41,8 +41,8 @@ static inline s32 nfhd_read_write(u32 major, u32 minor, u32 rwflag, u32 recno,
41static inline s32 nfhd_get_capacity(u32 major, u32 minor, u32 *blocks, 41static inline s32 nfhd_get_capacity(u32 major, u32 minor, u32 *blocks,
42 u32 *blocksize) 42 u32 *blocksize)
43{ 43{
44 return nf_call(nfhd_id + NFHD_GET_CAPACITY, major, minor, blocks, 44 return nf_call(nfhd_id + NFHD_GET_CAPACITY, major, minor,
45 blocksize); 45 virt_to_phys(blocks), virt_to_phys(blocksize));
46} 46}
47 47
48static LIST_HEAD(nfhd_list); 48static LIST_HEAD(nfhd_list);
diff --git a/arch/m68k/emu/nfcon.c b/arch/m68k/emu/nfcon.c
index 6685bf45c2c3..57e8c8fb5eba 100644
--- a/arch/m68k/emu/nfcon.c
+++ b/arch/m68k/emu/nfcon.c
@@ -15,6 +15,7 @@
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/err.h> 16#include <linux/err.h>
17#include <linux/uaccess.h> 17#include <linux/uaccess.h>
18#include <linux/io.h>
18 19
19#include <asm/natfeat.h> 20#include <asm/natfeat.h>
20 21
@@ -25,17 +26,18 @@ static struct tty_driver *nfcon_tty_driver;
25static void nfputs(const char *str, unsigned int count) 26static void nfputs(const char *str, unsigned int count)
26{ 27{
27 char buf[68]; 28 char buf[68];
29 unsigned long phys = virt_to_phys(buf);
28 30
29 buf[64] = 0; 31 buf[64] = 0;
30 while (count > 64) { 32 while (count > 64) {
31 memcpy(buf, str, 64); 33 memcpy(buf, str, 64);
32 nf_call(stderr_id, buf); 34 nf_call(stderr_id, phys);
33 str += 64; 35 str += 64;
34 count -= 64; 36 count -= 64;
35 } 37 }
36 memcpy(buf, str, count); 38 memcpy(buf, str, count);
37 buf[count] = 0; 39 buf[count] = 0;
38 nf_call(stderr_id, buf); 40 nf_call(stderr_id, phys);
39} 41}
40 42
41static void nfcon_write(struct console *con, const char *str, 43static void nfcon_write(struct console *con, const char *str,
@@ -79,7 +81,7 @@ static int nfcon_tty_put_char(struct tty_struct *tty, unsigned char ch)
79{ 81{
80 char temp[2] = { ch, 0 }; 82 char temp[2] = { ch, 0 };
81 83
82 nf_call(stderr_id, temp); 84 nf_call(stderr_id, virt_to_phys(temp));
83 return 1; 85 return 1;
84} 86}
85 87
diff --git a/arch/m68k/emu/nfeth.c b/arch/m68k/emu/nfeth.c
index 695cd737a42e..a0985fd088d1 100644
--- a/arch/m68k/emu/nfeth.c
+++ b/arch/m68k/emu/nfeth.c
@@ -195,7 +195,8 @@ static struct net_device * __init nfeth_probe(int unit)
195 char mac[ETH_ALEN], host_ip[32], local_ip[32]; 195 char mac[ETH_ALEN], host_ip[32], local_ip[32];
196 int err; 196 int err;
197 197
198 if (!nf_call(nfEtherID + XIF_GET_MAC, unit, mac, ETH_ALEN)) 198 if (!nf_call(nfEtherID + XIF_GET_MAC, unit, virt_to_phys(mac),
199 ETH_ALEN))
199 return NULL; 200 return NULL;
200 201
201 dev = alloc_etherdev(sizeof(struct nfeth_private)); 202 dev = alloc_etherdev(sizeof(struct nfeth_private));
@@ -217,9 +218,9 @@ static struct net_device * __init nfeth_probe(int unit)
217 } 218 }
218 219
219 nf_call(nfEtherID + XIF_GET_IPHOST, unit, 220 nf_call(nfEtherID + XIF_GET_IPHOST, unit,
220 host_ip, sizeof(host_ip)); 221 virt_to_phys(host_ip), sizeof(host_ip));
221 nf_call(nfEtherID + XIF_GET_IPATARI, unit, 222 nf_call(nfEtherID + XIF_GET_IPATARI, unit,
222 local_ip, sizeof(local_ip)); 223 virt_to_phys(local_ip), sizeof(local_ip));
223 224
224 netdev_info(dev, KBUILD_MODNAME " addr:%s (%s) HWaddr:%pM\n", host_ip, 225 netdev_info(dev, KBUILD_MODNAME " addr:%s (%s) HWaddr:%pM\n", host_ip,
225 local_ip, mac); 226 local_ip, mac);
diff --git a/arch/m68k/include/asm/div64.h b/arch/m68k/include/asm/div64.h
index 444ea8a09e9f..ef881cfbbca9 100644
--- a/arch/m68k/include/asm/div64.h
+++ b/arch/m68k/include/asm/div64.h
@@ -15,16 +15,17 @@
15 unsigned long long n64; \ 15 unsigned long long n64; \
16 } __n; \ 16 } __n; \
17 unsigned long __rem, __upper; \ 17 unsigned long __rem, __upper; \
18 unsigned long __base = (base); \
18 \ 19 \
19 __n.n64 = (n); \ 20 __n.n64 = (n); \
20 if ((__upper = __n.n32[0])) { \ 21 if ((__upper = __n.n32[0])) { \
21 asm ("divul.l %2,%1:%0" \ 22 asm ("divul.l %2,%1:%0" \
22 : "=d" (__n.n32[0]), "=d" (__upper) \ 23 : "=d" (__n.n32[0]), "=d" (__upper) \
23 : "d" (base), "0" (__n.n32[0])); \ 24 : "d" (__base), "0" (__n.n32[0])); \
24 } \ 25 } \
25 asm ("divu.l %2,%1:%0" \ 26 asm ("divu.l %2,%1:%0" \
26 : "=d" (__n.n32[1]), "=d" (__rem) \ 27 : "=d" (__n.n32[1]), "=d" (__rem) \
27 : "d" (base), "1" (__upper), "0" (__n.n32[1])); \ 28 : "d" (__base), "1" (__upper), "0" (__n.n32[1])); \
28 (n) = __n.n64; \ 29 (n) = __n.n64; \
29 __rem; \ 30 __rem; \
30}) 31})
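Caching base in __base fixes a classic macro hazard: the old body named base in two asm statements, so an argument with side effects could be evaluated twice. Hedged demonstration:

u32 divisors[] = { 10, 7 };
u32 *p = divisors;
u64 n = 1000;

(void)do_div(n, *p++);	/* old macro: *p++ could run twice, silently skipping
			 * a divisor; with __base it runs exactly once */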
diff --git a/arch/m68k/include/asm/irqflags.h b/arch/m68k/include/asm/irqflags.h
index 7ef4115b8c4a..a823cd73dc09 100644
--- a/arch/m68k/include/asm/irqflags.h
+++ b/arch/m68k/include/asm/irqflags.h
@@ -3,7 +3,7 @@
3 3
4#include <linux/types.h> 4#include <linux/types.h>
5#ifdef CONFIG_MMU 5#ifdef CONFIG_MMU
6#include <linux/hardirq.h> 6#include <linux/preempt_mask.h>
7#endif 7#endif
8#include <linux/preempt.h> 8#include <linux/preempt.h>
9#include <asm/thread_info.h> 9#include <asm/thread_info.h>
@@ -67,6 +67,10 @@ static inline void arch_local_irq_restore(unsigned long flags)
67 67
68static inline bool arch_irqs_disabled_flags(unsigned long flags) 68static inline bool arch_irqs_disabled_flags(unsigned long flags)
69{ 69{
70 if (MACH_IS_ATARI) {
71 /* Ignore HSYNC = ipl 2 on Atari */
72 return (flags & ~(ALLOWINT | 0x200)) != 0;
73 }
70 return (flags & ~ALLOWINT) != 0; 74 return (flags & ~ALLOWINT) != 0;
71} 75}
72 76
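On Atari the HSYNC interrupt keeps the status-register ipl at 2, so the generic "any ipl bits set means irqs are off" test misfires; adding 0x200 to the mask exempts exactly that level. Worked bit arithmetic, hedged on the assumption that ALLOWINT is ~0x700 (the SR ipl field):

unsigned long flags = 0x200;			/* HSYNC running: ipl 2 */
int old_off = (flags & 0x700) != 0;		/* 1: falsely reports irqs off */
int new_off = (flags & (0x700 & ~0x200)) != 0;	/* 0: HSYNC level exempted */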
diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c
index bea6bcf8f9b8..7eb9792009f8 100644
--- a/arch/m68k/kernel/time.c
+++ b/arch/m68k/kernel/time.c
@@ -90,7 +90,7 @@ static int __init rtc_init(void)
90 return -ENODEV; 90 return -ENODEV;
91 91
92 pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0); 92 pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0);
93 return PTR_RET(pdev); 93 return PTR_ERR_OR_ZERO(pdev);
94} 94}
95 95
96module_init(rtc_init); 96module_init(rtc_init);
diff --git a/arch/m68k/platform/coldfire/pci.c b/arch/m68k/platform/coldfire/pci.c
index b33f97a13e6d..df9679238b6d 100644
--- a/arch/m68k/platform/coldfire/pci.c
+++ b/arch/m68k/platform/coldfire/pci.c
@@ -319,7 +319,6 @@ static int __init mcf_pci_init(void)
319 pci_fixup_irqs(pci_common_swizzle, mcf_pci_map_irq); 319 pci_fixup_irqs(pci_common_swizzle, mcf_pci_map_irq);
320 pci_bus_size_bridges(rootbus); 320 pci_bus_size_bridges(rootbus);
321 pci_bus_assign_resources(rootbus); 321 pci_bus_assign_resources(rootbus);
322 pci_enable_bridges(rootbus);
323 return 0; 322 return 0;
324} 323}
325 324
diff --git a/arch/m68k/q40/config.c b/arch/m68k/q40/config.c
index 658542b914fc..078bb744b5fe 100644
--- a/arch/m68k/q40/config.c
+++ b/arch/m68k/q40/config.c
@@ -338,6 +338,6 @@ static __init int q40_add_kbd_device(void)
338 return -ENODEV; 338 return -ENODEV;
339 339
340 pdev = platform_device_register_simple("q40kbd", -1, NULL, 0); 340 pdev = platform_device_register_simple("q40kbd", -1, NULL, 0);
341 return PTR_RET(pdev); 341 return PTR_ERR_OR_ZERO(pdev);
342} 342}
343arch_initcall(q40_add_kbd_device); 343arch_initcall(q40_add_kbd_device);
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index d22a4ecffff4..3f6659cbc969 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -28,7 +28,8 @@ config MICROBLAZE
28 select GENERIC_CLOCKEVENTS 28 select GENERIC_CLOCKEVENTS
29 select GENERIC_IDLE_POLL_SETUP 29 select GENERIC_IDLE_POLL_SETUP
30 select MODULES_USE_ELF_RELA 30 select MODULES_USE_ELF_RELA
31 select CLONE_BACKWARDS 31 select CLONE_BACKWARDS3
32 select CLKSRC_OF
32 33
33config SWAP 34config SWAP
34 def_bool n 35 def_bool n
diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile
index 0a603d3ecf24..40350a3c24e9 100644
--- a/arch/microblaze/Makefile
+++ b/arch/microblaze/Makefile
@@ -72,7 +72,7 @@ all: linux.bin
72archclean: 72archclean:
73 $(Q)$(MAKE) $(clean)=$(boot) 73 $(Q)$(MAKE) $(clean)=$(boot)
74 74
75linux.bin linux.bin.gz: vmlinux 75linux.bin linux.bin.gz linux.bin.ub: vmlinux
76 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ 76 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
77 77
78simpleImage.%: vmlinux 78simpleImage.%: vmlinux
@@ -81,6 +81,7 @@ simpleImage.%: vmlinux
81define archhelp 81define archhelp
82 echo '* linux.bin - Create raw binary' 82 echo '* linux.bin - Create raw binary'
83 echo ' linux.bin.gz - Create compressed raw binary' 83 echo ' linux.bin.gz - Create compressed raw binary'
84 echo ' linux.bin.ub - Create U-Boot wrapped raw binary'
84 echo ' simpleImage.<dt> - ELF image with $(arch)/boot/dts/<dt>.dts linked in' 85 echo ' simpleImage.<dt> - ELF image with $(arch)/boot/dts/<dt>.dts linked in'
85 echo ' - stripped elf with fdt blob' 86 echo ' - stripped elf with fdt blob'
86 echo ' simpleImage.<dt>.unstrip - full ELF image with fdt blob' 87 echo ' simpleImage.<dt>.unstrip - full ELF image with fdt blob'
diff --git a/arch/microblaze/boot/Makefile b/arch/microblaze/boot/Makefile
index 80fe54fb7ca3..8e211cc28dac 100644
--- a/arch/microblaze/boot/Makefile
+++ b/arch/microblaze/boot/Makefile
@@ -2,12 +2,15 @@
2# arch/microblaze/boot/Makefile 2# arch/microblaze/boot/Makefile
3# 3#
4 4
5targets := linux.bin linux.bin.gz simpleImage.% 5targets := linux.bin linux.bin.gz linux.bin.ub simpleImage.%
6 6
7OBJCOPYFLAGS := -R .note -R .comment -R .note.gnu.build-id -O binary 7OBJCOPYFLAGS := -R .note -R .comment -R .note.gnu.build-id -O binary
8 8
9$(obj)/linux.bin: vmlinux FORCE 9$(obj)/linux.bin: vmlinux FORCE
10 $(call if_changed,objcopy) 10 $(call if_changed,objcopy)
11 @echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
12
13$(obj)/linux.bin.ub: $(obj)/linux.bin FORCE
11 $(call if_changed,uimage) 14 $(call if_changed,uimage)
12 @echo 'Kernel: $@ is ready' ' (#'`cat .version`')' 15 @echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
13 16
@@ -22,8 +25,6 @@ quiet_cmd_strip = STRIP $@
22 cmd_strip = $(STRIP) -K microblaze_start -K _end -K __log_buf \ 25 cmd_strip = $(STRIP) -K microblaze_start -K _end -K __log_buf \
23 -K _fdt_start vmlinux -o $@ 26 -K _fdt_start vmlinux -o $@
24 27
25UIMAGE_IN = $@
26UIMAGE_OUT = $@.ub
27UIMAGE_LOADADDR = $(CONFIG_KERNEL_BASE_ADDR) 28UIMAGE_LOADADDR = $(CONFIG_KERNEL_BASE_ADDR)
28 29
29$(obj)/simpleImage.%: vmlinux FORCE 30$(obj)/simpleImage.%: vmlinux FORCE
diff --git a/arch/microblaze/include/asm/prom.h b/arch/microblaze/include/asm/prom.h
index 20c5e8e5121b..9977816c5ad3 100644
--- a/arch/microblaze/include/asm/prom.h
+++ b/arch/microblaze/include/asm/prom.h
@@ -50,9 +50,6 @@ void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
50 50
51extern void kdump_move_device_tree(void); 51extern void kdump_move_device_tree(void);
52 52
53/* CPU OF node matching */
54struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
55
56#endif /* __ASSEMBLY__ */ 53#endif /* __ASSEMBLY__ */
57#endif /* __KERNEL__ */ 54#endif /* __KERNEL__ */
58 55
diff --git a/arch/microblaze/include/asm/selfmod.h b/arch/microblaze/include/asm/selfmod.h
deleted file mode 100644
index c42aff2e6cd0..000000000000
--- a/arch/microblaze/include/asm/selfmod.h
+++ /dev/null
@@ -1,24 +0,0 @@
1/*
2 * Copyright (C) 2007-2008 Michal Simek <monstr@monstr.eu>
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 */
8
9#ifndef _ASM_MICROBLAZE_SELFMOD_H
10#define _ASM_MICROBLAZE_SELFMOD_H
11
12/*
13 * BARRIER_BASE_ADDR is constant address for selfmod function.
14 * do not change this value - selfmod function is in
15 * arch/microblaze/kernel/selfmod.c: selfmod_function()
16 *
17 * last 16 bits is used for storing register offset
18 */
19
20#define BARRIER_BASE_ADDR 0x1234ff00
21
22void selfmod_function(const int *arr_fce, const unsigned int base);
23
24#endif /* _ASM_MICROBLAZE_SELFMOD_H */
diff --git a/arch/microblaze/kernel/Makefile b/arch/microblaze/kernel/Makefile
index 928c950fc14c..5b0e512c78e5 100644
--- a/arch/microblaze/kernel/Makefile
+++ b/arch/microblaze/kernel/Makefile
@@ -7,7 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
7CFLAGS_REMOVE_timer.o = -pg 7CFLAGS_REMOVE_timer.o = -pg
8CFLAGS_REMOVE_intc.o = -pg 8CFLAGS_REMOVE_intc.o = -pg
9CFLAGS_REMOVE_early_printk.o = -pg 9CFLAGS_REMOVE_early_printk.o = -pg
10CFLAGS_REMOVE_selfmod.o = -pg
11CFLAGS_REMOVE_heartbeat.o = -pg 10CFLAGS_REMOVE_heartbeat.o = -pg
12CFLAGS_REMOVE_ftrace.o = -pg 11CFLAGS_REMOVE_ftrace.o = -pg
13CFLAGS_REMOVE_process.o = -pg 12CFLAGS_REMOVE_process.o = -pg
@@ -23,7 +22,6 @@ obj-y += dma.o exceptions.o \
23obj-y += cpu/ 22obj-y += cpu/
24 23
25obj-$(CONFIG_EARLY_PRINTK) += early_printk.o 24obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
26obj-$(CONFIG_SELFMOD) += selfmod.o
27obj-$(CONFIG_HEART_BEAT) += heartbeat.o 25obj-$(CONFIG_HEART_BEAT) += heartbeat.o
28obj-$(CONFIG_MODULES) += microblaze_ksyms.o module.o 26obj-$(CONFIG_MODULES) += microblaze_ksyms.o module.o
29obj-$(CONFIG_MMU) += misc.o 27obj-$(CONFIG_MMU) += misc.o
diff --git a/arch/microblaze/kernel/cpu/cpuinfo.c b/arch/microblaze/kernel/cpu/cpuinfo.c
index 410398f6db55..c9203b1007aa 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo.c
@@ -39,6 +39,8 @@ const struct cpu_ver_key cpu_ver_lookup[] = {
39 {"8.30.a", 0x17}, 39 {"8.30.a", 0x17},
40 {"8.40.a", 0x18}, 40 {"8.40.a", 0x18},
41 {"8.40.b", 0x19}, 41 {"8.40.b", 0x19},
42 {"9.0", 0x1b},
43 {"9.1", 0x1d},
42 {NULL, 0}, 44 {NULL, 0},
43}; 45};
44 46
diff --git a/arch/microblaze/kernel/intc.c b/arch/microblaze/kernel/intc.c
index d85fa3a2b0f8..581451ad4687 100644
--- a/arch/microblaze/kernel/intc.c
+++ b/arch/microblaze/kernel/intc.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu> 2 * Copyright (C) 2007-2013 Michal Simek <monstr@monstr.eu>
3 * Copyright (C) 2012-2013 Xilinx, Inc.
3 * Copyright (C) 2007-2009 PetaLogix 4 * Copyright (C) 2007-2009 PetaLogix
4 * Copyright (C) 2006 Atmark Techno, Inc. 5 * Copyright (C) 2006 Atmark Techno, Inc.
5 * 6 *
@@ -8,23 +9,15 @@
8 * for more details. 9 * for more details.
9 */ 10 */
10 11
11#include <linux/init.h>
12#include <linux/irqdomain.h> 12#include <linux/irqdomain.h>
13#include <linux/irq.h> 13#include <linux/irq.h>
14#include <asm/page.h> 14#include <linux/of_address.h>
15#include <linux/io.h> 15#include <linux/io.h>
16#include <linux/bug.h> 16#include <linux/bug.h>
17 17
18#include <asm/prom.h> 18#include "../../drivers/irqchip/irqchip.h"
19#include <asm/irq.h>
20 19
21#ifdef CONFIG_SELFMOD_INTC 20static void __iomem *intc_baseaddr;
22#include <asm/selfmod.h>
23#define INTC_BASE BARRIER_BASE_ADDR
24#else
25static unsigned int intc_baseaddr;
26#define INTC_BASE intc_baseaddr
27#endif
28 21
29/* No one else should require these constants, so define them locally here. */ 22/* No one else should require these constants, so define them locally here. */
30#define ISR 0x00 /* Interrupt Status Register */ 23#define ISR 0x00 /* Interrupt Status Register */
@@ -50,21 +43,21 @@ static void intc_enable_or_unmask(struct irq_data *d)
50 * acks the irq before calling the interrupt handler 43 * acks the irq before calling the interrupt handler
51 */ 44 */
52 if (irqd_is_level_type(d)) 45 if (irqd_is_level_type(d))
53 out_be32(INTC_BASE + IAR, mask); 46 out_be32(intc_baseaddr + IAR, mask);
54 47
55 out_be32(INTC_BASE + SIE, mask); 48 out_be32(intc_baseaddr + SIE, mask);
56} 49}
57 50
58static void intc_disable_or_mask(struct irq_data *d) 51static void intc_disable_or_mask(struct irq_data *d)
59{ 52{
60 pr_debug("disable: %ld\n", d->hwirq); 53 pr_debug("disable: %ld\n", d->hwirq);
61 out_be32(INTC_BASE + CIE, 1 << d->hwirq); 54 out_be32(intc_baseaddr + CIE, 1 << d->hwirq);
62} 55}
63 56
64static void intc_ack(struct irq_data *d) 57static void intc_ack(struct irq_data *d)
65{ 58{
66 pr_debug("ack: %ld\n", d->hwirq); 59 pr_debug("ack: %ld\n", d->hwirq);
67 out_be32(INTC_BASE + IAR, 1 << d->hwirq); 60 out_be32(intc_baseaddr + IAR, 1 << d->hwirq);
68} 61}
69 62
70static void intc_mask_ack(struct irq_data *d) 63static void intc_mask_ack(struct irq_data *d)
@@ -72,8 +65,8 @@ static void intc_mask_ack(struct irq_data *d)
72 unsigned long mask = 1 << d->hwirq; 65 unsigned long mask = 1 << d->hwirq;
73 66
74 pr_debug("disable_and_ack: %ld\n", d->hwirq); 67 pr_debug("disable_and_ack: %ld\n", d->hwirq);
75 out_be32(INTC_BASE + CIE, mask); 68 out_be32(intc_baseaddr + CIE, mask);
76 out_be32(INTC_BASE + IAR, mask); 69 out_be32(intc_baseaddr + IAR, mask);
77} 70}
78 71
79static struct irq_chip intc_dev = { 72static struct irq_chip intc_dev = {
@@ -90,7 +83,7 @@ unsigned int get_irq(void)
90{ 83{
91 unsigned int hwirq, irq = -1; 84 unsigned int hwirq, irq = -1;
92 85
93 hwirq = in_be32(INTC_BASE + IVR); 86 hwirq = in_be32(intc_baseaddr + IVR);
94 if (hwirq != -1U) 87 if (hwirq != -1U)
95 irq = irq_find_mapping(root_domain, hwirq); 88 irq = irq_find_mapping(root_domain, hwirq);
96 89
@@ -120,40 +113,32 @@ static const struct irq_domain_ops xintc_irq_domain_ops = {
120 .map = xintc_map, 113 .map = xintc_map,
121}; 114};
122 115
123void __init init_IRQ(void) 116static int __init xilinx_intc_of_init(struct device_node *intc,
117 struct device_node *parent)
124{ 118{
125 u32 nr_irq, intr_mask; 119 u32 nr_irq, intr_mask;
126 struct device_node *intc = NULL; 120 int ret;
127#ifdef CONFIG_SELFMOD_INTC 121
128 unsigned int intc_baseaddr = 0; 122 intc_baseaddr = of_iomap(intc, 0);
129 static int arr_func[] = { 123 BUG_ON(!intc_baseaddr);
130 (int)&get_irq, 124
131 (int)&intc_enable_or_unmask, 125 ret = of_property_read_u32(intc, "xlnx,num-intr-inputs", &nr_irq);
132 (int)&intc_disable_or_mask, 126 if (ret < 0) {
133 (int)&intc_mask_ack, 127 pr_err("%s: unable to read xlnx,num-intr-inputs\n", __func__);
134 (int)&intc_ack, 128 return -EINVAL;
135 (int)&intc_end, 129 }
136 0 130
137 }; 131 ret = of_property_read_u32(intc, "xlnx,kind-of-intr", &intr_mask);
138#endif 132 if (ret < 0) {
139 intc = of_find_compatible_node(NULL, NULL, "xlnx,xps-intc-1.00.a"); 133 pr_err("%s: unable to read xlnx,kind-of-intr\n", __func__);
140 BUG_ON(!intc); 134 return -EINVAL;
141 135 }
142 intc_baseaddr = be32_to_cpup(of_get_property(intc, "reg", NULL)); 136
143 intc_baseaddr = (unsigned long) ioremap(intc_baseaddr, PAGE_SIZE);
144 nr_irq = be32_to_cpup(of_get_property(intc,
145 "xlnx,num-intr-inputs", NULL));
146
147 intr_mask =
148 be32_to_cpup(of_get_property(intc, "xlnx,kind-of-intr", NULL));
149 if (intr_mask > (u32)((1ULL << nr_irq) - 1)) 137 if (intr_mask > (u32)((1ULL << nr_irq) - 1))
150 pr_info(" ERROR: Mismatch in kind-of-intr param\n"); 138 pr_info(" ERROR: Mismatch in kind-of-intr param\n");
151 139
152#ifdef CONFIG_SELFMOD_INTC 140 pr_info("%s: num_irq=%d, edge=0x%x\n",
153 selfmod_function((int *) arr_func, intc_baseaddr); 141 intc->full_name, nr_irq, intr_mask);
154#endif
155 pr_info("%s #0 at 0x%08x, num_irq=%d, edge=0x%x\n",
156 intc->name, intc_baseaddr, nr_irq, intr_mask);
157 142
158 /* 143 /*
159 * Disable all external interrupts until they are 144 * Disable all external interrupts until they are
@@ -174,4 +159,8 @@ void __init init_IRQ(void)
174 (void *)intr_mask); 159 (void *)intr_mask);
175 160
176 irq_set_default_host(root_domain); 161 irq_set_default_host(root_domain);
162
163 return 0;
177} 164}
165
166IRQCHIP_DECLARE(xilinx_intc, "xlnx,xps-intc-1.00.a", xilinx_intc_of_init);
diff --git a/arch/microblaze/kernel/irq.c b/arch/microblaze/kernel/irq.c
index ace700afbfdf..11e24de91aa4 100644
--- a/arch/microblaze/kernel/irq.c
+++ b/arch/microblaze/kernel/irq.c
@@ -17,10 +17,8 @@
17#include <linux/seq_file.h> 17#include <linux/seq_file.h>
18#include <linux/kernel_stat.h> 18#include <linux/kernel_stat.h>
19#include <linux/irq.h> 19#include <linux/irq.h>
20#include <linux/irqchip.h>
20#include <linux/of_irq.h> 21#include <linux/of_irq.h>
21#include <linux/export.h>
22
23#include <asm/prom.h>
24 22
25static u32 concurrent_irq; 23static u32 concurrent_irq;
26 24
@@ -47,3 +45,9 @@ next_irq:
47 set_irq_regs(old_regs); 45 set_irq_regs(old_regs);
48 trace_hardirqs_on(); 46 trace_hardirqs_on();
49} 47}
48
49void __init init_IRQ(void)
50{
51 /* process the entire interrupt tree in one go */
52 irqchip_init();
53}
diff --git a/arch/microblaze/kernel/reset.c b/arch/microblaze/kernel/reset.c
index 2e5079ab53d2..fbe58c6554a8 100644
--- a/arch/microblaze/kernel/reset.c
+++ b/arch/microblaze/kernel/reset.c
@@ -67,7 +67,11 @@ static void gpio_system_reset(void)
67 pr_notice("Reset GPIO unavailable - halting!\n"); 67 pr_notice("Reset GPIO unavailable - halting!\n");
68} 68}
69#else 69#else
70#define gpio_system_reset() do {} while (0) 70static void gpio_system_reset(void)
71{
72 pr_notice("No reset GPIO present - halting!\n");
73}
74
71void of_platform_reset_gpio_probe(void) 75void of_platform_reset_gpio_probe(void)
72{ 76{
73 return; 77 return;
diff --git a/arch/microblaze/kernel/selfmod.c b/arch/microblaze/kernel/selfmod.c
deleted file mode 100644
index 89508bdc9f3c..000000000000
--- a/arch/microblaze/kernel/selfmod.c
+++ /dev/null
@@ -1,81 +0,0 @@
1/*
2 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
3 * Copyright (C) 2009 PetaLogix
4 *
5 * This file is subject to the terms and conditions of the GNU General Public
6 * License. See the file "COPYING" in the main directory of this archive
7 * for more details.
8 */
9
10#include <linux/interrupt.h>
11#include <asm/selfmod.h>
12
13#undef DEBUG
14
15#if __GNUC__ > 3
16#error GCC 4 unsupported SELFMOD. Please disable SELFMOD from menuconfig.
17#endif
18
19#define OPCODE_IMM 0xB0000000
20#define OPCODE_LWI 0xE8000000
21#define OPCODE_LWI_MASK 0xEC000000
22#define OPCODE_RTSD 0xB60F0008 /* return from func: rtsd r15, 8 */
23#define OPCODE_ADDIK 0x30000000
24#define OPCODE_ADDIK_MASK 0xFC000000
25
26#define IMM_BASE (OPCODE_IMM | (BARRIER_BASE_ADDR >> 16))
27#define LWI_BASE (OPCODE_LWI | (BARRIER_BASE_ADDR & 0x0000ff00))
28#define LWI_BASE_MASK (OPCODE_LWI_MASK | (BARRIER_BASE_ADDR & 0x0000ff00))
29#define ADDIK_BASE (OPCODE_ADDIK | (BARRIER_BASE_ADDR & 0x0000ff00))
30#define ADDIK_BASE_MASK (OPCODE_ADDIK_MASK | (BARRIER_BASE_ADDR & 0x0000ff00))
31
32#define MODIFY_INSTR { \
33 pr_debug("%s: curr instr, (%d):0x%x, next(%d):0x%x\n", \
34 __func__, i, addr[i], i + 1, addr[i + 1]); \
35 addr[i] = OPCODE_IMM + (base >> 16); \
36 /* keep instruction opcode and add only last 16bits */ \
37 addr[i + 1] = (addr[i + 1] & 0xffff00ff) + (base & 0xffff); \
38 __invalidate_icache(addr[i]); \
39 __invalidate_icache(addr[i + 1]); \
40 pr_debug("%s: hack instr, (%d):0x%x, next(%d):0x%x\n", \
41 __func__, i, addr[i], i + 1, addr[i + 1]); }
42
43/* NOTE
44 * self-modified part of code for improvement of interrupt controller
45 * save instruction in interrupt rutine
46 */
47void selfmod_function(const int *arr_fce, const unsigned int base)
48{
49 unsigned int flags, i, j, *addr = NULL;
50
51 local_irq_save(flags);
52 __disable_icache();
53
54 /* zero terminated array */
55 for (j = 0; arr_fce[j] != 0; j++) {
56 /* get start address of function */
57 addr = (unsigned int *) arr_fce[j];
58 pr_debug("%s: func(%d) at 0x%x\n",
59 __func__, j, (unsigned int) addr);
60 for (i = 0; ; i++) {
61 pr_debug("%s: instruction code at %d: 0x%x\n",
62 __func__, i, addr[i]);
63 if (addr[i] == IMM_BASE) {
64 /* detecting of lwi (0xE8) or swi (0xF8) instr
65 * I can detect both opcode with one mask */
66 if ((addr[i + 1] & LWI_BASE_MASK) == LWI_BASE) {
67 MODIFY_INSTR;
68 } else /* detection addik for ack */
69 if ((addr[i + 1] & ADDIK_BASE_MASK) ==
70 ADDIK_BASE) {
71 MODIFY_INSTR;
72 }
73 } else if (addr[i] == OPCODE_RTSD) {
74 /* return from function means end of function */
75 pr_debug("%s: end of array %d\n", __func__, i);
76 break;
77 }
78 }
79 }
80 local_irq_restore(flags);
81} /* end of self-modified code */
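With SELFMOD gone (the header above and this patcher), the microblaze intc and timer drivers stop rewriting absolute MMIO addresses into their own instruction stream at boot and instead keep an of_iomap()'ed base pointer, as the intc.c hunk above and the timer.c hunk below show. Hedged contrast: one pointer load per access instead of instruction patching.

static void __iomem *intc_baseaddr;	/* filled in once by of_iomap() */

static void ack_hwirq(unsigned long hwirq)
{
	out_be32(intc_baseaddr + IAR, 1 << hwirq);	/* plain MMIO, no patching */
}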
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index 0263da7b83dd..0775e036c526 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -9,6 +9,7 @@
9 */ 9 */
10 10
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/clocksource.h>
12#include <linux/string.h> 13#include <linux/string.h>
13#include <linux/seq_file.h> 14#include <linux/seq_file.h>
14#include <linux/cpu.h> 15#include <linux/cpu.h>
@@ -68,10 +69,6 @@ void __init setup_arch(char **cmdline_p)
68 69
69 xilinx_pci_init(); 70 xilinx_pci_init();
70 71
71#if defined(CONFIG_SELFMOD_INTC) || defined(CONFIG_SELFMOD_TIMER)
72 pr_notice("Self modified code enable\n");
73#endif
74
75#ifdef CONFIG_VT 72#ifdef CONFIG_VT
76#if defined(CONFIG_XILINX_CONSOLE) 73#if defined(CONFIG_XILINX_CONSOLE)
77 conswitchp = &xil_con; 74 conswitchp = &xil_con;
@@ -196,6 +193,11 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
196 per_cpu(CURRENT_SAVE, 0) = (unsigned long)current; 193 per_cpu(CURRENT_SAVE, 0) = (unsigned long)current;
197} 194}
198 195
196void __init time_init(void)
197{
198 clocksource_of_init();
199}
200
199#ifdef CONFIG_DEBUG_FS 201#ifdef CONFIG_DEBUG_FS
200struct dentry *of_debugfs_root; 202struct dentry *of_debugfs_root;
201 203
diff --git a/arch/microblaze/kernel/timer.c b/arch/microblaze/kernel/timer.c
index aec5020a6e31..e4b3f33ef34c 100644
--- a/arch/microblaze/kernel/timer.c
+++ b/arch/microblaze/kernel/timer.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu> 2 * Copyright (C) 2007-2013 Michal Simek <monstr@monstr.eu>
3 * Copyright (C) 2012-2013 Xilinx, Inc.
3 * Copyright (C) 2007-2009 PetaLogix 4 * Copyright (C) 2007-2009 PetaLogix
4 * Copyright (C) 2006 Atmark Techno, Inc. 5 * Copyright (C) 2006 Atmark Techno, Inc.
5 * 6 *
@@ -8,34 +9,16 @@
8 * for more details. 9 * for more details.
9 */ 10 */
10 11
11#include <linux/init.h>
12#include <linux/kernel.h>
13#include <linux/param.h>
14#include <linux/interrupt.h> 12#include <linux/interrupt.h>
15#include <linux/profile.h>
16#include <linux/irq.h>
17#include <linux/delay.h> 13#include <linux/delay.h>
18#include <linux/sched.h> 14#include <linux/sched.h>
19#include <linux/spinlock.h>
20#include <linux/err.h>
21#include <linux/clk.h> 15#include <linux/clk.h>
22#include <linux/clocksource.h>
23#include <linux/clockchips.h> 16#include <linux/clockchips.h>
24#include <linux/io.h> 17#include <linux/of_address.h>
25#include <linux/bug.h>
26#include <asm/cpuinfo.h> 18#include <asm/cpuinfo.h>
27#include <asm/setup.h>
28#include <asm/prom.h>
29#include <asm/irq.h>
30#include <linux/cnt32_to_63.h> 19#include <linux/cnt32_to_63.h>
31 20
32#ifdef CONFIG_SELFMOD_TIMER 21static void __iomem *timer_baseaddr;
33#include <asm/selfmod.h>
34#define TIMER_BASE BARRIER_BASE_ADDR
35#else
36static unsigned int timer_baseaddr;
37#define TIMER_BASE timer_baseaddr
38#endif
39 22
40static unsigned int freq_div_hz; 23static unsigned int freq_div_hz;
41static unsigned int timer_clock_freq; 24static unsigned int timer_clock_freq;
@@ -59,19 +42,21 @@ static unsigned int timer_clock_freq;
59#define TCSR_PWMA (1<<9) 42#define TCSR_PWMA (1<<9)
60#define TCSR_ENALL (1<<10) 43#define TCSR_ENALL (1<<10)
61 44
62static inline void microblaze_timer0_stop(void) 45static inline void xilinx_timer0_stop(void)
63{ 46{
64 out_be32(TIMER_BASE + TCSR0, in_be32(TIMER_BASE + TCSR0) & ~TCSR_ENT); 47 out_be32(timer_baseaddr + TCSR0,
48 in_be32(timer_baseaddr + TCSR0) & ~TCSR_ENT);
65} 49}
66 50
67static inline void microblaze_timer0_start_periodic(unsigned long load_val) 51static inline void xilinx_timer0_start_periodic(unsigned long load_val)
68{ 52{
69 if (!load_val) 53 if (!load_val)
70 load_val = 1; 54 load_val = 1;
71 out_be32(TIMER_BASE + TLR0, load_val); /* loading value to timer reg */ 55 /* loading value to timer reg */
56 out_be32(timer_baseaddr + TLR0, load_val);
72 57
73 /* load the initial value */ 58 /* load the initial value */
74 out_be32(TIMER_BASE + TCSR0, TCSR_LOAD); 59 out_be32(timer_baseaddr + TCSR0, TCSR_LOAD);
75 60
76 /* see timer data sheet for detail 61 /* see timer data sheet for detail
77 * !ENALL - don't enable 'em all 62 * !ENALL - don't enable 'em all
@@ -86,38 +71,39 @@ static inline void microblaze_timer0_start_periodic(unsigned long load_val)
86 * UDT - set the timer as down counter 71 * UDT - set the timer as down counter
87 * !MDT0 - generate mode 72 * !MDT0 - generate mode
88 */ 73 */
89 out_be32(TIMER_BASE + TCSR0, 74 out_be32(timer_baseaddr + TCSR0,
90 TCSR_TINT|TCSR_ENIT|TCSR_ENT|TCSR_ARHT|TCSR_UDT); 75 TCSR_TINT|TCSR_ENIT|TCSR_ENT|TCSR_ARHT|TCSR_UDT);
91} 76}
92 77
93static inline void microblaze_timer0_start_oneshot(unsigned long load_val) 78static inline void xilinx_timer0_start_oneshot(unsigned long load_val)
94{ 79{
95 if (!load_val) 80 if (!load_val)
96 load_val = 1; 81 load_val = 1;
97 out_be32(TIMER_BASE + TLR0, load_val); /* loading value to timer reg */ 82 /* loading value to timer reg */
83 out_be32(timer_baseaddr + TLR0, load_val);
98 84
99 /* load the initial value */ 85 /* load the initial value */
100 out_be32(TIMER_BASE + TCSR0, TCSR_LOAD); 86 out_be32(timer_baseaddr + TCSR0, TCSR_LOAD);
101 87
102 out_be32(TIMER_BASE + TCSR0, 88 out_be32(timer_baseaddr + TCSR0,
103 TCSR_TINT|TCSR_ENIT|TCSR_ENT|TCSR_ARHT|TCSR_UDT); 89 TCSR_TINT|TCSR_ENIT|TCSR_ENT|TCSR_ARHT|TCSR_UDT);
104} 90}
105 91
106static int microblaze_timer_set_next_event(unsigned long delta, 92static int xilinx_timer_set_next_event(unsigned long delta,
107 struct clock_event_device *dev) 93 struct clock_event_device *dev)
108{ 94{
109 pr_debug("%s: next event, delta %x\n", __func__, (u32)delta); 95 pr_debug("%s: next event, delta %x\n", __func__, (u32)delta);
110 microblaze_timer0_start_oneshot(delta); 96 xilinx_timer0_start_oneshot(delta);
111 return 0; 97 return 0;
112} 98}
113 99
114static void microblaze_timer_set_mode(enum clock_event_mode mode, 100static void xilinx_timer_set_mode(enum clock_event_mode mode,
115 struct clock_event_device *evt) 101 struct clock_event_device *evt)
116{ 102{
117 switch (mode) { 103 switch (mode) {
118 case CLOCK_EVT_MODE_PERIODIC: 104 case CLOCK_EVT_MODE_PERIODIC:
119 pr_info("%s: periodic\n", __func__); 105 pr_info("%s: periodic\n", __func__);
120 microblaze_timer0_start_periodic(freq_div_hz); 106 xilinx_timer0_start_periodic(freq_div_hz);
121 break; 107 break;
122 case CLOCK_EVT_MODE_ONESHOT: 108 case CLOCK_EVT_MODE_ONESHOT:
123 pr_info("%s: oneshot\n", __func__); 109 pr_info("%s: oneshot\n", __func__);
@@ -127,7 +113,7 @@ static void microblaze_timer_set_mode(enum clock_event_mode mode,
127 break; 113 break;
128 case CLOCK_EVT_MODE_SHUTDOWN: 114 case CLOCK_EVT_MODE_SHUTDOWN:
129 pr_info("%s: shutdown\n", __func__); 115 pr_info("%s: shutdown\n", __func__);
130 microblaze_timer0_stop(); 116 xilinx_timer0_stop();
131 break; 117 break;
132 case CLOCK_EVT_MODE_RESUME: 118 case CLOCK_EVT_MODE_RESUME:
133 pr_info("%s: resume\n", __func__); 119 pr_info("%s: resume\n", __func__);
@@ -135,23 +121,23 @@ static void microblaze_timer_set_mode(enum clock_event_mode mode,
135 } 121 }
136} 122}
137 123
138static struct clock_event_device clockevent_microblaze_timer = { 124static struct clock_event_device clockevent_xilinx_timer = {
139 .name = "microblaze_clockevent", 125 .name = "xilinx_clockevent",
140 .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC, 126 .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
141 .shift = 8, 127 .shift = 8,
142 .rating = 300, 128 .rating = 300,
143 .set_next_event = microblaze_timer_set_next_event, 129 .set_next_event = xilinx_timer_set_next_event,
144 .set_mode = microblaze_timer_set_mode, 130 .set_mode = xilinx_timer_set_mode,
145}; 131};
146 132
147static inline void timer_ack(void) 133static inline void timer_ack(void)
148{ 134{
149 out_be32(TIMER_BASE + TCSR0, in_be32(TIMER_BASE + TCSR0)); 135 out_be32(timer_baseaddr + TCSR0, in_be32(timer_baseaddr + TCSR0));
150} 136}
151 137
152static irqreturn_t timer_interrupt(int irq, void *dev_id) 138static irqreturn_t timer_interrupt(int irq, void *dev_id)
153{ 139{
154 struct clock_event_device *evt = &clockevent_microblaze_timer; 140 struct clock_event_device *evt = &clockevent_xilinx_timer;
155#ifdef CONFIG_HEART_BEAT 141#ifdef CONFIG_HEART_BEAT
156 heartbeat(); 142 heartbeat();
157#endif 143#endif
@@ -164,73 +150,74 @@ static struct irqaction timer_irqaction = {
164 .handler = timer_interrupt, 150 .handler = timer_interrupt,
165 .flags = IRQF_DISABLED | IRQF_TIMER, 151 .flags = IRQF_DISABLED | IRQF_TIMER,
166 .name = "timer", 152 .name = "timer",
167 .dev_id = &clockevent_microblaze_timer, 153 .dev_id = &clockevent_xilinx_timer,
168}; 154};
169 155
170static __init void microblaze_clockevent_init(void) 156static __init void xilinx_clockevent_init(void)
171{ 157{
172 clockevent_microblaze_timer.mult = 158 clockevent_xilinx_timer.mult =
173 div_sc(timer_clock_freq, NSEC_PER_SEC, 159 div_sc(timer_clock_freq, NSEC_PER_SEC,
174 clockevent_microblaze_timer.shift); 160 clockevent_xilinx_timer.shift);
175 clockevent_microblaze_timer.max_delta_ns = 161 clockevent_xilinx_timer.max_delta_ns =
176 clockevent_delta2ns((u32)~0, &clockevent_microblaze_timer); 162 clockevent_delta2ns((u32)~0, &clockevent_xilinx_timer);
177 clockevent_microblaze_timer.min_delta_ns = 163 clockevent_xilinx_timer.min_delta_ns =
178 clockevent_delta2ns(1, &clockevent_microblaze_timer); 164 clockevent_delta2ns(1, &clockevent_xilinx_timer);
179 clockevent_microblaze_timer.cpumask = cpumask_of(0); 165 clockevent_xilinx_timer.cpumask = cpumask_of(0);
180 clockevents_register_device(&clockevent_microblaze_timer); 166 clockevents_register_device(&clockevent_xilinx_timer);
181} 167}
182 168
183static cycle_t microblaze_read(struct clocksource *cs) 169static cycle_t xilinx_read(struct clocksource *cs)
184{ 170{
185 /* reading actual value of timer 1 */ 171 /* reading actual value of timer 1 */
186 return (cycle_t) (in_be32(TIMER_BASE + TCR1)); 172 return (cycle_t) (in_be32(timer_baseaddr + TCR1));
187} 173}
188 174
189static struct timecounter microblaze_tc = { 175static struct timecounter xilinx_tc = {
190 .cc = NULL, 176 .cc = NULL,
191}; 177};
192 178
193static cycle_t microblaze_cc_read(const struct cyclecounter *cc) 179static cycle_t xilinx_cc_read(const struct cyclecounter *cc)
194{ 180{
195 return microblaze_read(NULL); 181 return xilinx_read(NULL);
196} 182}
197 183
198static struct cyclecounter microblaze_cc = { 184static struct cyclecounter xilinx_cc = {
199 .read = microblaze_cc_read, 185 .read = xilinx_cc_read,
200 .mask = CLOCKSOURCE_MASK(32), 186 .mask = CLOCKSOURCE_MASK(32),
201 .shift = 8, 187 .shift = 8,
202}; 188};
203 189
204static int __init init_microblaze_timecounter(void) 190static int __init init_xilinx_timecounter(void)
205{ 191{
206 microblaze_cc.mult = div_sc(timer_clock_freq, NSEC_PER_SEC, 192 xilinx_cc.mult = div_sc(timer_clock_freq, NSEC_PER_SEC,
207 microblaze_cc.shift); 193 xilinx_cc.shift);
208 194
209 timecounter_init(&microblaze_tc, &microblaze_cc, sched_clock()); 195 timecounter_init(&xilinx_tc, &xilinx_cc, sched_clock());
210 196
211 return 0; 197 return 0;
212} 198}
213 199
214static struct clocksource clocksource_microblaze = { 200static struct clocksource clocksource_microblaze = {
215 .name = "microblaze_clocksource", 201 .name = "xilinx_clocksource",
216 .rating = 300, 202 .rating = 300,
217 .read = microblaze_read, 203 .read = xilinx_read,
218 .mask = CLOCKSOURCE_MASK(32), 204 .mask = CLOCKSOURCE_MASK(32),
219 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 205 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
220}; 206};
221 207
222static int __init microblaze_clocksource_init(void) 208static int __init xilinx_clocksource_init(void)
223{ 209{
224 if (clocksource_register_hz(&clocksource_microblaze, timer_clock_freq)) 210 if (clocksource_register_hz(&clocksource_microblaze, timer_clock_freq))
225 panic("failed to register clocksource"); 211 panic("failed to register clocksource");
226 212
227 /* stop timer1 */ 213 /* stop timer1 */
228 out_be32(TIMER_BASE + TCSR1, in_be32(TIMER_BASE + TCSR1) & ~TCSR_ENT); 214 out_be32(timer_baseaddr + TCSR1,
215 in_be32(timer_baseaddr + TCSR1) & ~TCSR_ENT);
229 /* start timer1 - up counting without interrupt */ 216 /* start timer1 - up counting without interrupt */
230 out_be32(TIMER_BASE + TCSR1, TCSR_TINT|TCSR_ENT|TCSR_ARHT); 217 out_be32(timer_baseaddr + TCSR1, TCSR_TINT|TCSR_ENT|TCSR_ARHT);
231 218
232 /* register timecounter - for ftrace support */ 219 /* register timecounter - for ftrace support */
233 init_microblaze_timecounter(); 220 init_xilinx_timecounter();
234 return 0; 221 return 0;
235} 222}
236 223
@@ -240,55 +227,31 @@ static int __init microblaze_clocksource_init(void)
240 */ 227 */
241static int timer_initialized; 228static int timer_initialized;
242 229
243void __init time_init(void) 230static void __init xilinx_timer_init(struct device_node *timer)
244{ 231{
245 u32 irq; 232 u32 irq;
246 u32 timer_num = 1; 233 u32 timer_num = 1;
247 struct device_node *timer = NULL; 234 int ret;
248 const void *prop; 235
249#ifdef CONFIG_SELFMOD_TIMER 236 timer_baseaddr = of_iomap(timer, 0);
250 unsigned int timer_baseaddr = 0; 237 if (!timer_baseaddr) {
251 int arr_func[] = { 238 pr_err("ERROR: invalid timer base address\n");
252 (int)&microblaze_read, 239 BUG();
253 (int)&timer_interrupt, 240 }
254 (int)&microblaze_clocksource_init, 241
255 (int)&microblaze_timer_set_mode,
256 (int)&microblaze_timer_set_next_event,
257 0
258 };
259#endif
260 prop = of_get_property(of_chosen, "system-timer", NULL);
261 if (prop)
262 timer = of_find_node_by_phandle(be32_to_cpup(prop));
263 else
264 pr_info("No chosen timer found, using default\n");
265
266 if (!timer)
267 timer = of_find_compatible_node(NULL, NULL,
268 "xlnx,xps-timer-1.00.a");
269 BUG_ON(!timer);
270
271 timer_baseaddr = be32_to_cpup(of_get_property(timer, "reg", NULL));
272 timer_baseaddr = (unsigned long) ioremap(timer_baseaddr, PAGE_SIZE);
273 irq = irq_of_parse_and_map(timer, 0); 242 irq = irq_of_parse_and_map(timer, 0);
274 timer_num = be32_to_cpup(of_get_property(timer, 243
275 "xlnx,one-timer-only", NULL)); 244 of_property_read_u32(timer, "xlnx,one-timer-only", &timer_num);
276 if (timer_num) { 245 if (timer_num) {
277 pr_emerg("Please enable two timers in HW\n"); 246 pr_emerg("Please enable two timers in HW\n");
278 BUG(); 247 BUG();
279 } 248 }
280 249
281#ifdef CONFIG_SELFMOD_TIMER 250 pr_info("%s: irq=%d\n", timer->full_name, irq);
282 selfmod_function((int *) arr_func, timer_baseaddr);
283#endif
284 pr_info("%s #0 at 0x%08x, irq=%d\n",
285 timer->name, timer_baseaddr, irq);
286 251
287 /* If there is a clock-frequency property then use it */ 252 /* If there is a clock-frequency property then use it */
288 prop = of_get_property(timer, "clock-frequency", NULL); 253 ret = of_property_read_u32(timer, "clock-frequency", &timer_clock_freq);
289 if (prop) 254 if (ret < 0)
290 timer_clock_freq = be32_to_cpup(prop);
291 else
292 timer_clock_freq = cpuinfo.cpu_clock_freq; 255 timer_clock_freq = cpuinfo.cpu_clock_freq;
293 256
294 freq_div_hz = timer_clock_freq / HZ; 257 freq_div_hz = timer_clock_freq / HZ;
@@ -297,8 +260,8 @@ void __init time_init(void)
297#ifdef CONFIG_HEART_BEAT 260#ifdef CONFIG_HEART_BEAT
298 setup_heartbeat(); 261 setup_heartbeat();
299#endif 262#endif
300 microblaze_clocksource_init(); 263 xilinx_clocksource_init();
301 microblaze_clockevent_init(); 264 xilinx_clockevent_init();
302 timer_initialized = 1; 265 timer_initialized = 1;
303} 266}
304 267
@@ -312,3 +275,6 @@ unsigned long long notrace sched_clock(void)
312 } 275 }
313 return 0; 276 return 0;
314} 277}
278
279CLOCKSOURCE_OF_DECLARE(xilinx_timer, "xlnx,xps-timer-1.00.a",
280 xilinx_timer_init);
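
Beyond the microblaze_* to xilinx_* rename, the rewritten probe leans on the generic OF helpers: of_iomap() replaces the hand-rolled reg lookup plus ioremap(), and of_property_read_u32() replaces the of_get_property()/be32_to_cpup() pairs while reporting a missing property through its return value. The clock-frequency fallback shape, as a standalone sketch with hypothetical names:

    #include <linux/of.h>

    /* Read an optional u32 property, falling back to a default when the
     * node does not carry it (or carries it malformed). */
    static u32 read_u32_or(struct device_node *np, const char *prop, u32 dflt)
    {
            u32 val;

            if (of_property_read_u32(np, prop, &val) < 0)
                    val = dflt;
            return val;
    }
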
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
index bdb8ea100e73..1b93bf0892a0 100644
--- a/arch/microblaze/pci/pci-common.c
+++ b/arch/microblaze/pci/pci-common.c
@@ -657,67 +657,42 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
657void pci_process_bridge_OF_ranges(struct pci_controller *hose, 657void pci_process_bridge_OF_ranges(struct pci_controller *hose,
658 struct device_node *dev, int primary) 658 struct device_node *dev, int primary)
659{ 659{
660 const u32 *ranges;
661 int rlen;
662 int pna = of_n_addr_cells(dev);
663 int np = pna + 5;
664 int memno = 0, isa_hole = -1; 660 int memno = 0, isa_hole = -1;
665 u32 pci_space;
666 unsigned long long pci_addr, cpu_addr, pci_next, cpu_next, size;
667 unsigned long long isa_mb = 0; 661 unsigned long long isa_mb = 0;
668 struct resource *res; 662 struct resource *res;
663 struct of_pci_range range;
664 struct of_pci_range_parser parser;
669 665
670 pr_info("PCI host bridge %s %s ranges:\n", 666 pr_info("PCI host bridge %s %s ranges:\n",
671 dev->full_name, primary ? "(primary)" : ""); 667 dev->full_name, primary ? "(primary)" : "");
672 668
673 /* Get ranges property */ 669 /* Check for ranges property */
674 ranges = of_get_property(dev, "ranges", &rlen); 670 if (of_pci_range_parser_init(&parser, dev))
675 if (ranges == NULL)
676 return; 671 return;
677 672
678 /* Parse it */
679 pr_debug("Parsing ranges property...\n"); 673 pr_debug("Parsing ranges property...\n");
680 while ((rlen -= np * 4) >= 0) { 674 for_each_of_pci_range(&parser, &range) {
681 /* Read next ranges element */ 675 /* Read next ranges element */
682 pci_space = ranges[0];
683 pci_addr = of_read_number(ranges + 1, 2);
684 cpu_addr = of_translate_address(dev, ranges + 3);
685 size = of_read_number(ranges + pna + 3, 2);
686
687 pr_debug("pci_space: 0x%08x pci_addr:0x%016llx ", 676 pr_debug("pci_space: 0x%08x pci_addr:0x%016llx ",
688 pci_space, pci_addr); 677 range.pci_space, range.pci_addr);
689 pr_debug("cpu_addr:0x%016llx size:0x%016llx\n", 678 pr_debug("cpu_addr:0x%016llx size:0x%016llx\n",
690 cpu_addr, size); 679 range.cpu_addr, range.size);
691
692 ranges += np;
693 680
694 /* If we failed translation or got a zero-sized region 681 /* If we failed translation or got a zero-sized region
695 * (some FW try to feed us with non sensical zero sized regions 682 * (some FW try to feed us with non sensical zero sized regions
696 * such as power3 which look like some kind of attempt 683 * such as power3 which look like some kind of attempt
697 * at exposing the VGA memory hole) 684 * at exposing the VGA memory hole)
698 */ 685 */
699 if (cpu_addr == OF_BAD_ADDR || size == 0) 686 if (range.cpu_addr == OF_BAD_ADDR || range.size == 0)
700 continue; 687 continue;
701 688
702 /* Now consume following elements while they are contiguous */
703 for (; rlen >= np * sizeof(u32);
704 ranges += np, rlen -= np * 4) {
705 if (ranges[0] != pci_space)
706 break;
707 pci_next = of_read_number(ranges + 1, 2);
708 cpu_next = of_translate_address(dev, ranges + 3);
709 if (pci_next != pci_addr + size ||
710 cpu_next != cpu_addr + size)
711 break;
712 size += of_read_number(ranges + pna + 3, 2);
713 }
714
715 /* Act based on address space type */ 689 /* Act based on address space type */
716 res = NULL; 690 res = NULL;
717 switch ((pci_space >> 24) & 0x3) { 691 switch (range.flags & IORESOURCE_TYPE_BITS) {
718 case 1: /* PCI IO space */ 692 case IORESOURCE_IO:
719 pr_info(" IO 0x%016llx..0x%016llx -> 0x%016llx\n", 693 pr_info(" IO 0x%016llx..0x%016llx -> 0x%016llx\n",
720 cpu_addr, cpu_addr + size - 1, pci_addr); 694 range.cpu_addr, range.cpu_addr + range.size - 1,
695 range.pci_addr);
721 696
722 /* We support only one IO range */ 697 /* We support only one IO range */
723 if (hose->pci_io_size) { 698 if (hose->pci_io_size) {
@@ -725,11 +700,12 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
725 continue; 700 continue;
726 } 701 }
727 /* On 32 bits, limit I/O space to 16MB */ 702 /* On 32 bits, limit I/O space to 16MB */
728 if (size > 0x01000000) 703 if (range.size > 0x01000000)
729 size = 0x01000000; 704 range.size = 0x01000000;
730 705
731 /* 32 bits needs to map IOs here */ 706 /* 32 bits needs to map IOs here */
732 hose->io_base_virt = ioremap(cpu_addr, size); 707 hose->io_base_virt = ioremap(range.cpu_addr,
708 range.size);
733 709
734 /* Expect trouble if pci_addr is not 0 */ 710 /* Expect trouble if pci_addr is not 0 */
735 if (primary) 711 if (primary)
@@ -738,19 +714,20 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
738 /* pci_io_size and io_base_phys always represent IO 714 /* pci_io_size and io_base_phys always represent IO
739 * space starting at 0 so we factor in pci_addr 715 * space starting at 0 so we factor in pci_addr
740 */ 716 */
741 hose->pci_io_size = pci_addr + size; 717 hose->pci_io_size = range.pci_addr + range.size;
742 hose->io_base_phys = cpu_addr - pci_addr; 718 hose->io_base_phys = range.cpu_addr - range.pci_addr;
743 719
744 /* Build resource */ 720 /* Build resource */
745 res = &hose->io_resource; 721 res = &hose->io_resource;
746 res->flags = IORESOURCE_IO; 722 range.cpu_addr = range.pci_addr;
747 res->start = pci_addr; 723
748 break; 724 break;
749 case 2: /* PCI Memory space */ 725 case IORESOURCE_MEM:
750 case 3: /* PCI 64 bits Memory space */
751 pr_info(" MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n", 726 pr_info(" MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n",
752 cpu_addr, cpu_addr + size - 1, pci_addr, 727 range.cpu_addr, range.cpu_addr + range.size - 1,
753 (pci_space & 0x40000000) ? "Prefetch" : ""); 728 range.pci_addr,
729 (range.pci_space & 0x40000000) ?
730 "Prefetch" : "");
754 731
755 /* We support only 3 memory ranges */ 732 /* We support only 3 memory ranges */
756 if (memno >= 3) { 733 if (memno >= 3) {
@@ -758,13 +735,13 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
758 continue; 735 continue;
759 } 736 }
760 /* Handles ISA memory hole space here */ 737 /* Handles ISA memory hole space here */
761 if (pci_addr == 0) { 738 if (range.pci_addr == 0) {
762 isa_mb = cpu_addr; 739 isa_mb = range.cpu_addr;
763 isa_hole = memno; 740 isa_hole = memno;
764 if (primary || isa_mem_base == 0) 741 if (primary || isa_mem_base == 0)
765 isa_mem_base = cpu_addr; 742 isa_mem_base = range.cpu_addr;
766 hose->isa_mem_phys = cpu_addr; 743 hose->isa_mem_phys = range.cpu_addr;
767 hose->isa_mem_size = size; 744 hose->isa_mem_size = range.size;
768 } 745 }
769 746
770 /* We get the PCI/Mem offset from the first range or 747 /* We get the PCI/Mem offset from the first range or
@@ -772,30 +749,23 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
772 * hole. If they don't match, bugger. 749 * hole. If they don't match, bugger.
773 */ 750 */
774 if (memno == 0 || 751 if (memno == 0 ||
775 (isa_hole >= 0 && pci_addr != 0 && 752 (isa_hole >= 0 && range.pci_addr != 0 &&
776 hose->pci_mem_offset == isa_mb)) 753 hose->pci_mem_offset == isa_mb))
777 hose->pci_mem_offset = cpu_addr - pci_addr; 754 hose->pci_mem_offset = range.cpu_addr -
778 else if (pci_addr != 0 && 755 range.pci_addr;
779 hose->pci_mem_offset != cpu_addr - pci_addr) { 756 else if (range.pci_addr != 0 &&
757 hose->pci_mem_offset != range.cpu_addr -
758 range.pci_addr) {
780 pr_info(" \\--> Skipped (offset mismatch) !\n"); 759 pr_info(" \\--> Skipped (offset mismatch) !\n");
781 continue; 760 continue;
782 } 761 }
783 762
784 /* Build resource */ 763 /* Build resource */
785 res = &hose->mem_resources[memno++]; 764 res = &hose->mem_resources[memno++];
786 res->flags = IORESOURCE_MEM;
787 if (pci_space & 0x40000000)
788 res->flags |= IORESOURCE_PREFETCH;
789 res->start = cpu_addr;
790 break; 765 break;
791 } 766 }
792 if (res != NULL) { 767 if (res != NULL)
793 res->name = dev->full_name; 768 of_pci_range_to_resource(&range, dev, res);
794 res->end = res->start + size - 1;
795 res->parent = NULL;
796 res->sibling = NULL;
797 res->child = NULL;
798 }
799 } 769 }
800 770
801 /* If there's an ISA hole and the pci_mem_offset is -not- matching 771 /* If there's an ISA hole and the pci_mem_offset is -not- matching
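
The conversion above also drops the manual coalescing of contiguous ranges entries, which the generic parser performs internally: of_pci_range_parser_init() validates the ranges property and for_each_of_pci_range() yields one decoded, address-translated window per iteration, with range.flags carrying IORESOURCE_* type bits in place of the raw PCI space code. The consumer side, sketched against the 3.11-era API:

    #include <linux/ioport.h>
    #include <linux/of_address.h>

    static void walk_pci_ranges(struct device_node *dev)
    {
            struct of_pci_range_parser parser;
            struct of_pci_range range;

            if (of_pci_range_parser_init(&parser, dev))
                    return; /* no usable "ranges" property */

            for_each_of_pci_range(&parser, &range) {
                    switch (range.flags & IORESOURCE_TYPE_BITS) {
                    case IORESOURCE_IO:
                            /* I/O window at range.cpu_addr, range.size bytes */
                            break;
                    case IORESOURCE_MEM:
                            /* memory window; IORESOURCE_PREFETCH may be set */
                            break;
                    }
            }
    }
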
diff --git a/arch/microblaze/platform/Kconfig.platform b/arch/microblaze/platform/Kconfig.platform
index b1747211b8b1..db1aa5c22cea 100644
--- a/arch/microblaze/platform/Kconfig.platform
+++ b/arch/microblaze/platform/Kconfig.platform
@@ -18,28 +18,6 @@ config PLATFORM_GENERIC
18 18
19endchoice 19endchoice
20 20
21config SELFMOD
22 bool "Use self modified code for intc/timer"
23 depends on NO_MMU
24 default n
25 help
26 This choice enables self-modified code for interrupt controller
27 and timer.
28
29config SELFMOD_INTC
30 bool "Use self modified code for intc"
31 depends on SELFMOD
32 default y
33 help
34 This choice enables self-modified code for interrupt controller.
35
36config SELFMOD_TIMER
37 bool "Use self modified code for timer"
38 depends on SELFMOD
39 default y
40 help
41 This choice enables self-modified code for timer.
42
43config OPT_LIB_FUNCTION 21config OPT_LIB_FUNCTION
44 bool "Optimized lib function" 22 bool "Optimized lib function"
45 default y 23 default y
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index e12764c2a9d0..dccd7cec442d 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2305,9 +2305,9 @@ config KEXEC
2305 2305
2306 It is an ongoing process to be certain the hardware in a machine 2306 It is an ongoing process to be certain the hardware in a machine
2307 is properly shutdown, so do not be surprised if this code does not 2307 is properly shutdown, so do not be surprised if this code does not
2308 initially work for you. It may help to enable device hotplugging 2308 initially work for you. As of this writing the exact hardware
2309 support. As of this writing the exact hardware interface is 2309 interface is strongly in flux, so no good recommendation can be
2310 strongly in flux, so no good recommendation can be made. 2310 made.
2311 2311
2312config CRASH_DUMP 2312config CRASH_DUMP
2313 bool "Kernel crash dumps" 2313 bool "Kernel crash dumps"
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 1dc086087a72..fa44f3ec5302 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -17,6 +17,8 @@
17#define current_cpu_type() current_cpu_data.cputype 17#define current_cpu_type() current_cpu_data.cputype
18#endif 18#endif
19 19
20#define boot_cpu_type() cpu_data[0].cputype
21
20/* 22/*
21 * SMP assumption: Options of CPU 0 are a superset of all processors. 23 * SMP assumption: Options of CPU 0 are a superset of all processors.
22 * This is true for all known MIPS systems. 24 * This is true for all known MIPS systems.
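
boot_cpu_type() gives callers a stable way to ask about the CPU that booted the kernel, regardless of which CPU executes the check, whereas current_cpu_type() above answers for the local CPU. An illustrative (hypothetical) use:

    #include <asm/cpu.h>
    #include <asm/cpu-features.h>

    /* Branch on the boot CPU's type rather than the local CPU's. */
    static int boots_on_bmips43xx(void)
    {
            switch (boot_cpu_type()) {
            case CPU_BMIPS4350:
            case CPU_BMIPS4380:
                    return 1;
            default:
                    return 0;
            }
    }
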
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index 159abc8842d2..126da74d4c55 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -66,6 +66,8 @@ static void __init bmips_smp_setup(void)
66 int i, cpu = 1, boot_cpu = 0; 66 int i, cpu = 1, boot_cpu = 0;
67 67
68#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380) 68#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380)
69 int cpu_hw_intr;
70
69 /* arbitration priority */ 71 /* arbitration priority */
70 clear_c0_brcm_cmt_ctrl(0x30); 72 clear_c0_brcm_cmt_ctrl(0x30);
71 73
@@ -80,8 +82,12 @@ static void __init bmips_smp_setup(void)
80 * MIPS interrupt 2 (HW INT 0) is the CPU0 L1 controller output 82 * MIPS interrupt 2 (HW INT 0) is the CPU0 L1 controller output
81 * MIPS interrupt 3 (HW INT 1) is the CPU1 L1 controller output 83 * MIPS interrupt 3 (HW INT 1) is the CPU1 L1 controller output
82 */ 84 */
83 change_c0_brcm_cmt_intr(0xf8018000, 85 if (boot_cpu == 0)
84 (0x02 << 27) | (0x03 << 15)); 86 cpu_hw_intr = 0x02;
87 else
88 cpu_hw_intr = 0x1d;
89
90 change_c0_brcm_cmt_intr(0xf8018000, (cpu_hw_intr << 27) | (0x03 << 15));
85 91
86 /* single core, 2 threads (2 pipelines) */ 92 /* single core, 2 threads (2 pipelines) */
87 max_cpus = 2; 93 max_cpus = 2;
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 1765bab000a0..faf84c5f2629 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -1335,8 +1335,9 @@ static ssize_t store_kill(struct device *dev, struct device_attribute *attr,
1335 1335
1336 return len; 1336 return len;
1337} 1337}
1338static DEVICE_ATTR(kill, S_IWUSR, NULL, store_kill);
1338 1339
1339static ssize_t show_ntcs(struct device *cd, struct device_attribute *attr, 1340static ssize_t ntcs_show(struct device *cd, struct device_attribute *attr,
1340 char *buf) 1341 char *buf)
1341{ 1342{
1342 struct vpe *vpe = get_vpe(tclimit); 1343 struct vpe *vpe = get_vpe(tclimit);
@@ -1344,7 +1345,7 @@ static ssize_t show_ntcs(struct device *cd, struct device_attribute *attr,
1344 return sprintf(buf, "%d\n", vpe->ntcs); 1345 return sprintf(buf, "%d\n", vpe->ntcs);
1345} 1346}
1346 1347
1347static ssize_t store_ntcs(struct device *dev, struct device_attribute *attr, 1348static ssize_t ntcs_store(struct device *dev, struct device_attribute *attr,
1348 const char *buf, size_t len) 1349 const char *buf, size_t len)
1349{ 1350{
1350 struct vpe *vpe = get_vpe(tclimit); 1351 struct vpe *vpe = get_vpe(tclimit);
@@ -1365,12 +1366,14 @@ static ssize_t store_ntcs(struct device *dev, struct device_attribute *attr,
1365out_einval: 1366out_einval:
1366 return -EINVAL; 1367 return -EINVAL;
1367} 1368}
1369static DEVICE_ATTR_RW(ntcs);
1368 1370
1369 static struct device_attribute vpe_class_attributes[] = { 1371 static struct attribute *vpe_attrs[] = {
1370 __ATTR(kill, S_IWUSR, NULL, store_kill), 1372 &dev_attr_kill.attr,
1371 __ATTR(ntcs, S_IRUGO | S_IWUSR, show_ntcs, store_ntcs), 1373 &dev_attr_ntcs.attr,
1372 {} 1374 NULL,
1373}; 1375};
1376ATTRIBUTE_GROUPS(vpe);
1374 1377
1375static void vpe_device_release(struct device *cd) 1378static void vpe_device_release(struct device *cd)
1376{ 1379{
@@ -1381,7 +1384,7 @@ struct class vpe_class = {
1381 .name = "vpe", 1384 .name = "vpe",
1382 .owner = THIS_MODULE, 1385 .owner = THIS_MODULE,
1383 .dev_release = vpe_device_release, 1386 .dev_release = vpe_device_release,
1384 .dev_attrs = vpe_class_attributes, 1387 .dev_groups = vpe_groups,
1385}; 1388};
1386 1389
1387struct device vpe_device; 1390struct device vpe_device;
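
This is the stock conversion for the removal of the dev_attrs field from struct class: DEVICE_ATTR_RW(ntcs) expects callbacks named ntcs_show/ntcs_store (hence the renames above) and emits dev_attr_ntcs, while ATTRIBUTE_GROUPS(vpe) wraps the vpe_attrs array into the vpe_groups pointer that dev_groups consumes. A condensed sketch of the pattern, using an invented "demo" class:

    #include <linux/device.h>
    #include <linux/sysfs.h>

    static ssize_t ntcs_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
    {
            return sprintf(buf, "%d\n", 1); /* placeholder value */
    }

    static ssize_t ntcs_store(struct device *dev, struct device_attribute *attr,
                              const char *buf, size_t len)
    {
            return len; /* placeholder: accept any input */
    }
    static DEVICE_ATTR_RW(ntcs); /* emits dev_attr_ntcs */

    static struct attribute *demo_attrs[] = {
            &dev_attr_ntcs.attr,
            NULL,
    };
    ATTRIBUTE_GROUPS(demo); /* emits demo_group and demo_groups */

    static struct class demo_class = {
            .name       = "demo",
            .dev_groups = demo_groups, /* replaces .dev_attrs */
    };
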
diff --git a/arch/mips/kvm/kvm_locore.S b/arch/mips/kvm/kvm_locore.S
index dca2aa665993..bbace092ad0a 100644
--- a/arch/mips/kvm/kvm_locore.S
+++ b/arch/mips/kvm/kvm_locore.S
@@ -1,13 +1,13 @@
1/* 1/*
2* This file is subject to the terms and conditions of the GNU General Public 2 * This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4* for more details. 4 * for more details.
5* 5 *
6* Main entry point for the guest, exception handling. 6 * Main entry point for the guest, exception handling.
7* 7 *
8* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 8 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9* Authors: Sanjay Lal <sanjayl@kymasys.com> 9 * Authors: Sanjay Lal <sanjayl@kymasys.com>
10*/ 10 */
11 11
12#include <asm/asm.h> 12#include <asm/asm.h>
13#include <asm/asmmacro.h> 13#include <asm/asmmacro.h>
@@ -55,195 +55,193 @@
55 * a0: run 55 * a0: run
56 * a1: vcpu 56 * a1: vcpu
57 */ 57 */
58 .set noreorder
59 .set noat
58 60
59FEXPORT(__kvm_mips_vcpu_run) 61FEXPORT(__kvm_mips_vcpu_run)
60 .set push 62 /* k0/k1 not being used in host kernel context */
61 .set noreorder 63 INT_ADDIU k1, sp, -PT_SIZE
62 .set noat 64 LONG_S $0, PT_R0(k1)
63 65 LONG_S $1, PT_R1(k1)
64 /* k0/k1 not being used in host kernel context */ 66 LONG_S $2, PT_R2(k1)
65 addiu k1,sp, -PT_SIZE 67 LONG_S $3, PT_R3(k1)
66 LONG_S $0, PT_R0(k1) 68
67 LONG_S $1, PT_R1(k1) 69 LONG_S $4, PT_R4(k1)
68 LONG_S $2, PT_R2(k1) 70 LONG_S $5, PT_R5(k1)
69 LONG_S $3, PT_R3(k1) 71 LONG_S $6, PT_R6(k1)
70 72 LONG_S $7, PT_R7(k1)
71 LONG_S $4, PT_R4(k1) 73
72 LONG_S $5, PT_R5(k1) 74 LONG_S $8, PT_R8(k1)
73 LONG_S $6, PT_R6(k1) 75 LONG_S $9, PT_R9(k1)
74 LONG_S $7, PT_R7(k1) 76 LONG_S $10, PT_R10(k1)
75 77 LONG_S $11, PT_R11(k1)
76 LONG_S $8, PT_R8(k1) 78 LONG_S $12, PT_R12(k1)
77 LONG_S $9, PT_R9(k1) 79 LONG_S $13, PT_R13(k1)
78 LONG_S $10, PT_R10(k1) 80 LONG_S $14, PT_R14(k1)
79 LONG_S $11, PT_R11(k1) 81 LONG_S $15, PT_R15(k1)
80 LONG_S $12, PT_R12(k1) 82 LONG_S $16, PT_R16(k1)
81 LONG_S $13, PT_R13(k1) 83 LONG_S $17, PT_R17(k1)
82 LONG_S $14, PT_R14(k1) 84
83 LONG_S $15, PT_R15(k1) 85 LONG_S $18, PT_R18(k1)
84 LONG_S $16, PT_R16(k1) 86 LONG_S $19, PT_R19(k1)
85 LONG_S $17, PT_R17(k1) 87 LONG_S $20, PT_R20(k1)
86 88 LONG_S $21, PT_R21(k1)
87 LONG_S $18, PT_R18(k1) 89 LONG_S $22, PT_R22(k1)
88 LONG_S $19, PT_R19(k1) 90 LONG_S $23, PT_R23(k1)
89 LONG_S $20, PT_R20(k1) 91 LONG_S $24, PT_R24(k1)
90 LONG_S $21, PT_R21(k1) 92 LONG_S $25, PT_R25(k1)
91 LONG_S $22, PT_R22(k1)
92 LONG_S $23, PT_R23(k1)
93 LONG_S $24, PT_R24(k1)
94 LONG_S $25, PT_R25(k1)
95 93
96 /* XXXKYMA k0/k1 not saved, not being used if we got here through an ioctl() */ 94 /* XXXKYMA k0/k1 not saved, not being used if we got here through an ioctl() */
97 95
98 LONG_S $28, PT_R28(k1) 96 LONG_S $28, PT_R28(k1)
99 LONG_S $29, PT_R29(k1) 97 LONG_S $29, PT_R29(k1)
100 LONG_S $30, PT_R30(k1) 98 LONG_S $30, PT_R30(k1)
101 LONG_S $31, PT_R31(k1) 99 LONG_S $31, PT_R31(k1)
102 100
103 /* Save hi/lo */ 101 /* Save hi/lo */
104 mflo v0 102 mflo v0
105 LONG_S v0, PT_LO(k1) 103 LONG_S v0, PT_LO(k1)
106 mfhi v1 104 mfhi v1
107 LONG_S v1, PT_HI(k1) 105 LONG_S v1, PT_HI(k1)
108 106
109 /* Save host status */ 107 /* Save host status */
110 mfc0 v0, CP0_STATUS 108 mfc0 v0, CP0_STATUS
111 LONG_S v0, PT_STATUS(k1) 109 LONG_S v0, PT_STATUS(k1)
112 110
113 /* Save host ASID, shove it into the BVADDR location */ 111 /* Save host ASID, shove it into the BVADDR location */
114 mfc0 v1,CP0_ENTRYHI 112 mfc0 v1, CP0_ENTRYHI
115 andi v1, 0xff 113 andi v1, 0xff
116 LONG_S v1, PT_HOST_ASID(k1) 114 LONG_S v1, PT_HOST_ASID(k1)
117 115
118 /* Save DDATA_LO, will be used to store pointer to vcpu */ 116 /* Save DDATA_LO, will be used to store pointer to vcpu */
119 mfc0 v1, CP0_DDATA_LO 117 mfc0 v1, CP0_DDATA_LO
120 LONG_S v1, PT_HOST_USERLOCAL(k1) 118 LONG_S v1, PT_HOST_USERLOCAL(k1)
121 119
122 /* DDATA_LO has pointer to vcpu */ 120 /* DDATA_LO has pointer to vcpu */
123 mtc0 a1,CP0_DDATA_LO 121 mtc0 a1, CP0_DDATA_LO
124 122
125 /* Offset into vcpu->arch */ 123 /* Offset into vcpu->arch */
126 addiu k1, a1, VCPU_HOST_ARCH 124 INT_ADDIU k1, a1, VCPU_HOST_ARCH
127 125
128 /* Save the host stack to VCPU, used for exception processing when we exit from the Guest */ 126 /*
129 LONG_S sp, VCPU_HOST_STACK(k1) 127 * Save the host stack to VCPU, used for exception processing
128 * when we exit from the Guest
129 */
130 LONG_S sp, VCPU_HOST_STACK(k1)
130 131
131 /* Save the kernel gp as well */ 132 /* Save the kernel gp as well */
132 LONG_S gp, VCPU_HOST_GP(k1) 133 LONG_S gp, VCPU_HOST_GP(k1)
133 134
134 /* Setup status register for running the guest in UM, interrupts are disabled */ 135 /* Setup status register for running the guest in UM, interrupts are disabled */
135 li k0,(ST0_EXL | KSU_USER| ST0_BEV) 136 li k0, (ST0_EXL | KSU_USER | ST0_BEV)
136 mtc0 k0,CP0_STATUS 137 mtc0 k0, CP0_STATUS
137 ehb 138 ehb
138 139
139 /* load up the new EBASE */ 140 /* load up the new EBASE */
140 LONG_L k0, VCPU_GUEST_EBASE(k1) 141 LONG_L k0, VCPU_GUEST_EBASE(k1)
141 mtc0 k0,CP0_EBASE 142 mtc0 k0, CP0_EBASE
142 143
143 /* Now that the new EBASE has been loaded, unset BEV, set interrupt mask as it was 144 /*
144 * but make sure that timer interrupts are enabled 145 * Now that the new EBASE has been loaded, unset BEV, set
145 */ 146 * interrupt mask as it was but make sure that timer interrupts
146 li k0,(ST0_EXL | KSU_USER | ST0_IE) 147 * are enabled
147 andi v0, v0, ST0_IM 148 */
148 or k0, k0, v0 149 li k0, (ST0_EXL | KSU_USER | ST0_IE)
149 mtc0 k0,CP0_STATUS 150 andi v0, v0, ST0_IM
150 ehb 151 or k0, k0, v0
152 mtc0 k0, CP0_STATUS
153 ehb
151 154
152 155
153 /* Set Guest EPC */ 156 /* Set Guest EPC */
154 LONG_L t0, VCPU_PC(k1) 157 LONG_L t0, VCPU_PC(k1)
155 mtc0 t0, CP0_EPC 158 mtc0 t0, CP0_EPC
156 159
157FEXPORT(__kvm_mips_load_asid) 160FEXPORT(__kvm_mips_load_asid)
158 /* Set the ASID for the Guest Kernel */ 161 /* Set the ASID for the Guest Kernel */
159 sll t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */ 162 INT_SLL t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */
160 /* addresses shift to 0x80000000 */ 163 /* addresses shift to 0x80000000 */
161 bltz t0, 1f /* If kernel */ 164 bltz t0, 1f /* If kernel */
162 addiu t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */ 165 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
163 addiu t1, k1, VCPU_GUEST_USER_ASID /* else user */ 166 INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */
1641: 1671:
165 /* t1: contains the base of the ASID array, need to get the cpu id */ 168 /* t1: contains the base of the ASID array, need to get the cpu id */
166 LONG_L t2, TI_CPU($28) /* smp_processor_id */ 169 LONG_L t2, TI_CPU($28) /* smp_processor_id */
167 sll t2, t2, 2 /* x4 */ 170 INT_SLL t2, t2, 2 /* x4 */
168 addu t3, t1, t2 171 REG_ADDU t3, t1, t2
169 LONG_L k0, (t3) 172 LONG_L k0, (t3)
170 andi k0, k0, 0xff 173 andi k0, k0, 0xff
171 mtc0 k0,CP0_ENTRYHI 174 mtc0 k0, CP0_ENTRYHI
172 ehb 175 ehb
173 176
174 /* Disable RDHWR access */ 177 /* Disable RDHWR access */
175 mtc0 zero, CP0_HWRENA 178 mtc0 zero, CP0_HWRENA
176 179
177 /* Now load up the Guest Context from VCPU */ 180 /* Now load up the Guest Context from VCPU */
178 LONG_L $1, VCPU_R1(k1) 181 LONG_L $1, VCPU_R1(k1)
179 LONG_L $2, VCPU_R2(k1) 182 LONG_L $2, VCPU_R2(k1)
180 LONG_L $3, VCPU_R3(k1) 183 LONG_L $3, VCPU_R3(k1)
181 184
182 LONG_L $4, VCPU_R4(k1) 185 LONG_L $4, VCPU_R4(k1)
183 LONG_L $5, VCPU_R5(k1) 186 LONG_L $5, VCPU_R5(k1)
184 LONG_L $6, VCPU_R6(k1) 187 LONG_L $6, VCPU_R6(k1)
185 LONG_L $7, VCPU_R7(k1) 188 LONG_L $7, VCPU_R7(k1)
186 189
187 LONG_L $8, VCPU_R8(k1) 190 LONG_L $8, VCPU_R8(k1)
188 LONG_L $9, VCPU_R9(k1) 191 LONG_L $9, VCPU_R9(k1)
189 LONG_L $10, VCPU_R10(k1) 192 LONG_L $10, VCPU_R10(k1)
190 LONG_L $11, VCPU_R11(k1) 193 LONG_L $11, VCPU_R11(k1)
191 LONG_L $12, VCPU_R12(k1) 194 LONG_L $12, VCPU_R12(k1)
192 LONG_L $13, VCPU_R13(k1) 195 LONG_L $13, VCPU_R13(k1)
193 LONG_L $14, VCPU_R14(k1) 196 LONG_L $14, VCPU_R14(k1)
194 LONG_L $15, VCPU_R15(k1) 197 LONG_L $15, VCPU_R15(k1)
195 LONG_L $16, VCPU_R16(k1) 198 LONG_L $16, VCPU_R16(k1)
196 LONG_L $17, VCPU_R17(k1) 199 LONG_L $17, VCPU_R17(k1)
197 LONG_L $18, VCPU_R18(k1) 200 LONG_L $18, VCPU_R18(k1)
198 LONG_L $19, VCPU_R19(k1) 201 LONG_L $19, VCPU_R19(k1)
199 LONG_L $20, VCPU_R20(k1) 202 LONG_L $20, VCPU_R20(k1)
200 LONG_L $21, VCPU_R21(k1) 203 LONG_L $21, VCPU_R21(k1)
201 LONG_L $22, VCPU_R22(k1) 204 LONG_L $22, VCPU_R22(k1)
202 LONG_L $23, VCPU_R23(k1) 205 LONG_L $23, VCPU_R23(k1)
203 LONG_L $24, VCPU_R24(k1) 206 LONG_L $24, VCPU_R24(k1)
204 LONG_L $25, VCPU_R25(k1) 207 LONG_L $25, VCPU_R25(k1)
205 208
206 /* k0/k1 loaded up later */ 209 /* k0/k1 loaded up later */
207 210
208 LONG_L $28, VCPU_R28(k1) 211 LONG_L $28, VCPU_R28(k1)
209 LONG_L $29, VCPU_R29(k1) 212 LONG_L $29, VCPU_R29(k1)
210 LONG_L $30, VCPU_R30(k1) 213 LONG_L $30, VCPU_R30(k1)
211 LONG_L $31, VCPU_R31(k1) 214 LONG_L $31, VCPU_R31(k1)
212 215
213 /* Restore hi/lo */ 216 /* Restore hi/lo */
214 LONG_L k0, VCPU_LO(k1) 217 LONG_L k0, VCPU_LO(k1)
215 mtlo k0 218 mtlo k0
216 219
217 LONG_L k0, VCPU_HI(k1) 220 LONG_L k0, VCPU_HI(k1)
218 mthi k0 221 mthi k0
219 222
220FEXPORT(__kvm_mips_load_k0k1) 223FEXPORT(__kvm_mips_load_k0k1)
221 /* Restore the guest's k0/k1 registers */ 224 /* Restore the guest's k0/k1 registers */
222 LONG_L k0, VCPU_R26(k1) 225 LONG_L k0, VCPU_R26(k1)
223 LONG_L k1, VCPU_R27(k1) 226 LONG_L k1, VCPU_R27(k1)
224 227
225 /* Jump to guest */ 228 /* Jump to guest */
226 eret 229 eret
227 .set pop
228 230
229VECTOR(MIPSX(exception), unknown) 231VECTOR(MIPSX(exception), unknown)
230/* 232/*
231 * Find out what mode we came from and jump to the proper handler. 233 * Find out what mode we came from and jump to the proper handler.
232 */ 234 */
233 .set push 235 mtc0 k0, CP0_ERROREPC #01: Save guest k0
234 .set noat 236 ehb #02:
235 .set noreorder 237
236 mtc0 k0, CP0_ERROREPC #01: Save guest k0 238 mfc0 k0, CP0_EBASE #02: Get EBASE
237 ehb #02: 239 INT_SRL k0, k0, 10 #03: Get rid of CPUNum
238 240 INT_SLL k0, k0, 10 #04
239 mfc0 k0, CP0_EBASE #02: Get EBASE 241 LONG_S k1, 0x3000(k0) #05: Save k1 @ offset 0x3000
240 srl k0, k0, 10 #03: Get rid of CPUNum 242 INT_ADDIU k0, k0, 0x2000 #06: Exception handler is installed @ offset 0x2000
241 sll k0, k0, 10 #04 243 j k0 #07: jump to the function
242 LONG_S k1, 0x3000(k0) #05: Save k1 @ offset 0x3000 244 nop #08: branch delay slot
243 addiu k0, k0, 0x2000 #06: Exception handler is installed @ offset 0x2000
244 j k0 #07: jump to the function
245 nop #08: branch delay slot
246 .set push
247VECTOR_END(MIPSX(exceptionEnd)) 245VECTOR_END(MIPSX(exceptionEnd))
248.end MIPSX(exception) 246.end MIPSX(exception)
249 247
@@ -253,329 +251,327 @@ VECTOR_END(MIPSX(exceptionEnd))
253 * 251 *
254 */ 252 */
255NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra) 253NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
256 .set push 254 /* Get the VCPU pointer from DDATA_LO */
257 .set noat 255 mfc0 k1, CP0_DDATA_LO
258 .set noreorder 256 INT_ADDIU k1, k1, VCPU_HOST_ARCH
259 257
260 /* Get the VCPU pointer from DDATA_LO */ 258 /* Start saving Guest context to VCPU */
261 mfc0 k1, CP0_DDATA_LO 259 LONG_S $0, VCPU_R0(k1)
262 addiu k1, k1, VCPU_HOST_ARCH 260 LONG_S $1, VCPU_R1(k1)
263 261 LONG_S $2, VCPU_R2(k1)
264 /* Start saving Guest context to VCPU */ 262 LONG_S $3, VCPU_R3(k1)
265 LONG_S $0, VCPU_R0(k1) 263 LONG_S $4, VCPU_R4(k1)
266 LONG_S $1, VCPU_R1(k1) 264 LONG_S $5, VCPU_R5(k1)
267 LONG_S $2, VCPU_R2(k1) 265 LONG_S $6, VCPU_R6(k1)
268 LONG_S $3, VCPU_R3(k1) 266 LONG_S $7, VCPU_R7(k1)
269 LONG_S $4, VCPU_R4(k1) 267 LONG_S $8, VCPU_R8(k1)
270 LONG_S $5, VCPU_R5(k1) 268 LONG_S $9, VCPU_R9(k1)
271 LONG_S $6, VCPU_R6(k1) 269 LONG_S $10, VCPU_R10(k1)
272 LONG_S $7, VCPU_R7(k1) 270 LONG_S $11, VCPU_R11(k1)
273 LONG_S $8, VCPU_R8(k1) 271 LONG_S $12, VCPU_R12(k1)
274 LONG_S $9, VCPU_R9(k1) 272 LONG_S $13, VCPU_R13(k1)
275 LONG_S $10, VCPU_R10(k1) 273 LONG_S $14, VCPU_R14(k1)
276 LONG_S $11, VCPU_R11(k1) 274 LONG_S $15, VCPU_R15(k1)
277 LONG_S $12, VCPU_R12(k1) 275 LONG_S $16, VCPU_R16(k1)
278 LONG_S $13, VCPU_R13(k1) 276 LONG_S $17, VCPU_R17(k1)
279 LONG_S $14, VCPU_R14(k1) 277 LONG_S $18, VCPU_R18(k1)
280 LONG_S $15, VCPU_R15(k1) 278 LONG_S $19, VCPU_R19(k1)
281 LONG_S $16, VCPU_R16(k1) 279 LONG_S $20, VCPU_R20(k1)
282 LONG_S $17,VCPU_R17(k1) 280 LONG_S $21, VCPU_R21(k1)
283 LONG_S $18, VCPU_R18(k1) 281 LONG_S $22, VCPU_R22(k1)
284 LONG_S $19, VCPU_R19(k1) 282 LONG_S $23, VCPU_R23(k1)
285 LONG_S $20, VCPU_R20(k1) 283 LONG_S $24, VCPU_R24(k1)
286 LONG_S $21, VCPU_R21(k1) 284 LONG_S $25, VCPU_R25(k1)
287 LONG_S $22, VCPU_R22(k1) 285
288 LONG_S $23, VCPU_R23(k1) 286 /* Guest k0/k1 saved later */
289 LONG_S $24, VCPU_R24(k1) 287
290 LONG_S $25, VCPU_R25(k1) 288 LONG_S $28, VCPU_R28(k1)
291 289 LONG_S $29, VCPU_R29(k1)
292 /* Guest k0/k1 saved later */ 290 LONG_S $30, VCPU_R30(k1)
293 291 LONG_S $31, VCPU_R31(k1)
294 LONG_S $28, VCPU_R28(k1) 292
295 LONG_S $29, VCPU_R29(k1) 293 /* We need to save hi/lo and restore them on
296 LONG_S $30, VCPU_R30(k1) 294 * the way out
297 LONG_S $31, VCPU_R31(k1) 295 */
298 296 mfhi t0
299 /* We need to save hi/lo and restore them on 297 LONG_S t0, VCPU_HI(k1)
300 * the way out 298
301 */ 299 mflo t0
302 mfhi t0 300 LONG_S t0, VCPU_LO(k1)
303 LONG_S t0, VCPU_HI(k1) 301
304 302 /* Finally save guest k0/k1 to VCPU */
305 mflo t0 303 mfc0 t0, CP0_ERROREPC
306 LONG_S t0, VCPU_LO(k1) 304 LONG_S t0, VCPU_R26(k1)
307 305
308 /* Finally save guest k0/k1 to VCPU */ 306 /* Get GUEST k1 and save it in VCPU */
309 mfc0 t0, CP0_ERROREPC 307 PTR_LI t1, ~0x2ff
310 LONG_S t0, VCPU_R26(k1) 308 mfc0 t0, CP0_EBASE
311 309 and t0, t0, t1
312 /* Get GUEST k1 and save it in VCPU */ 310 LONG_L t0, 0x3000(t0)
313 la t1, ~0x2ff 311 LONG_S t0, VCPU_R27(k1)
314 mfc0 t0, CP0_EBASE 312
315 and t0, t0, t1 313 /* Now that context has been saved, we can use other registers */
316 LONG_L t0, 0x3000(t0) 314
317 LONG_S t0, VCPU_R27(k1) 315 /* Restore vcpu */
318 316 mfc0 a1, CP0_DDATA_LO
319 /* Now that context has been saved, we can use other registers */ 317 move s1, a1
320 318
321 /* Restore vcpu */ 319 /* Restore run (vcpu->run) */
322 mfc0 a1, CP0_DDATA_LO 320 LONG_L a0, VCPU_RUN(a1)
323 move s1, a1 321 /* Save pointer to run in s0, will be saved by the compiler */
324 322 move s0, a0
325 /* Restore run (vcpu->run) */ 323
326 LONG_L a0, VCPU_RUN(a1) 324 /* Save Host level EPC, BadVaddr and Cause to VCPU, useful to
327 /* Save pointer to run in s0, will be saved by the compiler */ 325 * process the exception */
328 move s0, a0 326 mfc0 k0,CP0_EPC
329 327 LONG_S k0, VCPU_PC(k1)
330 328
331 /* Save Host level EPC, BadVaddr and Cause to VCPU, useful to process the exception */ 329 mfc0 k0, CP0_BADVADDR
332 mfc0 k0,CP0_EPC 330 LONG_S k0, VCPU_HOST_CP0_BADVADDR(k1)
333 LONG_S k0, VCPU_PC(k1) 331
334 332 mfc0 k0, CP0_CAUSE
335 mfc0 k0, CP0_BADVADDR 333 LONG_S k0, VCPU_HOST_CP0_CAUSE(k1)
336 LONG_S k0, VCPU_HOST_CP0_BADVADDR(k1) 334
337 335 mfc0 k0, CP0_ENTRYHI
338 mfc0 k0, CP0_CAUSE 336 LONG_S k0, VCPU_HOST_ENTRYHI(k1)
339 LONG_S k0, VCPU_HOST_CP0_CAUSE(k1) 337
340 338 /* Now restore the host state just enough to run the handlers */
341 mfc0 k0, CP0_ENTRYHI 339
342 LONG_S k0, VCPU_HOST_ENTRYHI(k1) 340 /* Swtich EBASE to the one used by Linux */
343 341 /* load up the host EBASE */
344 /* Now restore the host state just enough to run the handlers */ 342 mfc0 v0, CP0_STATUS
345 340 /* Switch EBASE to the one used by Linux */
346 /* Switch EBASE to the one used by Linux */ 341 /* load up the host EBASE */
347 /* load up the host EBASE */ 345 or k0, v0, ST0_BEV
348 mfc0 v0, CP0_STATUS 346 .set noat
349 347
350 .set at 348 mtc0 k0, CP0_STATUS
351 or k0, v0, ST0_BEV 349 ehb
352 .set noat 350
353 351 LONG_L k0, VCPU_HOST_EBASE(k1)
354 mtc0 k0, CP0_STATUS 352 mtc0 k0,CP0_EBASE
355 ehb 353
356
357 LONG_L k0, VCPU_HOST_EBASE(k1)
358 mtc0 k0,CP0_EBASE
359
360
361 /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
362 .set at
363 and v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)
364 or v0, v0, ST0_CU0
365 .set noat
366 mtc0 v0, CP0_STATUS
367 ehb
368
369 /* Load up host GP */
370 LONG_L gp, VCPU_HOST_GP(k1)
371
372 /* Need a stack before we can jump to "C" */
373 LONG_L sp, VCPU_HOST_STACK(k1)
374
375 /* Saved host state */
376 addiu sp,sp, -PT_SIZE
377 354
378 /* XXXKYMA do we need to load the host ASID, maybe not because the 355 /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
379 * kernel entries are marked GLOBAL, need to verify 356 .set at
380 */ 357 and v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)
358 or v0, v0, ST0_CU0
359 .set noat
360 mtc0 v0, CP0_STATUS
361 ehb
362
363 /* Load up host GP */
364 LONG_L gp, VCPU_HOST_GP(k1)
365
366 /* Need a stack before we can jump to "C" */
367 LONG_L sp, VCPU_HOST_STACK(k1)
368
369 /* Saved host state */
370 INT_ADDIU sp, sp, -PT_SIZE
381 371
382 /* Restore host DDATA_LO */ 372 /* XXXKYMA do we need to load the host ASID, maybe not because the
383 LONG_L k0, PT_HOST_USERLOCAL(sp) 373 * kernel entries are marked GLOBAL, need to verify
384 mtc0 k0, CP0_DDATA_LO 374 */
385 375
386 /* Restore RDHWR access */ 376 /* Restore host DDATA_LO */
387 la k0, 0x2000000F 377 LONG_L k0, PT_HOST_USERLOCAL(sp)
388 mtc0 k0, CP0_HWRENA 378 mtc0 k0, CP0_DDATA_LO
389 379
390 /* Jump to handler */ 380 /* Restore RDHWR access */
381 PTR_LI k0, 0x2000000F
382 mtc0 k0, CP0_HWRENA
383
384 /* Jump to handler */
391FEXPORT(__kvm_mips_jump_to_handler) 385FEXPORT(__kvm_mips_jump_to_handler)
392 /* XXXKYMA: not sure if this is safe, how large is the stack?? */ 386 /* XXXKYMA: not sure if this is safe, how large is the stack??
393 /* Now jump to the kvm_mips_handle_exit() to see if we can deal with this in the kernel */ 387 * Now jump to the kvm_mips_handle_exit() to see if we can deal
394 la t9,kvm_mips_handle_exit 388 * with this in the kernel */
395 jalr.hb t9 389 PTR_LA t9, kvm_mips_handle_exit
396 addiu sp,sp, -CALLFRAME_SIZ /* BD Slot */ 390 jalr.hb t9
397 391 INT_ADDIU sp, sp, -CALLFRAME_SIZ /* BD Slot */
398 /* Return from handler Make sure interrupts are disabled */ 392
399 di 393 /* Return from handler Make sure interrupts are disabled */
400 ehb 394 di
401 395 ehb
402 /* XXXKYMA: k0/k1 could have been blown away if we processed an exception 396
403 * while we were handling the exception from the guest, reload k1 397 /* XXXKYMA: k0/k1 could have been blown away if we processed
404 */ 398 * an exception while we were handling the exception from the
405 move k1, s1 399 * guest, reload k1
406 addiu k1, k1, VCPU_HOST_ARCH 400 */
407 401
408 /* Check return value, should tell us if we are returning to the host (handle I/O etc) 402 move k1, s1
409 * or resuming the guest 403 INT_ADDIU k1, k1, VCPU_HOST_ARCH
410 */ 404
411 andi t0, v0, RESUME_HOST 405 /* Check return value, should tell us if we are returning to the
412 bnez t0, __kvm_mips_return_to_host 406 * host (handle I/O etc.) or resuming the guest
413 nop 407 */
408 andi t0, v0, RESUME_HOST
409 bnez t0, __kvm_mips_return_to_host
410 nop
414 411
415__kvm_mips_return_to_guest: 412__kvm_mips_return_to_guest:
416 /* Put the saved pointer to vcpu (s1) back into the DDATA_LO Register */ 413 /* Put the saved pointer to vcpu (s1) back into the DDATA_LO Register */
417 mtc0 s1, CP0_DDATA_LO 414 mtc0 s1, CP0_DDATA_LO
418
419 /* Load up the Guest EBASE to minimize the window where BEV is set */
420 LONG_L t0, VCPU_GUEST_EBASE(k1)
421
422 /* Switch EBASE back to the one used by KVM */
423 mfc0 v1, CP0_STATUS
424 .set at
425 or k0, v1, ST0_BEV
426 .set noat
427 mtc0 k0, CP0_STATUS
428 ehb
429 mtc0 t0,CP0_EBASE
430
431 /* Setup status register for running guest in UM */
432 .set at
433 or v1, v1, (ST0_EXL | KSU_USER | ST0_IE)
434 and v1, v1, ~ST0_CU0
435 .set noat
436 mtc0 v1, CP0_STATUS
437 ehb
438 415
416 /* Load up the Guest EBASE to minimize the window where BEV is set */
417 LONG_L t0, VCPU_GUEST_EBASE(k1)
418
419 /* Switch EBASE back to the one used by KVM */
420 mfc0 v1, CP0_STATUS
421 .set at
422 or k0, v1, ST0_BEV
423 .set noat
424 mtc0 k0, CP0_STATUS
425 ehb
426 mtc0 t0, CP0_EBASE
427
428 /* Setup status register for running guest in UM */
429 .set at
430 or v1, v1, (ST0_EXL | KSU_USER | ST0_IE)
431 and v1, v1, ~ST0_CU0
432 .set noat
433 mtc0 v1, CP0_STATUS
434 ehb
439 435
440 /* Set Guest EPC */ 436 /* Set Guest EPC */
441 LONG_L t0, VCPU_PC(k1) 437 LONG_L t0, VCPU_PC(k1)
442 mtc0 t0, CP0_EPC 438 mtc0 t0, CP0_EPC
443 439
444 /* Set the ASID for the Guest Kernel */ 440 /* Set the ASID for the Guest Kernel */
445 sll t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */ 441 INT_SLL t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */
446 /* addresses shift to 0x80000000 */ 442 /* addresses shift to 0x80000000 */
447 bltz t0, 1f /* If kernel */ 443 bltz t0, 1f /* If kernel */
448 addiu t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */ 444 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
449 addiu t1, k1, VCPU_GUEST_USER_ASID /* else user */ 445 INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */
4501: 4461:
451 /* t1: contains the base of the ASID array, need to get the cpu id */ 447 /* t1: contains the base of the ASID array, need to get the cpu id */
452 LONG_L t2, TI_CPU($28) /* smp_processor_id */ 448 LONG_L t2, TI_CPU($28) /* smp_processor_id */
453 sll t2, t2, 2 /* x4 */ 449 INT_SLL t2, t2, 2 /* x4 */
454 addu t3, t1, t2 450 REG_ADDU t3, t1, t2
455 LONG_L k0, (t3) 451 LONG_L k0, (t3)
456 andi k0, k0, 0xff 452 andi k0, k0, 0xff
457 mtc0 k0,CP0_ENTRYHI 453 mtc0 k0,CP0_ENTRYHI
458 ehb 454 ehb
459 455
460 /* Disable RDHWR access */ 456 /* Disable RDHWR access */
461 mtc0 zero, CP0_HWRENA 457 mtc0 zero, CP0_HWRENA
462 458
463 /* load the guest context from VCPU and return */ 459 /* load the guest context from VCPU and return */
464 LONG_L $0, VCPU_R0(k1) 460 LONG_L $0, VCPU_R0(k1)
465 LONG_L $1, VCPU_R1(k1) 461 LONG_L $1, VCPU_R1(k1)
466 LONG_L $2, VCPU_R2(k1) 462 LONG_L $2, VCPU_R2(k1)
467 LONG_L $3, VCPU_R3(k1) 463 LONG_L $3, VCPU_R3(k1)
468 LONG_L $4, VCPU_R4(k1) 464 LONG_L $4, VCPU_R4(k1)
469 LONG_L $5, VCPU_R5(k1) 465 LONG_L $5, VCPU_R5(k1)
470 LONG_L $6, VCPU_R6(k1) 466 LONG_L $6, VCPU_R6(k1)
471 LONG_L $7, VCPU_R7(k1) 467 LONG_L $7, VCPU_R7(k1)
472 LONG_L $8, VCPU_R8(k1) 468 LONG_L $8, VCPU_R8(k1)
473 LONG_L $9, VCPU_R9(k1) 469 LONG_L $9, VCPU_R9(k1)
474 LONG_L $10, VCPU_R10(k1) 470 LONG_L $10, VCPU_R10(k1)
475 LONG_L $11, VCPU_R11(k1) 471 LONG_L $11, VCPU_R11(k1)
476 LONG_L $12, VCPU_R12(k1) 472 LONG_L $12, VCPU_R12(k1)
477 LONG_L $13, VCPU_R13(k1) 473 LONG_L $13, VCPU_R13(k1)
478 LONG_L $14, VCPU_R14(k1) 474 LONG_L $14, VCPU_R14(k1)
479 LONG_L $15, VCPU_R15(k1) 475 LONG_L $15, VCPU_R15(k1)
480 LONG_L $16, VCPU_R16(k1) 476 LONG_L $16, VCPU_R16(k1)
481 LONG_L $17, VCPU_R17(k1) 477 LONG_L $17, VCPU_R17(k1)
482 LONG_L $18, VCPU_R18(k1) 478 LONG_L $18, VCPU_R18(k1)
483 LONG_L $19, VCPU_R19(k1) 479 LONG_L $19, VCPU_R19(k1)
484 LONG_L $20, VCPU_R20(k1) 480 LONG_L $20, VCPU_R20(k1)
485 LONG_L $21, VCPU_R21(k1) 481 LONG_L $21, VCPU_R21(k1)
486 LONG_L $22, VCPU_R22(k1) 482 LONG_L $22, VCPU_R22(k1)
487 LONG_L $23, VCPU_R23(k1) 483 LONG_L $23, VCPU_R23(k1)
488 LONG_L $24, VCPU_R24(k1) 484 LONG_L $24, VCPU_R24(k1)
489 LONG_L $25, VCPU_R25(k1) 485 LONG_L $25, VCPU_R25(k1)
490 486
491 /* k0/k1 loaded later */ 487 /* k0/k1 loaded later */
492 LONG_L $28, VCPU_R28(k1) 488 LONG_L $28, VCPU_R28(k1)
493 LONG_L $29, VCPU_R29(k1) 489 LONG_L $29, VCPU_R29(k1)
494 LONG_L $30, VCPU_R30(k1) 490 LONG_L $30, VCPU_R30(k1)
495 LONG_L $31, VCPU_R31(k1) 491 LONG_L $31, VCPU_R31(k1)
496 492
497FEXPORT(__kvm_mips_skip_guest_restore) 493FEXPORT(__kvm_mips_skip_guest_restore)
498 LONG_L k0, VCPU_HI(k1) 494 LONG_L k0, VCPU_HI(k1)
499 mthi k0 495 mthi k0
500 496
501 LONG_L k0, VCPU_LO(k1) 497 LONG_L k0, VCPU_LO(k1)
502 mtlo k0 498 mtlo k0
503 499
504 LONG_L k0, VCPU_R26(k1) 500 LONG_L k0, VCPU_R26(k1)
505 LONG_L k1, VCPU_R27(k1) 501 LONG_L k1, VCPU_R27(k1)
506 502
507 eret 503 eret
508 504
509__kvm_mips_return_to_host: 505__kvm_mips_return_to_host:
510 /* EBASE is already pointing to Linux */ 506 /* EBASE is already pointing to Linux */
511 LONG_L k1, VCPU_HOST_STACK(k1) 507 LONG_L k1, VCPU_HOST_STACK(k1)
512 addiu k1,k1, -PT_SIZE 508 INT_ADDIU k1,k1, -PT_SIZE
513 509
514 /* Restore host DDATA_LO */ 510 /* Restore host DDATA_LO */
515 LONG_L k0, PT_HOST_USERLOCAL(k1) 511 LONG_L k0, PT_HOST_USERLOCAL(k1)
516 mtc0 k0, CP0_DDATA_LO 512 mtc0 k0, CP0_DDATA_LO
517 513
518 /* Restore host ASID */ 514 /* Restore host ASID */
519 LONG_L k0, PT_HOST_ASID(sp) 515 LONG_L k0, PT_HOST_ASID(sp)
520 andi k0, 0xff 516 andi k0, 0xff
521 mtc0 k0,CP0_ENTRYHI 517 mtc0 k0,CP0_ENTRYHI
522 ehb 518 ehb
523 519
524 /* Load context saved on the host stack */ 520 /* Load context saved on the host stack */
525 LONG_L $0, PT_R0(k1) 521 LONG_L $0, PT_R0(k1)
526 LONG_L $1, PT_R1(k1) 522 LONG_L $1, PT_R1(k1)
527 523
528 /* r2/v0 is the return code, shift it down by 2 (arithmetic) to recover the err code */ 524 /* r2/v0 is the return code, shift it down by 2 (arithmetic)
529 sra k0, v0, 2 525 * to recover the err code */
530 move $2, k0 526 INT_SRA k0, v0, 2
531 527 move $2, k0
532 LONG_L $3, PT_R3(k1) 528
533 LONG_L $4, PT_R4(k1) 529 LONG_L $3, PT_R3(k1)
534 LONG_L $5, PT_R5(k1) 530 LONG_L $4, PT_R4(k1)
535 LONG_L $6, PT_R6(k1) 531 LONG_L $5, PT_R5(k1)
536 LONG_L $7, PT_R7(k1) 532 LONG_L $6, PT_R6(k1)
537 LONG_L $8, PT_R8(k1) 533 LONG_L $7, PT_R7(k1)
538 LONG_L $9, PT_R9(k1) 534 LONG_L $8, PT_R8(k1)
539 LONG_L $10, PT_R10(k1) 535 LONG_L $9, PT_R9(k1)
540 LONG_L $11, PT_R11(k1) 536 LONG_L $10, PT_R10(k1)
541 LONG_L $12, PT_R12(k1) 537 LONG_L $11, PT_R11(k1)
542 LONG_L $13, PT_R13(k1) 538 LONG_L $12, PT_R12(k1)
543 LONG_L $14, PT_R14(k1) 539 LONG_L $13, PT_R13(k1)
544 LONG_L $15, PT_R15(k1) 540 LONG_L $14, PT_R14(k1)
545 LONG_L $16, PT_R16(k1) 541 LONG_L $15, PT_R15(k1)
546 LONG_L $17, PT_R17(k1) 542 LONG_L $16, PT_R16(k1)
547 LONG_L $18, PT_R18(k1) 543 LONG_L $17, PT_R17(k1)
548 LONG_L $19, PT_R19(k1) 544 LONG_L $18, PT_R18(k1)
549 LONG_L $20, PT_R20(k1) 545 LONG_L $19, PT_R19(k1)
550 LONG_L $21, PT_R21(k1) 546 LONG_L $20, PT_R20(k1)
551 LONG_L $22, PT_R22(k1) 547 LONG_L $21, PT_R21(k1)
552 LONG_L $23, PT_R23(k1) 548 LONG_L $22, PT_R22(k1)
553 LONG_L $24, PT_R24(k1) 549 LONG_L $23, PT_R23(k1)
554 LONG_L $25, PT_R25(k1) 550 LONG_L $24, PT_R24(k1)
555 551 LONG_L $25, PT_R25(k1)
556 /* Host k0/k1 were not saved */ 552
557 553 /* Host k0/k1 were not saved */
558 LONG_L $28, PT_R28(k1) 554
559 LONG_L $29, PT_R29(k1) 555 LONG_L $28, PT_R28(k1)
560 LONG_L $30, PT_R30(k1) 556 LONG_L $29, PT_R29(k1)
561 557 LONG_L $30, PT_R30(k1)
562 LONG_L k0, PT_HI(k1) 558
563 mthi k0 559 LONG_L k0, PT_HI(k1)
564 560 mthi k0
565 LONG_L k0, PT_LO(k1) 561
566 mtlo k0 562 LONG_L k0, PT_LO(k1)
567 563 mtlo k0
568 /* Restore RDHWR access */ 564
569 la k0, 0x2000000F 565 /* Restore RDHWR access */
570 mtc0 k0, CP0_HWRENA 566 PTR_LI k0, 0x2000000F
571 567 mtc0 k0, CP0_HWRENA
572 568
573 /* Restore RA, which is the address we will return to */ 569
574 LONG_L ra, PT_R31(k1) 570 /* Restore RA, which is the address we will return to */
575 j ra 571 LONG_L ra, PT_R31(k1)
576 nop 572 j ra
577 573 nop
578 .set pop 574
579VECTOR_END(MIPSX(GuestExceptionEnd)) 575VECTOR_END(MIPSX(GuestExceptionEnd))
580.end MIPSX(GuestException) 576.end MIPSX(GuestException)
581 577
@@ -627,24 +623,23 @@ MIPSX(exceptions):
627 623
628#define HW_SYNCI_Step $1 624#define HW_SYNCI_Step $1
629LEAF(MIPSX(SyncICache)) 625LEAF(MIPSX(SyncICache))
630 .set push 626 .set push
631 .set mips32r2 627 .set mips32r2
632 beq a1, zero, 20f 628 beq a1, zero, 20f
633 nop 629 nop
634 addu a1, a0, a1 630 REG_ADDU a1, a0, a1
635 rdhwr v0, HW_SYNCI_Step 631 rdhwr v0, HW_SYNCI_Step
636 beq v0, zero, 20f 632 beq v0, zero, 20f
637 nop 633 nop
638
63910: 63410:
640 synci 0(a0) 635 synci 0(a0)
641 addu a0, a0, v0 636 REG_ADDU a0, a0, v0
642 sltu v1, a0, a1 637 sltu v1, a0, a1
643 bne v1, zero, 10b 638 bne v1, zero, 10b
644 nop 639 nop
645 sync 640 sync
64620: 64120:
647 jr.hb ra 642 jr.hb ra
648 nop 643 nop
649 .set pop 644 .set pop
650END(MIPSX(SyncICache)) 645END(MIPSX(SyncICache))
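
The mechanical change running through kvm_locore.S is the move from fixed 32-bit instructions (addiu, sll, srl, sra, addu, la, li) to the width-agnostic wrappers from <asm/asm.h> (INT_ADDIU, INT_SLL, REG_ADDU, PTR_LI, PTR_LA, ...), so the same source assembles for both 32-bit and 64-bit kernels. Roughly, the wrappers expand along these lines (a simplified sketch, not the verbatim header):

    /* Each wrapper picks the natively sized MIPS instruction for the build. */
    #if (_MIPS_SZLONG == 64)
    #define LONG_L ld  /* load a native long  */
    #define LONG_S sd  /* store a native long */
    #else
    #define LONG_L lw
    #define LONG_S sw
    #endif

    #if (_MIPS_SZPTR == 64)
    #define PTR_LI dli /* load a pointer-sized immediate */
    #define PTR_LA dla
    #else
    #define PTR_LI li
    #define PTR_LA la
    #endif
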
diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
index dd203e59e6fd..a7b044536de4 100644
--- a/arch/mips/kvm/kvm_mips.c
+++ b/arch/mips/kvm/kvm_mips.c
@@ -208,6 +208,10 @@ int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
208 return 0; 208 return 0;
209} 209}
210 210
211void kvm_arch_memslots_updated(struct kvm *kvm)
212{
213}
214
211int kvm_arch_prepare_memory_region(struct kvm *kvm, 215int kvm_arch_prepare_memory_region(struct kvm *kvm,
212 struct kvm_memory_slot *memslot, 216 struct kvm_memory_slot *memslot,
213 struct kvm_userspace_memory_region *mem, 217 struct kvm_userspace_memory_region *mem,
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index e773659ccf9f..46048d24328c 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -803,6 +803,32 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
803 dec_insn.next_pc_inc; 803 dec_insn.next_pc_inc;
804 return 1; 804 return 1;
805 break; 805 break;
806#ifdef CONFIG_CPU_CAVIUM_OCTEON
807 case lwc2_op: /* This is bbit0 on Octeon */
808 if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) == 0)
809 *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
810 else
811 *contpc = regs->cp0_epc + 8;
812 return 1;
813 case ldc2_op: /* This is bbit032 on Octeon */
814 if ((regs->regs[insn.i_format.rs] & (1ull<<(insn.i_format.rt + 32))) == 0)
815 *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
816 else
817 *contpc = regs->cp0_epc + 8;
818 return 1;
819 case swc2_op: /* This is bbit1 on Octeon */
820 if (regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt))
821 *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
822 else
823 *contpc = regs->cp0_epc + 8;
824 return 1;
825 case sdc2_op: /* This is bbit132 on Octeon */
826 if (regs->regs[insn.i_format.rs] & (1ull<<(insn.i_format.rt + 32)))
827 *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
828 else
829 *contpc = regs->cp0_epc + 8;
830 return 1;
831#endif
806 case cop0_op: 832 case cop0_op:
807 case cop1_op: 833 case cop1_op:
808 case cop2_op: 834 case cop2_op:
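
All four Octeon bbit cases added above share one target calculation: a taken branch lands at the delay-slot address plus the sign-extended immediate shifted left by two, while a not-taken branch skips the branch and its delay slot. As a sketch (standard MIPS branch semantics; the helper is illustrative):

	static unsigned long bbit_target(unsigned long epc, long simm16, int taken)
	{
		return taken ? epc + 4 + (simm16 << 2)	/* branch target */
			     : epc + 8;			/* fall through past delay slot */
	}
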
diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c
index af763e838fdd..5e5424753b56 100644
--- a/arch/mips/oprofile/common.c
+++ b/arch/mips/oprofile/common.c
@@ -33,7 +33,7 @@ static int op_mips_setup(void)
33 return 0; 33 return 0;
34} 34}
35 35
36static int op_mips_create_files(struct super_block *sb, struct dentry *root) 36static int op_mips_create_files(struct dentry *root)
37{ 37{
38 int i; 38 int i;
39 39
@@ -42,16 +42,16 @@ static int op_mips_create_files(struct super_block *sb, struct dentry *root)
42 char buf[4]; 42 char buf[4];
43 43
44 snprintf(buf, sizeof buf, "%d", i); 44 snprintf(buf, sizeof buf, "%d", i);
45 dir = oprofilefs_mkdir(sb, root, buf); 45 dir = oprofilefs_mkdir(root, buf);
46 46
47 oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled); 47 oprofilefs_create_ulong(dir, "enabled", &ctr[i].enabled);
48 oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event); 48 oprofilefs_create_ulong(dir, "event", &ctr[i].event);
49 oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count); 49 oprofilefs_create_ulong(dir, "count", &ctr[i].count);
50 oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel); 50 oprofilefs_create_ulong(dir, "kernel", &ctr[i].kernel);
51 oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user); 51 oprofilefs_create_ulong(dir, "user", &ctr[i].user);
52 oprofilefs_create_ulong(sb, dir, "exl", &ctr[i].exl); 52 oprofilefs_create_ulong(dir, "exl", &ctr[i].exl);
53 /* Dummy. */ 53 /* Dummy. */
54 oprofilefs_create_ulong(sb, dir, "unit_mask", &ctr[i].unit_mask); 54 oprofilefs_create_ulong(dir, "unit_mask", &ctr[i].unit_mask);
55 } 55 }
56 56
57 return 0; 57 return 0;
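
After this signature change the oprofilefs helpers no longer take a super_block; files hang directly off the parent dentry. A minimal usage sketch matching the hunk above (error handling abbreviated, assuming oprofilefs_mkdir() returns NULL on failure):

	static int create_counter0_files(struct dentry *root,
					 struct op_counter_config *ctr)
	{
		struct dentry *dir = oprofilefs_mkdir(root, "0");

		if (!dir)
			return -ENOMEM;
		oprofilefs_create_ulong(dir, "enabled", &ctr[0].enabled);
		oprofilefs_create_ulong(dir, "event", &ctr[0].event);
		return 0;
	}
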
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index e4b1140cdae0..3a2b6e9f25cf 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -166,7 +166,7 @@ static void mipsxx_reg_setup(struct op_counter_config *ctr)
166 reg.control[i] |= M_PERFCTL_USER; 166 reg.control[i] |= M_PERFCTL_USER;
167 if (ctr[i].exl) 167 if (ctr[i].exl)
168 reg.control[i] |= M_PERFCTL_EXL; 168 reg.control[i] |= M_PERFCTL_EXL;
169 if (current_cpu_type() == CPU_XLR) 169 if (boot_cpu_type() == CPU_XLR)
170 reg.control[i] |= M_PERFCTL_COUNT_ALL_THREADS; 170 reg.control[i] |= M_PERFCTL_COUNT_ALL_THREADS;
171 reg.counter[i] = 0x80000000 - ctr[i].count; 171 reg.counter[i] = 0x80000000 - ctr[i].count;
172 } 172 }
diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c
index 594e60d6a43b..33e7aa52d9c4 100644
--- a/arch/mips/pci/pci.c
+++ b/arch/mips/pci/pci.c
@@ -113,7 +113,6 @@ static void pcibios_scanbus(struct pci_controller *hose)
113 if (!pci_has_flag(PCI_PROBE_ONLY)) { 113 if (!pci_has_flag(PCI_PROBE_ONLY)) {
114 pci_bus_size_bridges(bus); 114 pci_bus_size_bridges(bus);
115 pci_bus_assign_resources(bus); 115 pci_bus_assign_resources(bus);
116 pci_enable_bridges(bus);
117 } 116 }
118 } 117 }
119} 118}
diff --git a/arch/mips/pnx833x/common/platform.c b/arch/mips/pnx833x/common/platform.c
index d22dc0d6f289..2b7e837dc2e2 100644
--- a/arch/mips/pnx833x/common/platform.c
+++ b/arch/mips/pnx833x/common/platform.c
@@ -206,11 +206,13 @@ static struct resource pnx833x_ethernet_resources[] = {
206 .end = PNX8335_IP3902_PORTS_END, 206 .end = PNX8335_IP3902_PORTS_END,
207 .flags = IORESOURCE_MEM, 207 .flags = IORESOURCE_MEM,
208 }, 208 },
209#ifdef CONFIG_SOC_PNX8335
209 [1] = { 210 [1] = {
210 .start = PNX8335_PIC_ETHERNET_INT, 211 .start = PNX8335_PIC_ETHERNET_INT,
211 .end = PNX8335_PIC_ETHERNET_INT, 212 .end = PNX8335_PIC_ETHERNET_INT,
212 .flags = IORESOURCE_IRQ, 213 .flags = IORESOURCE_IRQ,
213 }, 214 },
215#endif
214}; 216};
215 217
216static struct platform_device pnx833x_ethernet_device = { 218static struct platform_device pnx833x_ethernet_device = {
diff --git a/arch/mips/sni/a20r.c b/arch/mips/sni/a20r.c
index dd0ab982d77e..f9407e170476 100644
--- a/arch/mips/sni/a20r.c
+++ b/arch/mips/sni/a20r.c
@@ -122,7 +122,6 @@ static struct resource sc26xx_rsrc[] = {
122 122
123static struct sccnxp_pdata sccnxp_data = { 123static struct sccnxp_pdata sccnxp_data = {
124 .reg_shift = 2, 124 .reg_shift = 2,
125 .frequency = 3686400,
126 .mctrl_cfg[0] = MCTRL_SIG(DTR_OP, LINE_OP7) | 125 .mctrl_cfg[0] = MCTRL_SIG(DTR_OP, LINE_OP7) |
127 MCTRL_SIG(RTS_OP, LINE_OP3) | 126 MCTRL_SIG(RTS_OP, LINE_OP3) |
128 MCTRL_SIG(DSR_IP, LINE_IP5) | 127 MCTRL_SIG(DSR_IP, LINE_IP5) |
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index 99dbab1c59ac..d60bf98fa5cf 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -55,6 +55,7 @@ config GENERIC_CSUM
55 55
56source "init/Kconfig" 56source "init/Kconfig"
57 57
58source "kernel/Kconfig.freezer"
58 59
59menu "Processor type and features" 60menu "Processor type and features"
60 61
diff --git a/arch/openrisc/include/asm/prom.h b/arch/openrisc/include/asm/prom.h
index bbb34e5343a2..eb59bfe23e85 100644
--- a/arch/openrisc/include/asm/prom.h
+++ b/arch/openrisc/include/asm/prom.h
@@ -44,9 +44,6 @@ void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
44 44
45extern void kdump_move_device_tree(void); 45extern void kdump_move_device_tree(void);
46 46
47/* CPU OF node matching */
48struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
49
50/* Get the MAC address */ 47/* Get the MAC address */
51extern const void *of_get_mac_address(struct device_node *np); 48extern const void *of_get_mac_address(struct device_node *np);
52 49
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
index 07349b002687..1cba8f29bb49 100644
--- a/arch/parisc/kernel/signal.c
+++ b/arch/parisc/kernel/signal.c
@@ -78,7 +78,7 @@ restore_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs)
78 err |= __copy_from_user(regs->iaoq, sc->sc_iaoq, sizeof(regs->iaoq)); 78 err |= __copy_from_user(regs->iaoq, sc->sc_iaoq, sizeof(regs->iaoq));
79 err |= __copy_from_user(regs->iasq, sc->sc_iasq, sizeof(regs->iasq)); 79 err |= __copy_from_user(regs->iasq, sc->sc_iasq, sizeof(regs->iasq));
80 err |= __get_user(regs->sar, &sc->sc_sar); 80 err |= __get_user(regs->sar, &sc->sc_sar);
81 DBG(2,"restore_sigcontext: iaoq is 0x%#lx / 0x%#lx\n", 81 DBG(2,"restore_sigcontext: iaoq is %#lx / %#lx\n",
82 regs->iaoq[0],regs->iaoq[1]); 82 regs->iaoq[0],regs->iaoq[1]);
83 DBG(2,"restore_sigcontext: r28 is %ld\n", regs->gr[28]); 83 DBG(2,"restore_sigcontext: r28 is %ld\n", regs->gr[28]);
84 return err; 84 return err;
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 3bf72cd2c8fc..a4e3a93bf2d4 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -312,6 +312,26 @@ config MATH_EMULATION
312 such as fsqrt on cores that do have an FPU but do not implement 312 such as fsqrt on cores that do have an FPU but do not implement
313 them (such as Freescale BookE). 313 them (such as Freescale BookE).
314 314
315choice
316 prompt "Math emulation options"
317 default MATH_EMULATION_FULL
318 depends on MATH_EMULATION
319
320config MATH_EMULATION_FULL
321 bool "Emulate all the floating point instructions"
322 ---help---
 323	  Selecting this option will enable the kernel to emulate
 324	  all the floating point instructions. If your SoC doesn't
 325	  have an FPU, you should select this.
326
327config MATH_EMULATION_HW_UNIMPLEMENTED
328 bool "Just emulate the FPU unimplemented instructions"
329 ---help---
 330	  Select this if your SoC does have a hardware FPU, but some
 331	  floating point instructions are not implemented by it.
332
333endchoice
334
315config PPC_TRANSACTIONAL_MEM 335config PPC_TRANSACTIONAL_MEM
316 bool "Transactional Memory support for POWERPC" 336 bool "Transactional Memory support for POWERPC"
317 depends on PPC_BOOK3S_64 337 depends on PPC_BOOK3S_64
@@ -369,9 +389,9 @@ config KEXEC
369 389
370 It is an ongoing process to be certain the hardware in a machine 390 It is an ongoing process to be certain the hardware in a machine
371 is properly shutdown, so do not be surprised if this code does not 391 is properly shutdown, so do not be surprised if this code does not
372 initially work for you. It may help to enable device hotplugging 392 initially work for you. As of this writing the exact hardware
373 support. As of this writing the exact hardware interface is 393 interface is strongly in flux, so no good recommendation can be
374 strongly in flux, so no good recommendation can be made. 394 made.
375 395
376config CRASH_DUMP 396config CRASH_DUMP
377 bool "Build a kdump crash kernel" 397 bool "Build a kdump crash kernel"
@@ -566,7 +586,7 @@ config SCHED_SMT
566config PPC_DENORMALISATION 586config PPC_DENORMALISATION
567 bool "PowerPC denormalisation exception handling" 587 bool "PowerPC denormalisation exception handling"
568 depends on PPC_BOOK3S_64 588 depends on PPC_BOOK3S_64
569 default "n" 589 default "y" if PPC_POWERNV
570 ---help--- 590 ---help---
571 Add support for handling denormalisation of single precision 591 Add support for handling denormalisation of single precision
572 values. Useful for bare metal only. If unsure say Y here. 592 values. Useful for bare metal only. If unsure say Y here.
@@ -979,6 +999,7 @@ config RELOCATABLE
979 must live at a different physical address than the primary 999 must live at a different physical address than the primary
980 kernel. 1000 kernel.
981 1001
1002# This value must have zeroes in the bottom 60 bits otherwise lots will break
982config PAGE_OFFSET 1003config PAGE_OFFSET
983 hex 1004 hex
984 default "0xc000000000000000" 1005 default "0xc000000000000000"
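
The constraint in the new comment ("zeroes in the bottom 60 bits") can be stated as a compile-time check; a sketch, not part of the patch:

	static inline void check_page_offset(void)
	{
		/* 0xc000000000000000 uses only the top four address bits */
		BUILD_BUG_ON(PAGE_OFFSET & ((1UL << 60) - 1));
	}
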
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 967fd23ace78..51cfb78d4061 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -88,13 +88,30 @@ CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcmodel=medium,-mminimal-toc)
88CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mno-pointers-to-nested-functions) 88CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mno-pointers-to-nested-functions)
89CFLAGS-$(CONFIG_PPC32) := -ffixed-r2 -mmultiple 89CFLAGS-$(CONFIG_PPC32) := -ffixed-r2 -mmultiple
90 90
91ifeq ($(CONFIG_PPC_BOOK3S_64),y)
91CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power7,-mtune=power4) 92CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power7,-mtune=power4)
93else
94CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=powerpc64
95endif
96
92CFLAGS-$(CONFIG_CELL_CPU) += $(call cc-option,-mcpu=cell) 97CFLAGS-$(CONFIG_CELL_CPU) += $(call cc-option,-mcpu=cell)
93CFLAGS-$(CONFIG_POWER4_CPU) += $(call cc-option,-mcpu=power4) 98CFLAGS-$(CONFIG_POWER4_CPU) += $(call cc-option,-mcpu=power4)
94CFLAGS-$(CONFIG_POWER5_CPU) += $(call cc-option,-mcpu=power5) 99CFLAGS-$(CONFIG_POWER5_CPU) += $(call cc-option,-mcpu=power5)
95CFLAGS-$(CONFIG_POWER6_CPU) += $(call cc-option,-mcpu=power6) 100CFLAGS-$(CONFIG_POWER6_CPU) += $(call cc-option,-mcpu=power6)
96CFLAGS-$(CONFIG_POWER7_CPU) += $(call cc-option,-mcpu=power7) 101CFLAGS-$(CONFIG_POWER7_CPU) += $(call cc-option,-mcpu=power7)
97 102
103E5500_CPU := $(call cc-option,-mcpu=e500mc64,-mcpu=powerpc64)
104CFLAGS-$(CONFIG_E5500_CPU) += $(E5500_CPU)
105CFLAGS-$(CONFIG_E6500_CPU) += $(call cc-option,-mcpu=e6500,$(E5500_CPU))
106
107ifeq ($(CONFIG_PPC32),y)
108ifeq ($(CONFIG_PPC_E500MC),y)
109CFLAGS-y += $(call cc-option,-mcpu=e500mc,-mcpu=powerpc)
110else
111CFLAGS-$(CONFIG_E500) += $(call cc-option,-mcpu=8540 -msoft-float,-mcpu=powerpc)
112endif
113endif
114
98CFLAGS-$(CONFIG_TUNE_CELL) += $(call cc-option,-mtune=cell) 115CFLAGS-$(CONFIG_TUNE_CELL) += $(call cc-option,-mtune=cell)
99 116
100KBUILD_CPPFLAGS += -Iarch/$(ARCH) 117KBUILD_CPPFLAGS += -Iarch/$(ARCH)
@@ -139,7 +156,6 @@ endif
139 156
140cpu-as-$(CONFIG_4xx) += -Wa,-m405 157cpu-as-$(CONFIG_4xx) += -Wa,-m405
141cpu-as-$(CONFIG_ALTIVEC) += -Wa,-maltivec 158cpu-as-$(CONFIG_ALTIVEC) += -Wa,-maltivec
142cpu-as-$(CONFIG_E500) += -Wa,-me500
143cpu-as-$(CONFIG_E200) += -Wa,-me200 159cpu-as-$(CONFIG_E200) += -Wa,-me200
144 160
145KBUILD_AFLAGS += $(cpu-as-y) 161KBUILD_AFLAGS += $(cpu-as-y)
diff --git a/arch/powerpc/boot/.gitignore b/arch/powerpc/boot/.gitignore
index c32ae5ce9fff..554734ff302e 100644
--- a/arch/powerpc/boot/.gitignore
+++ b/arch/powerpc/boot/.gitignore
@@ -22,6 +22,7 @@ zImage.initrd
22zImage.bin.* 22zImage.bin.*
23zImage.chrp 23zImage.chrp
24zImage.coff 24zImage.coff
25zImage.epapr
25zImage.holly 26zImage.holly
26zImage.*lds 27zImage.*lds
27zImage.miboot 28zImage.miboot
diff --git a/arch/powerpc/boot/dts/ac14xx.dts b/arch/powerpc/boot/dts/ac14xx.dts
index a27a4609bb42..a543c4088cba 100644
--- a/arch/powerpc/boot/dts/ac14xx.dts
+++ b/arch/powerpc/boot/dts/ac14xx.dts
@@ -10,7 +10,7 @@
10 */ 10 */
11 11
12 12
13/include/ "mpc5121.dtsi" 13#include <mpc5121.dtsi>
14 14
15/ { 15/ {
16 model = "ac14xx"; 16 model = "ac14xx";
diff --git a/arch/powerpc/boot/dts/b4420qds.dts b/arch/powerpc/boot/dts/b4420qds.dts
index 923156d03b30..508dbdf33c81 100644
--- a/arch/powerpc/boot/dts/b4420qds.dts
+++ b/arch/powerpc/boot/dts/b4420qds.dts
@@ -33,7 +33,7 @@
33 */ 33 */
34 34
35/include/ "fsl/b4420si-pre.dtsi" 35/include/ "fsl/b4420si-pre.dtsi"
36/include/ "b4qds.dts" 36/include/ "b4qds.dtsi"
37 37
38/ { 38/ {
39 model = "fsl,B4420QDS"; 39 model = "fsl,B4420QDS";
diff --git a/arch/powerpc/boot/dts/b4860qds.dts b/arch/powerpc/boot/dts/b4860qds.dts
index 78907f38bb77..6bb3707ffe3d 100644
--- a/arch/powerpc/boot/dts/b4860qds.dts
+++ b/arch/powerpc/boot/dts/b4860qds.dts
@@ -33,7 +33,7 @@
33 */ 33 */
34 34
35/include/ "fsl/b4860si-pre.dtsi" 35/include/ "fsl/b4860si-pre.dtsi"
36/include/ "b4qds.dts" 36/include/ "b4qds.dtsi"
37 37
38/ { 38/ {
39 model = "fsl,B4860QDS"; 39 model = "fsl,B4860QDS";
diff --git a/arch/powerpc/boot/dts/b4qds.dts b/arch/powerpc/boot/dts/b4qds.dtsi
index e6d2f8f90544..e6d2f8f90544 100644
--- a/arch/powerpc/boot/dts/b4qds.dts
+++ b/arch/powerpc/boot/dts/b4qds.dtsi
diff --git a/arch/powerpc/boot/dts/c293pcie.dts b/arch/powerpc/boot/dts/c293pcie.dts
new file mode 100644
index 000000000000..1238bda8901f
--- /dev/null
+++ b/arch/powerpc/boot/dts/c293pcie.dts
@@ -0,0 +1,223 @@
1/*
2 * C293 PCIE Device Tree Source
3 *
4 * Copyright 2013 Freescale Semiconductor Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35/include/ "fsl/c293si-pre.dtsi"
36
37/ {
38 model = "fsl,C293PCIE";
39 compatible = "fsl,C293PCIE";
40
41 memory {
42 device_type = "memory";
43 };
44
45 ifc: ifc@fffe1e000 {
46 reg = <0xf 0xffe1e000 0 0x2000>;
47 ranges = <0x0 0x0 0xf 0xec000000 0x04000000
48 0x2 0x0 0xf 0xffdf0000 0x00010000>;
49
50 };
51
52 soc: soc@fffe00000 {
53 ranges = <0x0 0xf 0xffe00000 0x100000>;
54 };
55
56 pci0: pcie@fffe0a000 {
57 reg = <0xf 0xffe0a000 0 0x1000>;
58 ranges = <0x2000000 0x0 0x80000000 0xc 0x00000000 0x0 0x20000000
59 0x1000000 0x0 0x00000000 0xf 0xffc00000 0x0 0x10000>;
60 pcie@0 {
61 ranges = <0x2000000 0x0 0x80000000
62 0x2000000 0x0 0x80000000
63 0x0 0x20000000
64
65 0x1000000 0x0 0x0
66 0x1000000 0x0 0x0
67 0x0 0x100000>;
68 };
69 };
70};
71
72&ifc {
73 nor@0,0 {
74 #address-cells = <1>;
75 #size-cells = <1>;
76 compatible = "cfi-flash";
77 reg = <0x0 0x0 0x4000000>;
78 bank-width = <2>;
79 device-width = <1>;
80
81 partition@0 {
82 /* 1MB for DTB Image */
83 reg = <0x0 0x00100000>;
84 label = "NOR DTB Image";
85 };
86
87 partition@100000 {
88 /* 8 MB for Linux Kernel Image */
89 reg = <0x00100000 0x00800000>;
90 label = "NOR Linux Kernel Image";
91 };
92
93 partition@900000 {
94 /* 53MB for rootfs */
95 reg = <0x00900000 0x03500000>;
96 label = "NOR Rootfs Image";
97 };
98
99 partition@3e00000 {
100 /* 1MB for blob encrypted key */
101 reg = <0x03e00000 0x00100000>;
102 label = "NOR blob encrypted key";
103 };
104
105 partition@3f00000 {
 106			/* 512KB for u-boot Bootloader Image and env */
107 reg = <0x03f00000 0x00100000>;
108 label = "NOR U-Boot Image";
109 read-only;
110 };
111 };
112
113 nand@1,0 {
114 #address-cells = <1>;
115 #size-cells = <1>;
116 compatible = "fsl,ifc-nand";
117 reg = <0x1 0x0 0x10000>;
118
119 partition@0 {
120 /* This location must not be altered */
121 /* 1MB for u-boot Bootloader Image */
122 reg = <0x0 0x00100000>;
123 label = "NAND U-Boot Image";
124 read-only;
125 };
126
127 partition@100000 {
128 /* 1MB for DTB Image */
129 reg = <0x00100000 0x00100000>;
130 label = "NAND DTB Image";
131 };
132
133 partition@200000 {
134 /* 16MB for Linux Kernel Image */
135 reg = <0x00200000 0x01000000>;
136 label = "NAND Linux Kernel Image";
137 };
138
139 partition@1200000 {
140 /* 4078MB for Root file System Image */
141 reg = <0x00600000 0xfee00000>;
142 label = "NAND RFS Image";
143 };
144 };
145
146 cpld@2,0 {
147 compatible = "fsl,c293pcie-cpld";
148 reg = <0x2 0x0 0x20>;
149 };
150};
151
152&soc {
153 i2c@3000 {
154 eeprom@50 {
155 compatible = "st,24c1024";
156 reg = <0x50>;
157 };
158
159 adt7461@4c {
160 compatible = "adi,adt7461";
161 reg = <0x4c>;
162 };
163 };
164
165 spi@7000 {
166 flash@0 {
167 #address-cells = <1>;
168 #size-cells = <1>;
169 compatible = "spansion,s25sl12801";
170 reg = <0>;
171 spi-max-frequency = <50000000>;
172
173 partition@0 {
174 /* 1MB for u-boot Bootloader Image */
175 /* 1MB for Environment */
176 reg = <0x0 0x00100000>;
177 label = "SPI Flash U-Boot Image";
178 read-only;
179 };
180
181 partition@100000 {
182 /* 512KB for DTB Image */
183 reg = <0x00100000 0x00080000>;
184 label = "SPI Flash DTB Image";
185 };
186
187 partition@180000 {
188 /* 4MB for Linux Kernel Image */
189 reg = <0x00180000 0x00400000>;
190 label = "SPI Flash Linux Kernel Image";
191 };
192
193 partition@580000 {
194 /* 10.5MB for RFS Image */
195 reg = <0x00580000 0x00a80000>;
196 label = "SPI Flash RFS Image";
197 };
198 };
199 };
200
201 mdio@24000 {
202 phy0: ethernet-phy@0 {
203 interrupts = <2 1 0 0>;
204 reg = <0x0>;
205 };
206
207 phy1: ethernet-phy@1 {
208 interrupts = <2 1 0 0>;
209 reg = <0x2>;
210 };
211 };
212
213 enet0: ethernet@b0000 {
214 phy-handle = <&phy0>;
215 phy-connection-type = "rgmii-id";
216 };
217
218 enet1: ethernet@b1000 {
219 phy-handle = <&phy1>;
220 phy-connection-type = "rgmii-id";
221 };
222};
223/include/ "fsl/c293si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/fsl/b4si-post.dtsi b/arch/powerpc/boot/dts/fsl/b4si-post.dtsi
index 73991547c69b..4c617bf8cdb2 100644
--- a/arch/powerpc/boot/dts/fsl/b4si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/b4si-post.dtsi
@@ -204,7 +204,7 @@
204 }; 204 };
205 }; 205 };
206 206
207/include/ "qoriq-mpic.dtsi" 207/include/ "qoriq-mpic4.3.dtsi"
208 208
209 guts: global-utilities@e0000 { 209 guts: global-utilities@e0000 {
210 compatible = "fsl,b4-device-config"; 210 compatible = "fsl,b4-device-config";
diff --git a/arch/powerpc/boot/dts/fsl/c293si-post.dtsi b/arch/powerpc/boot/dts/fsl/c293si-post.dtsi
new file mode 100644
index 000000000000..bd208320bff5
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/c293si-post.dtsi
@@ -0,0 +1,193 @@
1/*
2 * C293 Silicon/SoC Device Tree Source (post include)
3 *
4 * Copyright 2012 Freescale Semiconductor Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35&ifc {
36 #address-cells = <2>;
37 #size-cells = <1>;
38 compatible = "fsl,ifc", "simple-bus";
39 interrupts = <19 2 0 0>;
40};
41
42/* controller at 0xa000 */
43&pci0 {
44 compatible = "fsl,qoriq-pcie-v2.2", "fsl,qoriq-pcie";
45 device_type = "pci";
46 #size-cells = <2>;
47 #address-cells = <3>;
48 bus-range = <0 255>;
49 clock-frequency = <33333333>;
50 interrupts = <16 2 0 0>;
51
52 pcie@0 {
53 reg = <0 0 0 0 0>;
54 #interrupt-cells = <1>;
55 #size-cells = <2>;
56 #address-cells = <3>;
57 device_type = "pci";
58 interrupts = <16 2 0 0>;
59 interrupt-map-mask = <0xf800 0 0 7>;
60 interrupt-map = <
61 /* IDSEL 0x0 */
62 0000 0x0 0x0 0x1 &mpic 0x0 0x1 0x0 0x0
63 0000 0x0 0x0 0x2 &mpic 0x1 0x1 0x0 0x0
64 0000 0x0 0x0 0x3 &mpic 0x2 0x1 0x0 0x0
65 0000 0x0 0x0 0x4 &mpic 0x3 0x1 0x0 0x0
66 >;
67 };
68};
69
70&soc {
71 #address-cells = <1>;
72 #size-cells = <1>;
73 device_type = "soc";
74 compatible = "simple-bus";
75 bus-frequency = <0>; // Filled out by uboot.
76
77 ecm-law@0 {
78 compatible = "fsl,ecm-law";
79 reg = <0x0 0x1000>;
80 fsl,num-laws = <12>;
81 };
82
83 ecm@1000 {
84 compatible = "fsl,c293-ecm", "fsl,ecm";
85 reg = <0x1000 0x1000>;
86 interrupts = <16 2 0 0>;
87 };
88
89 memory-controller@2000 {
90 compatible = "fsl,c293-memory-controller";
91 reg = <0x2000 0x1000>;
92 interrupts = <16 2 0 0>;
93 };
94
95/include/ "pq3-i2c-0.dtsi"
96/include/ "pq3-i2c-1.dtsi"
97/include/ "pq3-duart-0.dtsi"
98/include/ "pq3-espi-0.dtsi"
99 spi0: spi@7000 {
100 fsl,espi-num-chipselects = <1>;
101 };
102
103/include/ "pq3-gpio-0.dtsi"
104 L2: l2-cache-controller@20000 {
105 compatible = "fsl,c293-l2-cache-controller";
106 reg = <0x20000 0x1000>;
107 cache-line-size = <32>; // 32 bytes
108 cache-size = <0x80000>; // L2,512K
109 interrupts = <16 2 0 0>;
110 };
111
112/include/ "pq3-dma-0.dtsi"
113/include/ "pq3-esdhc-0.dtsi"
114 sdhc@2e000 {
115 compatible = "fsl,c293-esdhc", "fsl,esdhc";
116 sdhci,auto-cmd12;
117 };
118
119 crypto@80000 {
120/include/ "qoriq-sec6.0-0.dtsi"
121 };
122
123 crypto@80000 {
124 reg = <0x80000 0x20000>;
125 ranges = <0x0 0x80000 0x20000>;
126
127 jr@1000{
128 interrupts = <45 2 0 0>;
129 };
130 jr@2000{
131 interrupts = <57 2 0 0>;
132 };
133 };
134
135 crypto@a0000 {
136/include/ "qoriq-sec6.0-0.dtsi"
137 };
138
139 crypto@a0000 {
140 reg = <0xa0000 0x20000>;
141 ranges = <0x0 0xa0000 0x20000>;
142
143 jr@1000{
144 interrupts = <49 2 0 0>;
145 };
146 jr@2000{
147 interrupts = <50 2 0 0>;
148 };
149 };
150
151 crypto@c0000 {
152/include/ "qoriq-sec6.0-0.dtsi"
153 };
154
155 crypto@c0000 {
156 reg = <0xc0000 0x20000>;
157 ranges = <0x0 0xc0000 0x20000>;
158
159 jr@1000{
160 interrupts = <55 2 0 0>;
161 };
162 jr@2000{
163 interrupts = <56 2 0 0>;
164 };
165 };
166
167/include/ "pq3-mpic.dtsi"
168/include/ "pq3-mpic-timer-B.dtsi"
169
170/include/ "pq3-etsec2-0.dtsi"
171 enet0: ethernet@b0000 {
172 queue-group@b0000 {
173 reg = <0x10000 0x1000>;
174 fsl,rx-bit-map = <0xff>;
175 fsl,tx-bit-map = <0xff>;
176 };
177 };
178
179/include/ "pq3-etsec2-1.dtsi"
180 enet1: ethernet@b1000 {
181 queue-group@b1000 {
182 reg = <0x11000 0x1000>;
183 fsl,rx-bit-map = <0xff>;
184 fsl,tx-bit-map = <0xff>;
185 };
186 };
187
188 global-utilities@e0000 {
189 compatible = "fsl,c293-guts";
190 reg = <0xe0000 0x1000>;
191 fsl,has-rstcr;
192 };
193};
diff --git a/arch/powerpc/boot/dts/fsl/c293si-pre.dtsi b/arch/powerpc/boot/dts/fsl/c293si-pre.dtsi
new file mode 100644
index 000000000000..065049d76245
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/c293si-pre.dtsi
@@ -0,0 +1,63 @@
1/*
2 * C293 Silicon/SoC Device Tree Source (pre include)
3 *
4 * Copyright 2012 Freescale Semiconductor Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35/dts-v1/;
36
37/include/ "e500v2_power_isa.dtsi"
38
39/ {
40 compatible = "fsl,C293";
41 #address-cells = <2>;
42 #size-cells = <2>;
43 interrupt-parent = <&mpic>;
44
45 aliases {
46 serial0 = &serial0;
47 serial1 = &serial1;
48 ethernet0 = &enet0;
49 ethernet1 = &enet1;
50 pci0 = &pci0;
51 };
52
53 cpus {
54 #address-cells = <1>;
55 #size-cells = <0>;
56
57 PowerPC,e500v2@0 {
58 device_type = "cpu";
59 reg = <0x0>;
60 next-level-cache = <&L2>;
61 };
62 };
63};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-mpic4.3.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-mpic4.3.dtsi
new file mode 100644
index 000000000000..64f713c24825
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-mpic4.3.dtsi
@@ -0,0 +1,149 @@
1/*
2 * QorIQ MPIC device tree stub [ controller @ offset 0x40000 ]
3 *
4 * Copyright 2013 Freescale Semiconductor Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35mpic: pic@40000 {
36 interrupt-controller;
37 #address-cells = <0>;
38 #interrupt-cells = <4>;
39 reg = <0x40000 0x40000>;
40 compatible = "fsl,mpic";
41 device_type = "open-pic";
42 clock-frequency = <0x0>;
43};
44
45timer@41100 {
46 compatible = "fsl,mpic-global-timer";
47 reg = <0x41100 0x100 0x41300 4>;
48 interrupts = <0 0 3 0
49 1 0 3 0
50 2 0 3 0
51 3 0 3 0>;
52};
53
54msi0: msi@41600 {
55 compatible = "fsl,mpic-msi-v4.3";
56 reg = <0x41600 0x200 0x44148 4>;
57 interrupts = <
58 0xe0 0 0 0
59 0xe1 0 0 0
60 0xe2 0 0 0
61 0xe3 0 0 0
62 0xe4 0 0 0
63 0xe5 0 0 0
64 0xe6 0 0 0
65 0xe7 0 0 0
66 0x100 0 0 0
67 0x101 0 0 0
68 0x102 0 0 0
69 0x103 0 0 0
70 0x104 0 0 0
71 0x105 0 0 0
72 0x106 0 0 0
73 0x107 0 0 0>;
74};
75
76msi1: msi@41800 {
77 compatible = "fsl,mpic-msi-v4.3";
78 reg = <0x41800 0x200 0x45148 4>;
79 interrupts = <
80 0xe8 0 0 0
81 0xe9 0 0 0
82 0xea 0 0 0
83 0xeb 0 0 0
84 0xec 0 0 0
85 0xed 0 0 0
86 0xee 0 0 0
87 0xef 0 0 0
88 0x108 0 0 0
89 0x109 0 0 0
90 0x10a 0 0 0
91 0x10b 0 0 0
92 0x10c 0 0 0
93 0x10d 0 0 0
94 0x10e 0 0 0
95 0x10f 0 0 0>;
96};
97
98msi2: msi@41a00 {
99 compatible = "fsl,mpic-msi-v4.3";
100 reg = <0x41a00 0x200 0x46148 4>;
101 interrupts = <
102 0xf0 0 0 0
103 0xf1 0 0 0
104 0xf2 0 0 0
105 0xf3 0 0 0
106 0xf4 0 0 0
107 0xf5 0 0 0
108 0xf6 0 0 0
109 0xf7 0 0 0
110 0x110 0 0 0
111 0x111 0 0 0
112 0x112 0 0 0
113 0x113 0 0 0
114 0x114 0 0 0
115 0x115 0 0 0
116 0x116 0 0 0
117 0x117 0 0 0>;
118};
119
120msi3: msi@41c00 {
121 compatible = "fsl,mpic-msi-v4.3";
122 reg = <0x41c00 0x200 0x47148 4>;
123 interrupts = <
124 0xf8 0 0 0
125 0xf9 0 0 0
126 0xfa 0 0 0
127 0xfb 0 0 0
128 0xfc 0 0 0
129 0xfd 0 0 0
130 0xfe 0 0 0
131 0xff 0 0 0
132 0x118 0 0 0
133 0x119 0 0 0
134 0x11a 0 0 0
135 0x11b 0 0 0
136 0x11c 0 0 0
137 0x11d 0 0 0
138 0x11e 0 0 0
139 0x11f 0 0 0>;
140};
141
142timer@42100 {
143 compatible = "fsl,mpic-global-timer";
144 reg = <0x42100 0x100 0x42300 4>;
145 interrupts = <4 0 3 0
146 5 0 3 0
147 6 0 3 0
148 7 0 3 0>;
149};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-sec6.0-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-sec6.0-0.dtsi
new file mode 100644
index 000000000000..f75b4f820c3c
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-sec6.0-0.dtsi
@@ -0,0 +1,56 @@
1/*
2 * QorIQ Sec/Crypto 6.0 device tree stub
3 *
4 * Copyright 2013 Freescale Semiconductor Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35 compatible = "fsl,sec-v6.0";
36 fsl,sec-era = <6>;
37 #address-cells = <1>;
38 #size-cells = <1>;
39
40 jr@1000 {
41 compatible = "fsl,sec-v6.0-job-ring",
42 "fsl,sec-v5.2-job-ring",
43 "fsl,sec-v5.0-job-ring",
44 "fsl,sec-v4.4-job-ring",
45 "fsl,sec-v4.0-job-ring";
46 reg = <0x1000 0x1000>;
47 };
48
49 jr@2000 {
50 compatible = "fsl,sec-v6.0-job-ring",
51 "fsl,sec-v5.2-job-ring",
52 "fsl,sec-v5.0-job-ring",
53 "fsl,sec-v4.4-job-ring",
54 "fsl,sec-v4.0-job-ring";
55 reg = <0x2000 0x1000>;
56 };
diff --git a/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi b/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi
index bd611a9cad32..510afa362de1 100644
--- a/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi
@@ -358,7 +358,7 @@
358 16 2 1 30>; 358 16 2 1 30>;
359 }; 359 };
360 360
361/include/ "qoriq-mpic.dtsi" 361/include/ "qoriq-mpic4.3.dtsi"
362 362
363 guts: global-utilities@e0000 { 363 guts: global-utilities@e0000 {
364 compatible = "fsl,t4240-device-config", "fsl,qoriq-device-config-2.0"; 364 compatible = "fsl,t4240-device-config", "fsl,qoriq-device-config-2.0";
diff --git a/arch/powerpc/boot/dts/include/dt-bindings b/arch/powerpc/boot/dts/include/dt-bindings
new file mode 120000
index 000000000000..08c00e4972fa
--- /dev/null
+++ b/arch/powerpc/boot/dts/include/dt-bindings
@@ -0,0 +1 @@
../../../../../include/dt-bindings \ No newline at end of file
diff --git a/arch/powerpc/boot/dts/mpc5121ads.dts b/arch/powerpc/boot/dts/mpc5121ads.dts
index 7d3cb79185cb..c228a0a232a6 100644
--- a/arch/powerpc/boot/dts/mpc5121ads.dts
+++ b/arch/powerpc/boot/dts/mpc5121ads.dts
@@ -9,7 +9,7 @@
9 * option) any later version. 9 * option) any later version.
10 */ 10 */
11 11
12/include/ "mpc5121.dtsi" 12#include <mpc5121.dtsi>
13 13
14/ { 14/ {
15 model = "mpc5121ads"; 15 model = "mpc5121ads";
diff --git a/arch/powerpc/boot/dts/p1020rdb-pd.dts b/arch/powerpc/boot/dts/p1020rdb-pd.dts
new file mode 100644
index 000000000000..987017ea36b6
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1020rdb-pd.dts
@@ -0,0 +1,280 @@
1/*
2 * P1020 RDB-PD Device Tree Source (32-bit address map)
3 *
4 * Copyright 2013 Freescale Semiconductor Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35/include/ "fsl/p1020si-pre.dtsi"
36/ {
37 model = "fsl,P1020RDB-PD";
38 compatible = "fsl,P1020RDB-PD";
39
40 memory {
41 device_type = "memory";
42 };
43
44 lbc: localbus@ffe05000 {
45 reg = <0x0 0xffe05000 0x0 0x1000>;
46
47 /* NOR, NAND flash, L2 switch and CPLD */
48 ranges = <0x0 0x0 0x0 0xec000000 0x04000000
49 0x1 0x0 0x0 0xff800000 0x00040000
50 0x2 0x0 0x0 0xffa00000 0x00020000
51 0x3 0x0 0x0 0xffb00000 0x00020000>;
52
53 nor@0,0 {
54 #address-cells = <1>;
55 #size-cells = <1>;
56 compatible = "cfi-flash";
57 reg = <0x0 0x0 0x4000000>;
58 bank-width = <2>;
59 device-width = <1>;
60
61 partition@0 {
62 /* 128KB for DTB Image */
63 reg = <0x0 0x00020000>;
64 label = "NOR DTB Image";
65 };
66
67 partition@20000 {
68 /* 3.875 MB for Linux Kernel Image */
69 reg = <0x00020000 0x003e0000>;
70 label = "NOR Linux Kernel Image";
71 };
72
73 partition@400000 {
74 /* 58MB for Root file System */
75 reg = <0x00400000 0x03a00000>;
76 label = "NOR Root File System";
77 };
78
79 partition@3e00000 {
80 /* This location must not be altered */
81 /* 1M for Vitesse 7385 Switch firmware */
82 reg = <0x3e00000 0x00100000>;
83 label = "NOR Vitesse-7385 Firmware";
84 read-only;
85 };
86
87 partition@3f00000 {
88 /* This location must not be altered */
89 /* 512KB for u-boot Bootloader Image */
90 /* 512KB for u-boot Environment Variables */
91 reg = <0x03f00000 0x00100000>;
92 label = "NOR U-Boot Image";
93 read-only;
94 };
95 };
96
97 nand@1,0 {
98 #address-cells = <1>;
99 #size-cells = <1>;
100 compatible = "fsl,p1020-fcm-nand",
101 "fsl,elbc-fcm-nand";
102 reg = <0x1 0x0 0x40000>;
103
104 partition@0 {
105 /* This location must not be altered */
106 /* 1MB for u-boot Bootloader Image */
107 reg = <0x0 0x00100000>;
108 label = "NAND U-Boot Image";
109 read-only;
110 };
111
112 partition@100000 {
113 /* 1MB for DTB Image */
114 reg = <0x00100000 0x00100000>;
115 label = "NAND DTB Image";
116 };
117
118 partition@200000 {
119 /* 4MB for Linux Kernel Image */
120 reg = <0x00200000 0x00400000>;
121 label = "NAND Linux Kernel Image";
122 };
123
124 partition@600000 {
125 /* 122MB for File System Image */
126 reg = <0x00600000 0x07a00000>;
127 label = "NAND File System Image";
128 };
129 };
130
131 cpld@2,0 {
132 compatible = "fsl,p1020rdb-pd-cpld";
133 reg = <0x2 0x0 0x20000>;
134 };
135
136 L2switch@3,0 {
137 #address-cells = <1>;
138 #size-cells = <1>;
139 compatible = "vitesse-7385";
140 reg = <0x3 0x0 0x20000>;
141 };
142 };
143
144 soc: soc@ffe00000 {
145 ranges = <0x0 0x0 0xffe00000 0x100000>;
146
147 i2c@3000 {
148 rtc@68 {
149 compatible = "dallas,ds1339";
150 reg = <0x68>;
151 };
152 };
153
154 spi@7000 {
155 flash@0 {
156 #address-cells = <1>;
157 #size-cells = <1>;
158 compatible = "spansion,s25sl12801";
159 reg = <0>;
160 /* input clock */
161 spi-max-frequency = <40000000>;
162
163 partition@0 {
164 /* 512KB for u-boot Bootloader Image */
165 reg = <0x0 0x00080000>;
166 label = "SPI U-Boot Image";
167 read-only;
168 };
169
170 partition@80000 {
 171				/* 512KB for DTB Image */
172 reg = <0x00080000 0x00080000>;
173 label = "SPI DTB Image";
174 };
175
176 partition@100000 {
177 /* 4MB for Linux Kernel Image */
178 reg = <0x00100000 0x00400000>;
179 label = "SPI Linux Kernel Image";
180 };
181
182 partition@500000 {
183 /* 11MB for FS System Image */
184 reg = <0x00500000 0x00b00000>;
185 label = "SPI File System Image";
186 };
187 };
188
189 slic@0 {
190 compatible = "zarlink,le88266";
191 reg = <1>;
192 spi-max-frequency = <8000000>;
193 };
194
195 slic@1 {
196 compatible = "zarlink,le88266";
197 reg = <2>;
198 spi-max-frequency = <8000000>;
199 };
200 };
201
202 mdio@24000 {
203 phy0: ethernet-phy@0 {
204 interrupts = <3 1 0 0>;
205 reg = <0x0>;
206 };
207
208 phy1: ethernet-phy@1 {
209 interrupts = <2 1 0 0>;
210 reg = <0x1>;
211 };
212 };
213
214 mdio@25000 {
215 tbi1: tbi-phy@11 {
216 reg = <0x11>;
217 device_type = "tbi-phy";
218 };
219 };
220
221 mdio@26000 {
222 tbi2: tbi-phy@11 {
223 reg = <0x11>;
224 device_type = "tbi-phy";
225 };
226 };
227
228 enet0: ethernet@b0000 {
229 fixed-link = <1 1 1000 0 0>;
230 phy-connection-type = "rgmii-id";
231 };
232
233 enet1: ethernet@b1000 {
234 phy-handle = <&phy0>;
235 tbi-handle = <&tbi1>;
236 phy-connection-type = "sgmii";
237 };
238
239 enet2: ethernet@b2000 {
240 phy-handle = <&phy1>;
241 phy-connection-type = "rgmii-id";
242 };
243
244 usb@22000 {
245 phy_type = "ulpi";
246 };
247 };
248
249 pci0: pcie@ffe09000 {
250 reg = <0x0 0xffe09000 0x0 0x1000>;
251 ranges = <0x2000000 0x0 0xa0000000 0x0 0xa0000000 0x0 0x20000000
252 0x1000000 0x0 0x00000000 0x0 0xffc10000 0x0 0x10000>;
253 pcie@0 {
254 ranges = <0x2000000 0x0 0xa0000000
255 0x2000000 0x0 0xa0000000
256 0x0 0x20000000
257
258 0x1000000 0x0 0x0
259 0x1000000 0x0 0x0
260 0x0 0x100000>;
261 };
262 };
263
264 pci1: pcie@ffe0a000 {
265 reg = <0x0 0xffe0a000 0x0 0x1000>;
266 ranges = <0x2000000 0x0 0x80000000 0x0 0x80000000 0x0 0x20000000
267 0x1000000 0x0 0x00000000 0x0 0xffc00000 0x0 0x10000>;
268 pcie@0 {
269 ranges = <0x2000000 0x0 0x80000000
270 0x2000000 0x0 0x80000000
271 0x0 0x20000000
272
273 0x1000000 0x0 0x0
274 0x1000000 0x0 0x0
275 0x0 0x100000>;
276 };
277 };
278};
279
280/include/ "fsl/p1020si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1023rdb.dts b/arch/powerpc/boot/dts/p1023rdb.dts
new file mode 100644
index 000000000000..0a06a88ddbd5
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1023rdb.dts
@@ -0,0 +1,234 @@
1/*
2 * P1023 RDB Device Tree Source
3 *
4 * Copyright 2013 Freescale Semiconductor Inc.
5 *
6 * Author: Chunhe Lan <Chunhe.Lan@freescale.com>
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * * Neither the name of Freescale Semiconductor nor the
16 * names of its contributors may be used to endorse or promote products
17 * derived from this software without specific prior written permission.
18 *
19 *
20 * ALTERNATIVELY, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") as published by the Free Software
22 * Foundation, either version 2 of that License or (at your option) any
23 * later version.
24 *
25 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
26 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
27 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
28 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
29 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
30 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
31 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
32 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
33 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
34 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 */
36
37/include/ "fsl/p1023si-pre.dtsi"
38
39/ {
40 model = "fsl,P1023";
41 compatible = "fsl,P1023RDB";
42 #address-cells = <2>;
43 #size-cells = <2>;
44 interrupt-parent = <&mpic>;
45
46 memory {
47 device_type = "memory";
48 };
49
50 soc: soc@ff600000 {
51 ranges = <0x0 0x0 0xff600000 0x200000>;
52
53 i2c@3000 {
54 eeprom@53 {
55 compatible = "at24,24c04";
56 reg = <0x53>;
57 };
58
59 rtc@6f {
60 compatible = "microchip,mcp7941x";
61 reg = <0x6f>;
62 };
63 };
64
65 usb@22000 {
66 dr_mode = "host";
67 phy_type = "ulpi";
68 };
69 };
70
71 lbc: localbus@ff605000 {
72 reg = <0 0xff605000 0 0x1000>;
73
74 /* NOR, NAND Flashes */
75 ranges = <0x0 0x0 0x0 0xec000000 0x04000000
76 0x1 0x0 0x0 0xffa00000 0x08000000>;
77
78 nor@0,0 {
79 #address-cells = <1>;
80 #size-cells = <1>;
81 compatible = "cfi-flash";
82 reg = <0x0 0x0 0x04000000>;
83 bank-width = <2>;
84 device-width = <1>;
85
86 partition@0 {
87 /* 48MB for Root File System */
88 reg = <0x00000000 0x03000000>;
89 label = "NOR Root File System";
90 };
91
92 partition@3000000 {
93 /* 1MB for DTB Image */
94 reg = <0x03000000 0x00100000>;
95 label = "NOR DTB Image";
96 };
97
98 partition@3100000 {
99 /* 14MB for Linux Kernel Image */
100 reg = <0x03100000 0x00e00000>;
101 label = "NOR Linux Kernel Image";
102 };
103
104 partition@3f00000 {
105 /* This location must not be altered */
106 /* 512KB for u-boot Bootloader Image */
107 /* 512KB for u-boot Environment Variables */
108 reg = <0x03f00000 0x00100000>;
109 label = "NOR U-Boot Image";
110 read-only;
111 };
112 };
113
114 nand@1,0 {
115 #address-cells = <1>;
116 #size-cells = <1>;
117 compatible = "fsl,elbc-fcm-nand";
118 reg = <0x1 0x0 0x40000>;
119
120 partition@0 {
121 /* This location must not be altered */
122 /* 1MB for u-boot Bootloader Image */
123 reg = <0x0 0x00100000>;
124 label = "NAND U-Boot Image";
125 read-only;
126 };
127
128 partition@100000 {
129 /* 1MB for DTB Image */
130 reg = <0x00100000 0x00100000>;
131 label = "NAND DTB Image";
132 };
133
134 partition@200000 {
135 /* 14MB for Linux Kernel Image */
136 reg = <0x00200000 0x00e00000>;
137 label = "NAND Linux Kernel Image";
138 };
139
140 partition@1000000 {
141 /* 96MB for Root File System Image */
142 reg = <0x01000000 0x06000000>;
143 label = "NAND Root File System";
144 };
145
146 partition@7000000 {
147 /* 16MB for User Writable Area */
148 reg = <0x07000000 0x01000000>;
149 label = "NAND Writable User area";
150 };
151 };
152 };
153
154 pci0: pcie@ff60a000 {
155 reg = <0 0xff60a000 0 0x1000>;
156 ranges = <0x2000000 0x0 0xc0000000 0 0xc0000000 0x0 0x20000000
157 0x1000000 0x0 0x00000000 0 0xffc20000 0x0 0x10000>;
158 pcie@0 {
159 /* IRQ[0:3] are pulled up on board, set to active-low */
160 interrupt-map-mask = <0xf800 0 0 7>;
161 interrupt-map = <
162 /* IDSEL 0x0 */
163 0000 0 0 1 &mpic 0 1 0 0
164 0000 0 0 2 &mpic 1 1 0 0
165 0000 0 0 3 &mpic 2 1 0 0
166 0000 0 0 4 &mpic 3 1 0 0
167 >;
168 ranges = <0x2000000 0x0 0xc0000000
169 0x2000000 0x0 0xc0000000
170 0x0 0x20000000
171
172 0x1000000 0x0 0x0
173 0x1000000 0x0 0x0
174 0x0 0x100000>;
175 };
176 };
177
178 board_pci1: pci1: pcie@ff609000 {
179 reg = <0 0xff609000 0 0x1000>;
180 ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
181 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
182 pcie@0 {
183 /*
184 * IRQ[4:6] only for PCIe, set to active-high,
185 * IRQ[7] is pulled up on board, set to active-low
186 */
187 interrupt-map-mask = <0xf800 0 0 7>;
188 interrupt-map = <
189 /* IDSEL 0x0 */
190 0000 0 0 1 &mpic 4 2 0 0
191 0000 0 0 2 &mpic 5 2 0 0
192 0000 0 0 3 &mpic 6 2 0 0
193 0000 0 0 4 &mpic 7 1 0 0
194 >;
195 ranges = <0x2000000 0x0 0xa0000000
196 0x2000000 0x0 0xa0000000
197 0x0 0x20000000
198
199 0x1000000 0x0 0x0
200 0x1000000 0x0 0x0
201 0x0 0x100000>;
202 };
203 };
204
205 pci2: pcie@ff60b000 {
206 reg = <0 0xff60b000 0 0x1000>;
207 ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
208 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
209 pcie@0 {
210 /*
211 * IRQ[8:10] are pulled up on board, set to active-low
212 * IRQ[11] only for PCIe, set to active-high,
213 */
214 interrupt-map-mask = <0xf800 0 0 7>;
215 interrupt-map = <
216 /* IDSEL 0x0 */
217 0000 0 0 1 &mpic 8 1 0 0
218 0000 0 0 2 &mpic 9 1 0 0
219 0000 0 0 3 &mpic 10 1 0 0
220 0000 0 0 4 &mpic 11 2 0 0
221 >;
222 ranges = <0x2000000 0x0 0x80000000
223 0x2000000 0x0 0x80000000
224 0x0 0x20000000
225
226 0x1000000 0x0 0x0
227 0x1000000 0x0 0x0
228 0x0 0x100000>;
229 };
230 };
231
232};
233
234/include/ "fsl/p1023si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/pdm360ng.dts b/arch/powerpc/boot/dts/pdm360ng.dts
index 74337403faee..871c16d1ad5e 100644
--- a/arch/powerpc/boot/dts/pdm360ng.dts
+++ b/arch/powerpc/boot/dts/pdm360ng.dts
@@ -13,7 +13,7 @@
13 * option) any later version. 13 * option) any later version.
14 */ 14 */
15 15
16/include/ "mpc5121.dtsi" 16#include <mpc5121.dtsi>
17 17
18/ { 18/ {
19 model = "pdm360ng"; 19 model = "pdm360ng";
diff --git a/arch/powerpc/boot/ppc_asm.h b/arch/powerpc/boot/ppc_asm.h
index 1c2c2817f9b7..eb0e98be69e0 100644
--- a/arch/powerpc/boot/ppc_asm.h
+++ b/arch/powerpc/boot/ppc_asm.h
@@ -59,4 +59,7 @@
59#define r30 30 59#define r30 30
60#define r31 31 60#define r31 31
61 61
62#define SPRN_TBRL 268
63#define SPRN_TBRU 269
64
62#endif /* _PPC64_PPC_ASM_H */ 65#endif /* _PPC64_PPC_ASM_H */
diff --git a/arch/powerpc/boot/util.S b/arch/powerpc/boot/util.S
index 427ddfc11991..5143228e3e5f 100644
--- a/arch/powerpc/boot/util.S
+++ b/arch/powerpc/boot/util.S
@@ -71,18 +71,18 @@ udelay:
71 add r4,r4,r5 71 add r4,r4,r5
72 addi r4,r4,-1 72 addi r4,r4,-1
73 divw r4,r4,r5 /* BUS ticks */ 73 divw r4,r4,r5 /* BUS ticks */
741: mftbu r5 741: mfspr r5, SPRN_TBRU
75 mftb r6 75 mfspr r6, SPRN_TBRL
76 mftbu r7 76 mfspr r7, SPRN_TBRU
77 cmpw 0,r5,r7 77 cmpw 0,r5,r7
78 bne 1b /* Get [synced] base time */ 78 bne 1b /* Get [synced] base time */
79 addc r9,r6,r4 /* Compute end time */ 79 addc r9,r6,r4 /* Compute end time */
80 addze r8,r5 80 addze r8,r5
812: mftbu r5 812: mfspr r5, SPRN_TBRU
82 cmpw 0,r5,r8 82 cmpw 0,r5,r8
83 blt 2b 83 blt 2b
84 bgt 3f 84 bgt 3f
85 mftb r6 85 mfspr r6, SPRN_TBRL
86 cmpw 0,r6,r9 86 cmpw 0,r6,r9
87 blt 2b 87 blt 2b
883: blr 883: blr
diff --git a/arch/powerpc/configs/85xx/p1023rds_defconfig b/arch/powerpc/configs/85xx/p1023_defconfig
index b80bcc69d1f7..b06d37da44f4 100644
--- a/arch/powerpc/configs/85xx/p1023rds_defconfig
+++ b/arch/powerpc/configs/85xx/p1023_defconfig
@@ -1,14 +1,13 @@
1CONFIG_PPC_85xx=y 1CONFIG_PPC_85xx=y
2CONFIG_SMP=y 2CONFIG_SMP=y
3CONFIG_NR_CPUS=2 3CONFIG_NR_CPUS=2
4CONFIG_EXPERIMENTAL=y
5CONFIG_SYSVIPC=y 4CONFIG_SYSVIPC=y
6CONFIG_POSIX_MQUEUE=y 5CONFIG_POSIX_MQUEUE=y
7CONFIG_BSD_PROCESS_ACCT=y 6CONFIG_BSD_PROCESS_ACCT=y
8CONFIG_AUDIT=y 7CONFIG_AUDIT=y
9CONFIG_IRQ_DOMAIN_DEBUG=y
10CONFIG_NO_HZ=y 8CONFIG_NO_HZ=y
11CONFIG_HIGH_RES_TIMERS=y 9CONFIG_HIGH_RES_TIMERS=y
10CONFIG_RCU_FANOUT=32
12CONFIG_IKCONFIG=y 11CONFIG_IKCONFIG=y
13CONFIG_IKCONFIG_PROC=y 12CONFIG_IKCONFIG_PROC=y
14CONFIG_LOG_BUF_SHIFT=14 13CONFIG_LOG_BUF_SHIFT=14
@@ -22,6 +21,8 @@ CONFIG_MODVERSIONS=y
22# CONFIG_BLK_DEV_BSG is not set 21# CONFIG_BLK_DEV_BSG is not set
23CONFIG_PARTITION_ADVANCED=y 22CONFIG_PARTITION_ADVANCED=y
24CONFIG_MAC_PARTITION=y 23CONFIG_MAC_PARTITION=y
24CONFIG_PHYSICAL_START=0x00000000
25CONFIG_P1023_RDB=y
25CONFIG_P1023_RDS=y 26CONFIG_P1023_RDS=y
26CONFIG_QUICC_ENGINE=y 27CONFIG_QUICC_ENGINE=y
27CONFIG_QE_GPIO=y 28CONFIG_QE_GPIO=y
@@ -63,10 +64,21 @@ CONFIG_IPV6=y
63CONFIG_IP_SCTP=m 64CONFIG_IP_SCTP=m
64CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 65CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
65CONFIG_DEVTMPFS=y 66CONFIG_DEVTMPFS=y
67CONFIG_DEVTMPFS_MOUNT=y
68CONFIG_MTD=y
69CONFIG_MTD_CMDLINE_PARTS=y
70CONFIG_MTD_CHAR=y
71CONFIG_MTD_BLOCK=y
72CONFIG_MTD_CFI=y
73CONFIG_MTD_CFI_AMDSTD=y
74CONFIG_MTD_PHYSMAP_OF=y
75CONFIG_MTD_NAND=y
76CONFIG_MTD_NAND_FSL_ELBC=y
66CONFIG_PROC_DEVICETREE=y 77CONFIG_PROC_DEVICETREE=y
67CONFIG_BLK_DEV_LOOP=y 78CONFIG_BLK_DEV_LOOP=y
68CONFIG_BLK_DEV_RAM=y 79CONFIG_BLK_DEV_RAM=y
69CONFIG_BLK_DEV_RAM_SIZE=131072 80CONFIG_BLK_DEV_RAM_SIZE=131072
81CONFIG_EEPROM_AT24=y
70CONFIG_EEPROM_LEGACY=y 82CONFIG_EEPROM_LEGACY=y
71CONFIG_BLK_DEV_SD=y 83CONFIG_BLK_DEV_SD=y
72CONFIG_CHR_DEV_ST=y 84CONFIG_CHR_DEV_ST=y
@@ -82,6 +94,8 @@ CONFIG_DUMMY=y
82CONFIG_FS_ENET=y 94CONFIG_FS_ENET=y
83CONFIG_FSL_PQ_MDIO=y 95CONFIG_FSL_PQ_MDIO=y
84CONFIG_E1000E=y 96CONFIG_E1000E=y
97CONFIG_PHYLIB=y
98CONFIG_AT803X_PHY=y
85CONFIG_MARVELL_PHY=y 99CONFIG_MARVELL_PHY=y
86CONFIG_DAVICOM_PHY=y 100CONFIG_DAVICOM_PHY=y
87CONFIG_CICADA_PHY=y 101CONFIG_CICADA_PHY=y
@@ -96,12 +110,15 @@ CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_NR_UARTS=2
 CONFIG_SERIAL_8250_RUNTIME_UARTS=2
+CONFIG_SERIAL_8250_EXTENDED=y
 CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
 CONFIG_SERIAL_8250_DETECT_IRQ=y
 CONFIG_SERIAL_8250_RSA=y
-CONFIG_SERIAL_QE=m
+CONFIG_HW_RANDOM=y
 CONFIG_NVRAM=y
 CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_CPM=m
 CONFIG_I2C_MPC=y
 CONFIG_GPIO_MPC8XXX=y
@@ -121,6 +138,7 @@ CONFIG_USB_STORAGE=y
 CONFIG_EDAC=y
 CONFIG_EDAC_MM_EDAC=y
 CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_DS1307=y
 CONFIG_RTC_DRV_CMOS=y
 CONFIG_DMADEVICES=y
 CONFIG_FSL_DMA=y
@@ -161,6 +179,7 @@ CONFIG_DEBUG_FS=y
 CONFIG_DETECT_HUNG_TASK=y
 # CONFIG_DEBUG_BUGVERBOSE is not set
 CONFIG_DEBUG_INFO=y
+CONFIG_STRICT_DEVMEM=y
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_SHA256=y
 CONFIG_CRYPTO_SHA512=y
diff --git a/arch/powerpc/configs/corenet32_smp_defconfig b/arch/powerpc/configs/corenet32_smp_defconfig
index 60027c2a7034..3dfab4c40c76 100644
--- a/arch/powerpc/configs/corenet32_smp_defconfig
+++ b/arch/powerpc/configs/corenet32_smp_defconfig
@@ -70,6 +70,7 @@ CONFIG_IPV6=y
 CONFIG_IP_SCTP=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_MTD=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_CHAR=y
diff --git a/arch/powerpc/configs/corenet64_smp_defconfig b/arch/powerpc/configs/corenet64_smp_defconfig
index 6c8b020806ff..fa94fb3bb44d 100644
--- a/arch/powerpc/configs/corenet64_smp_defconfig
+++ b/arch/powerpc/configs/corenet64_smp_defconfig
@@ -27,6 +27,8 @@ CONFIG_P5040_DS=y
 CONFIG_T4240_QDS=y
 # CONFIG_PPC_OF_BOOT_TRAMPOLINE is not set
 CONFIG_BINFMT_MISC=m
+CONFIG_MATH_EMULATION=y
+CONFIG_MATH_EMULATION_HW_UNIMPLEMENTED=y
 CONFIG_FSL_IFC=y
 CONFIG_PCIEPORTBUS=y
 CONFIG_PCI_MSI=y
@@ -59,6 +61,7 @@ CONFIG_IPV6=y
 CONFIG_IP_SCTP=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_MTD=y
 CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_OF_PARTS=y
diff --git a/arch/powerpc/configs/mpc83xx_defconfig b/arch/powerpc/configs/mpc83xx_defconfig
index 09116c6a6719..23fec79964cf 100644
--- a/arch/powerpc/configs/mpc83xx_defconfig
+++ b/arch/powerpc/configs/mpc83xx_defconfig
@@ -42,6 +42,7 @@ CONFIG_INET_ESP=y
 # CONFIG_IPV6 is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 # CONFIG_FW_LOADER is not set
 CONFIG_MTD=y
 CONFIG_MTD_CHAR=y
diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig
index 5a58882e351e..dc098d988211 100644
--- a/arch/powerpc/configs/mpc85xx_defconfig
+++ b/arch/powerpc/configs/mpc85xx_defconfig
@@ -27,6 +27,7 @@ CONFIG_MPC85xx_MDS=y
 CONFIG_MPC8536_DS=y
 CONFIG_MPC85xx_DS=y
 CONFIG_MPC85xx_RDB=y
+CONFIG_C293_PCIE=y
 CONFIG_P1010_RDB=y
 CONFIG_P1022_DS=y
 CONFIG_P1022_RDK=y
@@ -78,6 +79,7 @@ CONFIG_IPV6=y
 CONFIG_IP_SCTP=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_MTD=y
 CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_OF_PARTS=y
diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig
index 152fa05b15e4..5bca60161bb3 100644
--- a/arch/powerpc/configs/mpc85xx_smp_defconfig
+++ b/arch/powerpc/configs/mpc85xx_smp_defconfig
@@ -30,6 +30,7 @@ CONFIG_MPC85xx_MDS=y
 CONFIG_MPC8536_DS=y
 CONFIG_MPC85xx_DS=y
 CONFIG_MPC85xx_RDB=y
+CONFIG_C293_PCIE=y
 CONFIG_P1010_RDB=y
 CONFIG_P1022_DS=y
 CONFIG_P1022_RDK=y
@@ -81,6 +82,7 @@ CONFIG_IPV6=y
 CONFIG_IP_SCTP=m
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_MTD=y
 CONFIG_MTD_PARTITIONS=y
 CONFIG_MTD_OF_PARTS=y
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index 650757c300db..704e6f10ae80 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -2,3 +2,4 @@
 generic-y += clkdev.h
 generic-y += rwsem.h
 generic-y += trace_clock.h
+generic-y += vtime.h
\ No newline at end of file
diff --git a/arch/powerpc/include/asm/asm-compat.h b/arch/powerpc/include/asm/asm-compat.h
index 6e82f5f9a6fd..4b237aa35660 100644
--- a/arch/powerpc/include/asm/asm-compat.h
+++ b/arch/powerpc/include/asm/asm-compat.h
@@ -32,6 +32,15 @@
 #define PPC_MTOCRF(FXM, RS)	MTOCRF((FXM), RS)
 #define PPC_LR_STKOFF	16
 #define PPC_MIN_STKFRM	112
+
+#ifdef __BIG_ENDIAN__
+#define LDX_BE	stringify_in_c(ldx)
+#define STDX_BE	stringify_in_c(stdx)
+#else
+#define LDX_BE	stringify_in_c(ldbrx)
+#define STDX_BE	stringify_in_c(stdbrx)
+#endif
+
 #else /* 32-bit */
 
 /* operations for longs and pointers */
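
The new LDX_BE/STDX_BE helpers above select a plain indexed load/store
(ldx/stdx) on big-endian builds and the byte-reversing forms (ldbrx/stdbrx)
on little-endian ones, so assembly that accesses big-endian in-memory data
stays correct under either kernel endianness. The C-level equivalent is a
conditional byte swap; a sketch, with a helper name of our own:

    #include <stdint.h>

    /* Load a value stored big-endian in memory, on either endianness. */
    static inline uint64_t load_be64(const uint64_t *p)
    {
    #ifdef __BIG_ENDIAN__
        return *p;                     /* like ldx: no swap needed */
    #else
        return __builtin_bswap64(*p);  /* like ldbrx: byte-reversed load */
    #endif
    }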
diff --git a/arch/powerpc/include/asm/btext.h b/arch/powerpc/include/asm/btext.h
index 906f46e31006..89fc382648bc 100644
--- a/arch/powerpc/include/asm/btext.h
+++ b/arch/powerpc/include/asm/btext.h
@@ -13,6 +13,7 @@ extern void btext_update_display(unsigned long phys, int width, int height,
 extern void btext_setup_display(int width, int height, int depth, int pitch,
 				unsigned long address);
 extern void btext_prepare_BAT(void);
+extern void btext_map(void);
 extern void btext_unmap(void);
 
 extern void btext_drawchar(char c);
diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h
index b843e35122e8..5b9312220e84 100644
--- a/arch/powerpc/include/asm/cacheflush.h
+++ b/arch/powerpc/include/asm/cacheflush.h
@@ -32,13 +32,7 @@ extern void flush_dcache_page(struct page *page);
 
 extern void __flush_disable_L1(void);
 
-extern void __flush_icache_range(unsigned long, unsigned long);
-static inline void flush_icache_range(unsigned long start, unsigned long stop)
-{
-	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
-		__flush_icache_range(start, stop);
-}
-
+extern void flush_icache_range(unsigned long, unsigned long);
 extern void flush_icache_user_range(struct vm_area_struct *vma,
 				    struct page *page, unsigned long addr,
 				    int len);
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 6f3887d884d2..0d4939ba48e7 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -371,14 +371,19 @@ extern const char *powerpc_base_platform;
 #define CPU_FTRS_E500MC	(CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
 	    CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
 	    CPU_FTR_DBELL | CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV)
+/*
+ * e5500/e6500 erratum A-006958 is a timebase bug that can use the
+ * same workaround as CPU_FTR_CELL_TB_BUG.
+ */
 #define CPU_FTRS_E5500	(CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
 	    CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
 	    CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
-	    CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV)
+	    CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV | CPU_FTR_CELL_TB_BUG)
 #define CPU_FTRS_E6500	(CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
 	    CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
 	    CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
-	    CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV | CPU_FTR_ALTIVEC_COMP)
+	    CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV | CPU_FTR_ALTIVEC_COMP | \
+	    CPU_FTR_CELL_TB_BUG)
 #define CPU_FTRS_GENERIC_32	(CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN)
 
 /* 64-bit CPUs */
diff --git a/arch/powerpc/include/asm/emulated_ops.h b/arch/powerpc/include/asm/emulated_ops.h
index 63f2a22e9954..5a8b82aa7241 100644
--- a/arch/powerpc/include/asm/emulated_ops.h
+++ b/arch/powerpc/include/asm/emulated_ops.h
@@ -46,8 +46,6 @@ extern struct ppc_emulated {
 	struct ppc_emulated_entry unaligned;
 #ifdef CONFIG_MATH_EMULATION
 	struct ppc_emulated_entry math;
-#elif defined(CONFIG_8XX_MINIMAL_FPEMU)
-	struct ppc_emulated_entry 8xx;
 #endif
 #ifdef CONFIG_VSX
 	struct ppc_emulated_entry vsx;
diff --git a/arch/powerpc/include/asm/epapr_hcalls.h b/arch/powerpc/include/asm/epapr_hcalls.h
index d3d634274d2c..86b0ac79990c 100644
--- a/arch/powerpc/include/asm/epapr_hcalls.h
+++ b/arch/powerpc/include/asm/epapr_hcalls.h
@@ -105,6 +105,12 @@
 extern bool epapr_paravirt_enabled;
 extern u32 epapr_hypercall_start[];
 
+#ifdef CONFIG_EPAPR_PARAVIRT
+int __init epapr_paravirt_early_init(void);
+#else
+static inline int epapr_paravirt_early_init(void) { return 0; }
+#endif
+
 /*
  * We use "uintptr_t" to define a register because it's guaranteed to be a
  * 32-bit integer on a 32-bit platform, and a 64-bit integer on a 64-bit
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 07ca627e52c0..cca12f084842 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -48,17 +48,18 @@
 #define EX_LR		72
 #define EX_CFAR	80
 #define EX_PPR		88	/* SMT thread status register (priority) */
+#define EX_CTR		96
 
 #ifdef CONFIG_RELOCATABLE
 #define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h)			\
 	ld	r12,PACAKBASE(r13);	/* get high part of &label */	\
 	mfspr	r11,SPRN_##h##SRR0;	/* save SRR0 */			\
 	LOAD_HANDLER(r12,label);					\
-	mtlr	r12;							\
+	mtctr	r12;							\
 	mfspr	r12,SPRN_##h##SRR1;	/* and SRR1 */			\
 	li	r10,MSR_RI;						\
 	mtmsrd	r10,1;		/* Set RI (EE=0) */			\
-	blr;
+	bctr;
 #else
 /* If not relocatable, we can jump directly -- and save messing with LR */
 #define __EXCEPTION_RELON_PROLOG_PSERIES_1(label, h)			\
@@ -97,18 +98,18 @@
 
 #if defined(CONFIG_RELOCATABLE)
 /*
- * If we support interrupts with relocation on AND we're a relocatable
- * kernel, we need to use LR to get to the 2nd level handler. So, save/restore
- * it when required.
+ * If we support interrupts with relocation on AND we're a relocatable kernel,
+ * we need to use CTR to get to the 2nd level handler. So, save/restore it
+ * when required.
  */
-#define SAVE_LR(reg, area)	mflr	reg ;	std	reg,area+EX_LR(r13)
-#define GET_LR(reg, area)	ld	reg,area+EX_LR(r13)
-#define RESTORE_LR(reg, area)	ld	reg,area+EX_LR(r13) ; mtlr reg
+#define SAVE_CTR(reg, area)	mfctr	reg ;	std	reg,area+EX_CTR(r13)
+#define GET_CTR(reg, area)	ld	reg,area+EX_CTR(r13)
+#define RESTORE_CTR(reg, area)	ld	reg,area+EX_CTR(r13) ; mtctr reg
 #else
-/* ...else LR is unused and in register. */
-#define SAVE_LR(reg, area)
-#define GET_LR(reg, area)	mflr	reg
-#define RESTORE_LR(reg, area)
+/* ...else CTR is unused and in register. */
+#define SAVE_CTR(reg, area)
+#define GET_CTR(reg, area)	mfctr	reg
+#define RESTORE_CTR(reg, area)
 #endif
 
 /*
@@ -164,7 +165,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
 #define __EXCEPTION_PROLOG_1(area, extra, vec)				\
 	OPT_SAVE_REG_TO_PACA(area+EX_PPR, r9, CPU_FTR_HAS_PPR);		\
 	OPT_SAVE_REG_TO_PACA(area+EX_CFAR, r10, CPU_FTR_CFAR);		\
-	SAVE_LR(r10, area);						\
+	SAVE_CTR(r10, area);						\
 	mfcr	r9;							\
 	extra(vec);							\
 	std	r11,area+EX_R11(r13);					\
@@ -270,7 +271,7 @@ do_kvm_##n: \
 	sth	r1,PACA_TRAP_SAVE(r13);					\
 	std	r3,area+EX_R3(r13);					\
 	addi	r3,r13,area;		/* r3 -> where regs are saved*/	\
-	RESTORE_LR(r1, area);						\
+	RESTORE_CTR(r1, area);						\
 	b	bad_stack;						\
 3:	std	r9,_CCR(r1);		/* save CR in stackframe */	\
 	std	r11,_NIP(r1);		/* save SRR0 in stackframe */	\
@@ -298,10 +299,10 @@ do_kvm_##n: \
 	ld	r10,area+EX_CFAR(r13);					\
 	std	r10,ORIG_GPR3(r1);					\
 	END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66);	\
-	GET_LR(r9,area);		/* Get LR, later save to stack */ \
+	mflr	r9;			/* Get LR, later save to stack */ \
 	ld	r2,PACATOC(r13);	/* get kernel TOC into r2 */	\
 	std	r9,_LINK(r1);						\
-	mfctr	r10;			/* save CTR in stackframe */	\
+	GET_CTR(r10, area);						\
 	std	r10,_CTR(r1);						\
 	lbz	r10,PACASOFTIRQEN(r13);					\
 	mfspr	r11,SPRN_XER;		/* save XER in stackframe */	\
@@ -479,7 +480,7 @@ label##_relon_hv: \
  */
 
 /* Exception addition: Hard disable interrupts */
-#define DISABLE_INTS	SOFT_DISABLE_INTS(r10,r11)
+#define DISABLE_INTS	RECONCILE_IRQ_STATE(r10,r11)
 
 #define ADD_NVGPRS				\
 	bl	.save_nvgprs
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index dd15e5e37d6d..5a64757dc0d1 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -69,8 +69,18 @@ extern unsigned long pci_dram_offset;
 
 extern resource_size_t isa_mem_base;
 
-#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_INDIRECT_IO)
-#error CONFIG_PPC_INDIRECT_IO is not yet supported on 32 bits
+/* Boolean set by platform if PIO accesses are suppored while _IO_BASE
+ * is not set or addresses cannot be translated to MMIO. This is typically
+ * set when the platform supports "special" PIO accesses via a non memory
+ * mapped mechanism, and allows things like the early udbg UART code to
+ * function.
+ */
+extern bool isa_io_special;
+
+#ifdef CONFIG_PPC32
+#if defined(CONFIG_PPC_INDIRECT_PIO) || defined(CONFIG_PPC_INDIRECT_MMIO)
+#error CONFIG_PPC_INDIRECT_{PIO,MMIO} are not yet supported on 32 bits
+#endif
 #endif
 
 /*
@@ -222,9 +232,9 @@ extern void _memcpy_toio(volatile void __iomem *dest, const void *src,
  * for PowerPC is as close as possible to the x86 version of these, and thus
  * provides fairly heavy weight barriers for the non-raw versions
  *
- * In addition, they support a hook mechanism when CONFIG_PPC_INDIRECT_IO
- * allowing the platform to provide its own implementation of some or all
- * of the accessors.
+ * In addition, they support a hook mechanism when CONFIG_PPC_INDIRECT_MMIO
+ * or CONFIG_PPC_INDIRECT_PIO are set allowing the platform to provide its
+ * own implementation of some or all of the accessors.
  */
 
 /*
@@ -240,8 +250,8 @@ extern void _memcpy_toio(volatile void __iomem *dest, const void *src,
 
 /* Indirect IO address tokens:
  *
- * When CONFIG_PPC_INDIRECT_IO is set, the platform can provide hooks
- * on all IOs. (Note that this is all 64 bits only for now)
+ * When CONFIG_PPC_INDIRECT_MMIO is set, the platform can provide hooks
+ * on all MMIOs. (Note that this is all 64 bits only for now)
  *
  * To help platforms who may need to differenciate MMIO addresses in
  * their hooks, a bitfield is reserved for use by the platform near the
@@ -263,11 +273,14 @@ extern void _memcpy_toio(volatile void __iomem *dest, const void *src,
  *
  * The direct IO mapping operations will then mask off those bits
  * before doing the actual access, though that only happen when
- * CONFIG_PPC_INDIRECT_IO is set, thus be careful when you use that
+ * CONFIG_PPC_INDIRECT_MMIO is set, thus be careful when you use that
  * mechanism
+ *
+ * For PIO, there is a separate CONFIG_PPC_INDIRECT_PIO which makes
+ * all PIO functions call through a hook.
  */
 
-#ifdef CONFIG_PPC_INDIRECT_IO
+#ifdef CONFIG_PPC_INDIRECT_MMIO
 #define PCI_IO_IND_TOKEN_MASK	0x0fff000000000000ul
 #define PCI_IO_IND_TOKEN_SHIFT	48
 #define PCI_FIX_ADDR(addr)						\
@@ -672,7 +685,7 @@ extern void __iomem * __ioremap_at(phys_addr_t pa, void *ea,
 extern void __iounmap_at(void *ea, unsigned long size);
 
 /*
- * When CONFIG_PPC_INDIRECT_IO is set, we use the generic iomap implementation
+ * When CONFIG_PPC_INDIRECT_PIO is set, we use the generic iomap implementation
  * which needs some additional definitions here. They basically allow PIO
  * space overall to be 1GB. This will work as long as we never try to use
  * iomap to map MMIO below 1GB which should be fine on ppc64
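
For reference, the token mechanism the renamed CONFIG_PPC_INDIRECT_MMIO
comment describes reserves bits 48-59 of an ioremap'd address for a platform
cookie, which the default accessors mask off before dereferencing. A rough
sketch of the encode/strip steps, with helper names that are ours rather than
the kernel's:

    #include <stdint.h>

    #define PCI_IO_IND_TOKEN_MASK   0x0fff000000000000ul
    #define PCI_IO_IND_TOKEN_SHIFT  48

    /* Stash a platform token in the reserved bits of an MMIO address. */
    static inline uint64_t mmio_set_token(uint64_t addr, uint64_t token)
    {
        return (addr & ~PCI_IO_IND_TOKEN_MASK) |
               (token << PCI_IO_IND_TOKEN_SHIFT);
    }

    /* What PCI_FIX_ADDR amounts to: strip the token before the access. */
    static inline uint64_t mmio_strip_token(uint64_t addr)
    {
        return addr & ~PCI_IO_IND_TOKEN_MASK;
    }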
diff --git a/arch/powerpc/include/asm/irqflags.h b/arch/powerpc/include/asm/irqflags.h
index 6f9b6e23dc5a..f51a5580bfd0 100644
--- a/arch/powerpc/include/asm/irqflags.h
+++ b/arch/powerpc/include/asm/irqflags.h
@@ -40,9 +40,10 @@
 #define TRACE_DISABLE_INTS	TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_off)
 
 /*
- * This is used by assembly code to soft-disable interrupts
+ * This is used by assembly code to soft-disable interrupts first and
+ * reconcile irq state.
  */
-#define SOFT_DISABLE_INTS(__rA, __rB)		\
+#define RECONCILE_IRQ_STATE(__rA, __rB)		\
 	lbz	__rA,PACASOFTIRQEN(r13);	\
 	lbz	__rB,PACAIRQHAPPENED(r13);	\
 	cmpwi	cr0,__rA,0;			\
@@ -58,7 +59,7 @@
 #define TRACE_ENABLE_INTS
 #define TRACE_DISABLE_INTS
 
-#define SOFT_DISABLE_INTS(__rA, __rB)		\
+#define RECONCILE_IRQ_STATE(__rA, __rB)		\
 	lbz	__rA,PACAIRQHAPPENED(r13);	\
 	li	__rB,0;				\
 	ori	__rA,__rA,PACA_IRQ_HARD_DIS;	\
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 08891d07aeb6..fa19e2f1a874 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -334,6 +334,27 @@ static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
 	return r;
 }
 
+/*
+ * Like kvmppc_get_last_inst(), but for fetching a sc instruction.
+ * Because the sc instruction sets SRR0 to point to the following
+ * instruction, we have to fetch from pc - 4.
+ */
+static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu)
+{
+	ulong pc = kvmppc_get_pc(vcpu) - 4;
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	u32 r;
+
+	/* Load the instruction manually if it failed to do so in the
+	 * exit path */
+	if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
+		kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);
+
+	r = svcpu->last_inst;
+	svcpu_put(svcpu);
+	return r;
+}
+
 static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
@@ -446,6 +467,23 @@ static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
 	return vcpu->arch.last_inst;
 }
 
+/*
+ * Like kvmppc_get_last_inst(), but for fetching a sc instruction.
+ * Because the sc instruction sets SRR0 to point to the following
+ * instruction, we have to fetch from pc - 4.
+ */
+static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu)
+{
+	ulong pc = kvmppc_get_pc(vcpu) - 4;
+
+	/* Load the instruction manually if it failed to do so in the
+	 * exit path */
+	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
+		kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);
+
+	return vcpu->arch.last_inst;
+}
+
 static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.fault_dar;
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index a1ecb14e4442..86d638a3b359 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -37,7 +37,7 @@ static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
 
 #ifdef CONFIG_KVM_BOOK3S_64_HV
 #define KVM_DEFAULT_HPT_ORDER	24	/* 16MB HPT by default */
-extern int kvm_hpt_order;		/* order of preallocated HPTs */
+extern unsigned long kvm_rma_pages;
 #endif
 
 #define VRMA_VSID	0x1ffffffUL	/* 1TB VSID reserved for VRMA */
@@ -100,7 +100,7 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
 			/* (masks depend on page size) */
 			rb |= 0x1000;		/* page encoding in LP field */
 			rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */
-			rb |= (va_low & 0xfe);	/* AVAL field (P7 doesn't seem to care) */
+			rb |= ((va_low << 4) & 0xf0);	/* AVAL field (P7 doesn't seem to care) */
 		}
 	} else {
 		/* 4kB page */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index af326cde7cb6..33283532e9d8 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -183,13 +183,9 @@ struct kvmppc_spapr_tce_table {
 	struct page *pages[0];
 };
 
-struct kvmppc_linear_info {
-	void		*base_virt;
+struct kvm_rma_info {
+	atomic_t	 use_count;
 	unsigned long	 base_pfn;
-	unsigned long	 npages;
-	struct list_head list;
-	atomic_t	 use_count;
-	int		 type;
 };
 
 /* XICS components, defined in book3s_xics.c */
@@ -246,7 +242,7 @@ struct kvm_arch {
 	int tlbie_lock;
 	unsigned long lpcr;
 	unsigned long rmor;
-	struct kvmppc_linear_info *rma;
+	struct kvm_rma_info *rma;
 	unsigned long vrma_slb_v;
 	int rma_setup_done;
 	int using_mmu_notifiers;
@@ -259,7 +255,7 @@ struct kvm_arch {
 	spinlock_t slot_phys_lock;
 	cpumask_t need_tlb_flush;
 	struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
-	struct kvmppc_linear_info *hpt_li;
+	int hpt_cma_alloc;
 #endif /* CONFIG_KVM_BOOK3S_64_HV */
 #ifdef CONFIG_PPC_BOOK3S_64
 	struct list_head spapr_tce_tables;
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index a5287fe03d77..b15554a26c20 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -137,10 +137,10 @@ extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 				unsigned long ioba, unsigned long tce);
 extern long kvm_vm_ioctl_allocate_rma(struct kvm *kvm,
 				struct kvm_allocate_rma *rma);
-extern struct kvmppc_linear_info *kvm_alloc_rma(void);
-extern void kvm_release_rma(struct kvmppc_linear_info *ri);
-extern struct kvmppc_linear_info *kvm_alloc_hpt(void);
-extern void kvm_release_hpt(struct kvmppc_linear_info *li);
+extern struct kvm_rma_info *kvm_alloc_rma(void);
+extern void kvm_release_rma(struct kvm_rma_info *ri);
+extern struct page *kvm_alloc_hpt(unsigned long nr_pages);
+extern void kvm_release_hpt(struct page *page, unsigned long nr_pages);
 extern int kvmppc_core_init_vm(struct kvm *kvm);
 extern void kvmppc_core_destroy_vm(struct kvm *kvm);
 extern void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
@@ -261,6 +261,7 @@ void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);
 struct openpic;
 
 #ifdef CONFIG_KVM_BOOK3S_64_HV
+extern void kvm_cma_reserve(void) __init;
 static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
 {
 	paca[cpu].kvm_hstate.xics_phys = addr;
@@ -281,13 +282,12 @@ static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
 }
 
 extern void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu);
-extern void kvm_linear_init(void);
 
 #else
-static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
+static inline void __init kvm_cma_reserve(void)
 {}
 
-static inline void kvm_linear_init(void)
+static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
 {}
 
 static inline u32 kvmppc_get_xics_latch(void)
@@ -394,10 +394,15 @@ static inline void kvmppc_mmu_flush_icache(pfn_t pfn)
 	}
 }
 
-/* Please call after prepare_to_enter. This function puts the lazy ee state
-   back to normal mode, without actually enabling interrupts. */
-static inline void kvmppc_lazy_ee_enable(void)
+/*
+ * Please call after prepare_to_enter. This function puts the lazy ee and irq
+ * disabled tracking state back to normal mode, without actually enabling
+ * interrupts.
+ */
+static inline void kvmppc_fix_ee_before_entry(void)
 {
+	trace_hardirqs_on();
+
 #ifdef CONFIG_PPC64
 	/* Only need to enable IRQs by hard enabling them after this */
 	local_paca->irq_happened = 0;
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index 9b12f88d4adb..4470d1e34d23 100644
--- a/arch/powerpc/include/asm/lppaca.h
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -48,15 +48,13 @@
 struct lppaca {
 	/* cacheline 1 contains read-only data */
 
-	u32	desc;			/* Eye catcher 0xD397D781 */
-	u16	size;			/* Size of this struct */
-	u16	reserved1;
-	u16	reserved2:14;
-	u8	shared_proc:1;		/* Shared processor indicator */
-	u8	secondary_thread:1;	/* Secondary thread indicator */
+	__be32	desc;			/* Eye catcher 0xD397D781 */
+	__be16	size;			/* Size of this struct */
+	u8	reserved1[3];
+	u8	__old_status;		/* Old status, including shared proc */
 	u8	reserved3[14];
-	volatile u32 dyn_hw_node_id;	/* Dynamic hardware node id */
-	volatile u32 dyn_hw_proc_id;	/* Dynamic hardware proc id */
+	volatile __be32 dyn_hw_node_id;	/* Dynamic hardware node id */
+	volatile __be32 dyn_hw_proc_id;	/* Dynamic hardware proc id */
 	u8	reserved4[56];
 	volatile u8 vphn_assoc_counts[8]; /* Virtual processor home node */
 					  /* associativity change counters */
@@ -73,9 +71,9 @@ struct lppaca {
 	u8	fpregs_in_use;
 	u8	pmcregs_in_use;
 	u8	reserved8[28];
-	u64	wait_state_cycles;	/* Wait cycles for this proc */
+	__be64	wait_state_cycles;	/* Wait cycles for this proc */
 	u8	reserved9[28];
-	u16	slb_count;		/* # of SLBs to maintain */
+	__be16	slb_count;		/* # of SLBs to maintain */
 	u8	idle;			/* Indicate OS is idle */
 	u8	vmxregs_in_use;
 
@@ -89,17 +87,17 @@ struct lppaca {
 	 * NOTE: This value will ALWAYS be zero for dedicated processors and
 	 * will NEVER be zero for shared processors (ie, initialized to a 1).
 	 */
-	volatile u32 yield_count;
-	volatile u32 dispersion_count;	/* dispatch changed physical cpu */
-	volatile u64 cmo_faults;	/* CMO page fault count */
-	volatile u64 cmo_fault_time;	/* CMO page fault time */
+	volatile __be32 yield_count;
+	volatile __be32 dispersion_count; /* dispatch changed physical cpu */
+	volatile __be64 cmo_faults;	/* CMO page fault count */
+	volatile __be64 cmo_fault_time;	/* CMO page fault time */
 	u8	reserved10[104];
 
 	/* cacheline 4-5 */
 
-	u32	page_ins;		/* CMO Hint - # page ins by OS */
+	__be32	page_ins;		/* CMO Hint - # page ins by OS */
 	u8	reserved11[148];
-	volatile u64 dtl_idx;		/* Dispatch Trace Log head index */
+	volatile __be64 dtl_idx;	/* Dispatch Trace Log head index */
 	u8	reserved12[96];
 } __attribute__((__aligned__(0x400)));
 
@@ -108,17 +106,29 @@ extern struct lppaca lppaca[];
 #define lppaca_of(cpu)	(*paca[cpu].lppaca_ptr)
 
 /*
+ * Old kernels used a reserved bit in the VPA to determine if it was running
+ * in shared processor mode. New kernels look for a non zero yield count
+ * but KVM still needs to set the bit to keep the old stuff happy.
+ */
+#define LPPACA_OLD_SHARED_PROC		2
+
+static inline bool lppaca_shared_proc(struct lppaca *l)
+{
+	return l->yield_count != 0;
+}
+
+/*
  * SLB shadow buffer structure as defined in the PAPR. The save_area
  * contains adjacent ESID and VSID pairs for each shadowed SLB. The
  * ESID is stored in the lower 64bits, then the VSID.
  */
 struct slb_shadow {
-	u32	persistent;		/* Number of persistent SLBs */
-	u32	buffer_length;		/* Total shadow buffer length */
-	u64	reserved;
+	__be32	persistent;		/* Number of persistent SLBs */
+	__be32	buffer_length;		/* Total shadow buffer length */
+	__be64	reserved;
 	struct	{
-		u64     esid;
-		u64	vsid;
+		__be64     esid;
+		__be64	vsid;
 	} save_area[SLB_NUM_BOLTED];
 } ____cacheline_aligned;
 
@@ -130,14 +140,14 @@ extern struct slb_shadow slb_shadow[];
 struct dtl_entry {
 	u8	dispatch_reason;
 	u8	preempt_reason;
-	u16	processor_id;
-	u32	enqueue_to_dispatch_time;
-	u32	ready_to_enqueue_time;
-	u32	waiting_to_ready_time;
-	u64	timebase;
-	u64	fault_addr;
-	u64	srr0;
-	u64	srr1;
+	__be16	processor_id;
+	__be32	enqueue_to_dispatch_time;
+	__be32	ready_to_enqueue_time;
+	__be32	waiting_to_ready_time;
+	__be64	timebase;
+	__be64	fault_addr;
+	__be64	srr0;
+	__be64	srr1;
 };
 
 #define DISPATCH_LOG_BYTES	4096	/* bytes per cpu */
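
With these fields retyped as __be16/__be32/__be64, the lppaca, slb_shadow and
dtl_entry layouts keep the big-endian format the hypervisor defines even when
the kernel runs little-endian; C code now has to go through the byteorder
accessors rather than reading the fields directly. A sketch of the resulting
access pattern, the function being our own example rather than a specific
kernel helper:

    #include <asm/byteorder.h>

    /* Bump the VPA yield count: decode BE, modify, re-encode BE. */
    static void vpa_increment_yield_count(struct lppaca *vpa)
    {
        u32 yield = be32_to_cpu(vpa->yield_count);

        vpa->yield_count = cpu_to_be32(yield + 1);
    }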
diff --git a/arch/powerpc/include/asm/mpc5121.h b/arch/powerpc/include/asm/mpc5121.h
index 8ae133eaf9fa..887d3d6133e3 100644
--- a/arch/powerpc/include/asm/mpc5121.h
+++ b/arch/powerpc/include/asm/mpc5121.h
@@ -32,25 +32,11 @@ struct mpc512x_ccm {
 	u32	scfr2;		/* System Clock Frequency Register 2 */
 	u32	scfr2s;		/* System Clock Frequency Shadow Register 2 */
 	u32	bcr;		/* Bread Crumb Register */
-	u32	p0ccr;		/* PSC0 Clock Control Register */
-	u32	p1ccr;		/* PSC1 CCR */
-	u32	p2ccr;		/* PSC2 CCR */
-	u32	p3ccr;		/* PSC3 CCR */
-	u32	p4ccr;		/* PSC4 CCR */
-	u32	p5ccr;		/* PSC5 CCR */
-	u32	p6ccr;		/* PSC6 CCR */
-	u32	p7ccr;		/* PSC7 CCR */
-	u32	p8ccr;		/* PSC8 CCR */
-	u32	p9ccr;		/* PSC9 CCR */
-	u32	p10ccr;		/* PSC10 CCR */
-	u32	p11ccr;		/* PSC11 CCR */
+	u32	psc_ccr[12];	/* PSC Clock Control Registers */
 	u32	spccr;		/* SPDIF Clock Control Register */
 	u32	cccr;		/* CFM Clock Control Register */
 	u32	dccr;		/* DIU Clock Control Register */
-	u32	m1ccr;		/* MSCAN1 CCR */
-	u32	m2ccr;		/* MSCAN2 CCR */
-	u32	m3ccr;		/* MSCAN3 CCR */
-	u32	m4ccr;		/* MSCAN4 CCR */
+	u32	mscan_ccr[4];	/* MSCAN Clock Control Registers */
 	u8	res[0x98];	/* Reserved */
 };
 
diff --git a/arch/powerpc/include/asm/mpc85xx.h b/arch/powerpc/include/asm/mpc85xx.h
new file mode 100644
index 000000000000..736d4acc05a8
--- /dev/null
+++ b/arch/powerpc/include/asm/mpc85xx.h
@@ -0,0 +1,92 @@
+/*
+ * MPC85xx cpu type detection
+ *
+ * Copyright 2011-2012 Freescale Semiconductor, Inc.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_PPC_MPC85XX_H
+#define __ASM_PPC_MPC85XX_H
+
+#define SVR_REV(svr)	((svr) & 0xFF)		/* SOC design resision */
+#define SVR_MAJ(svr)	(((svr) >> 4) & 0xF)	/* Major revision field*/
+#define SVR_MIN(svr)	(((svr) >> 0) & 0xF)	/* Minor revision field*/
+
+/* Some parts define SVR[0:23] as the SOC version */
+#define SVR_SOC_VER(svr) (((svr) >> 8) & 0xFFF7FF)	/* SOC Version fields */
+
+#define SVR_8533	0x803400
+#define SVR_8535	0x803701
+#define SVR_8536	0x803700
+#define SVR_8540	0x803000
+#define SVR_8541	0x807200
+#define SVR_8543	0x803200
+#define SVR_8544	0x803401
+#define SVR_8545	0x803102
+#define SVR_8547	0x803101
+#define SVR_8548	0x803100
+#define SVR_8555	0x807100
+#define SVR_8560	0x807000
+#define SVR_8567	0x807501
+#define SVR_8568	0x807500
+#define SVR_8569	0x808000
+#define SVR_8572	0x80E000
+#define SVR_P1010	0x80F100
+#define SVR_P1011	0x80E500
+#define SVR_P1012	0x80E501
+#define SVR_P1013	0x80E700
+#define SVR_P1014	0x80F101
+#define SVR_P1017	0x80F700
+#define SVR_P1020	0x80E400
+#define SVR_P1021	0x80E401
+#define SVR_P1022	0x80E600
+#define SVR_P1023	0x80F600
+#define SVR_P1024	0x80E402
+#define SVR_P1025	0x80E403
+#define SVR_P2010	0x80E300
+#define SVR_P2020	0x80E200
+#define SVR_P2040	0x821000
+#define SVR_P2041	0x821001
+#define SVR_P3041	0x821103
+#define SVR_P4040	0x820100
+#define SVR_P4080	0x820000
+#define SVR_P5010	0x822100
+#define SVR_P5020	0x822000
+#define SVR_P5021	0X820500
+#define SVR_P5040	0x820400
+#define SVR_T4240	0x824000
+#define SVR_T4120	0x824001
+#define SVR_T4160	0x824100
+#define SVR_C291	0x850000
+#define SVR_C292	0x850020
+#define SVR_C293	0x850030
+#define SVR_B4860	0X868000
+#define SVR_G4860	0x868001
+#define SVR_G4060	0x868003
+#define SVR_B4440	0x868100
+#define SVR_G4440	0x868101
+#define SVR_B4420	0x868102
+#define SVR_B4220	0x868103
+#define SVR_T1040	0x852000
+#define SVR_T1041	0x852001
+#define SVR_T1042	0x852002
+#define SVR_T1020	0x852100
+#define SVR_T1021	0x852101
+#define SVR_T1022	0x852102
+
+#define SVR_8610	0x80A000
+#define SVR_8641	0x809000
+#define SVR_8641D	0x809001
+
+#define SVR_9130	0x860001
+#define SVR_9131	0x860000
+#define SVR_9132	0x861000
+#define SVR_9232	0x861400
+
+#define SVR_Unknown	0xFFFFFF
+
+#endif
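
The new asm/mpc85xx.h gathers the SVR (System Version Register) constants in
one place so platform and driver code can detect the exact SoC at runtime.
Usage typically reads the SVR and compares the SOC-version field; a sketch,
with an illustrative helper of our own:

    #include <linux/types.h>
    #include <asm/mpc85xx.h>
    #include <asm/reg.h>

    /* True when running on a P1023, any silicon revision. */
    static bool soc_is_p1023(void)
    {
        u32 svr = mfspr(SPRN_SVR);

        return SVR_SOC_VER(svr) == SVR_P1023;
    }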
diff --git a/arch/powerpc/include/asm/mpic.h b/arch/powerpc/include/asm/mpic.h
index 4a1ac9fbf186..754f93d208fa 100644
--- a/arch/powerpc/include/asm/mpic.h
+++ b/arch/powerpc/include/asm/mpic.h
@@ -396,7 +396,14 @@ extern struct bus_type mpic_subsys;
 #define	MPIC_REGSET_TSI108	MPIC_REGSET(1)	/* Tsi108/109 PIC */
 
 /* Get the version of primary MPIC */
+#ifdef CONFIG_MPIC
 extern u32 fsl_mpic_primary_get_version(void);
+#else
+static inline u32 fsl_mpic_primary_get_version(void)
+{
+	return 0;
+}
+#endif
 
 /* Allocate the controller structure and setup the linux irq descs
  * for the range if interrupts passed in. No HW initialization is
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index 029fe85722aa..c5cd72833d6e 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -124,6 +124,11 @@ extern int opal_enter_rtas(struct rtas_args *args,
 #define OPAL_PCI_POLL				62
 #define OPAL_PCI_MSI_EOI			63
 #define OPAL_PCI_GET_PHB_DIAG_DATA2		64
+#define OPAL_XSCOM_READ				65
+#define OPAL_XSCOM_WRITE			66
+#define OPAL_LPC_READ				67
+#define OPAL_LPC_WRITE				68
+#define OPAL_RETURN_CPU				69
 
 #ifndef __ASSEMBLY__
 
@@ -337,6 +342,17 @@ enum OpalEpowStatus {
 	OPAL_EPOW_OVER_INTERNAL_TEMP = 3
 };
 
+/*
+ * Address cycle types for LPC accesses. These also correspond
+ * to the content of the first cell of the "reg" property for
+ * device nodes on the LPC bus
+ */
+enum OpalLPCAddressType {
+	OPAL_LPC_MEM	= 0,
+	OPAL_LPC_IO	= 1,
+	OPAL_LPC_FW	= 2,
+};
+
 struct opal_machine_check_event {
 	enum OpalMCE_Version	version:8;	/* 0x00 */
 	uint8_t			in_use;		/* 0x01 */
@@ -631,6 +647,15 @@ int64_t opal_set_system_attention_led(uint8_t led_action);
 int64_t opal_pci_next_error(uint64_t phb_id, uint64_t *first_frozen_pe,
 			    uint16_t *pci_error_type, uint16_t *severity);
 int64_t opal_pci_poll(uint64_t phb_id);
+int64_t opal_return_cpu(void);
+
+int64_t opal_xscom_read(uint32_t gcid, uint32_t pcb_addr, uint64_t *val);
+int64_t opal_xscom_write(uint32_t gcid, uint32_t pcb_addr, uint64_t val);
+
+int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type,
+		       uint32_t addr, uint32_t data, uint32_t sz);
+int64_t opal_lpc_read(uint32_t chip_id, enum OpalLPCAddressType addr_type,
+		      uint32_t addr, uint32_t *data, uint32_t sz);
 
 /* Internal functions */
 extern int early_init_dt_scan_opal(unsigned long node, const char *uname, int depth, void *data);
@@ -664,6 +689,8 @@ extern int opal_machine_check(struct pt_regs *regs);
 
 extern void opal_shutdown(void);
 
+extern void opal_lpc_init(void);
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __OPAL_H */
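
The new OPAL entry points route XSCOM and LPC bus accesses through firmware.
An LPC read takes the chip id, an address-cycle type from enum
OpalLPCAddressType, and an access size; for instance, reading one byte from
LPC I/O space could look like this sketch (the register address and error
handling are purely illustrative):

    #include <asm/opal.h>

    /* Read a single byte from LPC I/O space via firmware. */
    static int lpc_io_read_byte(uint32_t chip_id, uint32_t addr, uint8_t *byte)
    {
        uint32_t data;
        int64_t rc = opal_lpc_read(chip_id, OPAL_LPC_IO, addr, &data, 1);

        if (rc != 0)
            return -1;      /* simplified error handling */

        *byte = data & 0xff;
        return 0;
    }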
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 77c91e74b612..a5954cebbc55 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -68,8 +68,13 @@ struct paca_struct {
 	 * instruction. They must travel together and be properly
 	 * aligned.
 	 */
+#ifdef __BIG_ENDIAN__
 	u16 lock_token;			/* Constant 0x8000, used in locks */
 	u16 paca_index;			/* Logical processor number */
+#else
+	u16 paca_index;			/* Logical processor number */
+	u16 lock_token;			/* Constant 0x8000, used in locks */
+#endif
 
 	u64 kernel_toc;			/* Kernel TOC address */
 	u64 kernelbase;			/* Base address of kernel */
@@ -93,9 +98,9 @@ struct paca_struct {
 	 * Now, starting in cacheline 2, the exception save areas
 	 */
 	/* used for most interrupts/exceptions */
-	u64 exgen[12] __attribute__((aligned(0x80)));
-	u64 exmc[12];		/* used for machine checks */
-	u64 exslb[12];		/* used for SLB/segment table misses
+	u64 exgen[13] __attribute__((aligned(0x80)));
+	u64 exmc[13];		/* used for machine checks */
+	u64 exslb[13];		/* used for SLB/segment table misses
 				 * on the linear mapping */
 	/* SLB related definitions */
 	u16 vmalloc_sllp;
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 988c812aab5b..b9f426212d3a 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -211,9 +211,19 @@ extern long long virt_phys_offset;
 #define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET))
 #define __pa(x) ((unsigned long)(x) - VIRT_PHYS_OFFSET)
 #else
+#ifdef CONFIG_PPC64
+/*
+ * gcc miscompiles (unsigned long)(&static_var) - PAGE_OFFSET
+ * with -mcmodel=medium, so we use & and | instead of - and + on 64-bit.
+ */
+#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET))
+#define __pa(x) ((unsigned long)(x) & 0x0fffffffffffffffUL)
+
+#else /* 32-bit, non book E */
 #define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START))
 #define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START)
 #endif
+#endif
 
 /*
  * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
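
The bitwise forms work because the 64-bit linear mapping puts PAGE_OFFSET at
0xc000000000000000 while physical addresses stay well below bit 60, so the
operands of the old + and - never share set bits; the OR and the AND-mask are
then bit-for-bit identical to the add and subtract, without tripping the
-mcmodel=medium miscompile noted in the comment. A worked example:

    /* PAGE_OFFSET = 0xc000000000000000 on ppc64; phys fits in 60 bits */
    unsigned long phys = 0x0000000012345678ul;

    unsigned long v_add = phys + 0xc000000000000000ul; /* 0xc000000012345678 */
    unsigned long v_or  = phys | 0xc000000000000000ul; /* identical value */

    unsigned long back  = v_or & 0x0ffffffffffffffful; /* 0x12345678 again */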
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index 32d0d2018faf..4ca90a39d6d0 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -159,7 +159,7 @@ struct pci_dn {
 
 	int	pci_ext_config_space;	/* for pci devices */
 
-	int	force_32bit_msi:1;
+	bool	force_32bit_msi;
 
 	struct	pci_dev *pcidev;	/* back-pointer to the pci device */
 #ifdef CONFIG_EEH
diff --git a/arch/powerpc/include/asm/perf_event_fsl_emb.h b/arch/powerpc/include/asm/perf_event_fsl_emb.h
index 718a9fa94e68..a58165450f6f 100644
--- a/arch/powerpc/include/asm/perf_event_fsl_emb.h
+++ b/arch/powerpc/include/asm/perf_event_fsl_emb.h
@@ -13,7 +13,7 @@
 #include <linux/types.h>
 #include <asm/hw_irq.h>
 
-#define MAX_HWEVENTS 4
+#define MAX_HWEVENTS 6
 
 /* event flags */
 #define FSL_EMB_EVENT_VALID 1
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
index 8b2492644754..3fd2f1b6f906 100644
--- a/arch/powerpc/include/asm/perf_event_server.h
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -138,11 +138,11 @@ extern ssize_t power_events_sysfs_show(struct device *dev,
 #define	EVENT_PTR(_id, _suffix)		&EVENT_VAR(_id, _suffix).attr.attr
 
 #define	EVENT_ATTR(_name, _id, _suffix)					\
-	PMU_EVENT_ATTR(_name, EVENT_VAR(_id, _suffix), PME_PM_##_id,	\
+	PMU_EVENT_ATTR(_name, EVENT_VAR(_id, _suffix), PME_##_id,	\
 			power_events_sysfs_show)
 
 #define	GENERIC_EVENT_ATTR(_name, _id)	EVENT_ATTR(_name, _id, _g)
 #define	GENERIC_EVENT_PTR(_id)		EVENT_PTR(_id, _g)
 
-#define	POWER_EVENT_ATTR(_name, _id)	EVENT_ATTR(PM_##_name, _id, _p)
+#define	POWER_EVENT_ATTR(_name, _id)	EVENT_ATTR(_name, _id, _p)
 #define	POWER_EVENT_PTR(_id)		EVENT_PTR(_id, _p)
diff --git a/arch/powerpc/platforms/pseries/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h
index f35787b6a5e0..a63b045e707c 100644
--- a/arch/powerpc/platforms/pseries/plpar_wrappers.h
+++ b/arch/powerpc/include/asm/plpar_wrappers.h
@@ -1,5 +1,5 @@
-#ifndef _PSERIES_PLPAR_WRAPPERS_H
-#define _PSERIES_PLPAR_WRAPPERS_H
+#ifndef _ASM_POWERPC_PLPAR_WRAPPERS_H
+#define _ASM_POWERPC_PLPAR_WRAPPERS_H
 
 #include <linux/string.h>
 #include <linux/irqflags.h>
@@ -256,30 +256,6 @@ static inline long plpar_tce_stuff(unsigned long liobn, unsigned long ioba,
 	return plpar_hcall_norets(H_STUFF_TCE, liobn, ioba, tceval, count);
 }
 
-static inline long plpar_get_term_char(unsigned long termno,
-		unsigned long *len_ret, char *buf_ret)
-{
-	long rc;
-	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
-	unsigned long *lbuf = (unsigned long *)buf_ret;	/* TODO: alignment? */
-
-	rc = plpar_hcall(H_GET_TERM_CHAR, retbuf, termno);
-
-	*len_ret = retbuf[0];
-	lbuf[0] = retbuf[1];
-	lbuf[1] = retbuf[2];
-
-	return rc;
-}
-
-static inline long plpar_put_term_char(unsigned long termno, unsigned long len,
-		const char *buffer)
-{
-	unsigned long *lbuf = (unsigned long *)buffer;	/* TODO: alignment? */
-	return plpar_hcall_norets(H_PUT_TERM_CHAR, termno, len, lbuf[0],
-			lbuf[1]);
-}
-
 /* Set various resource mode parameters */
 static inline long plpar_set_mode(unsigned long mflags, unsigned long resource,
 		unsigned long value1, unsigned long value2)
@@ -321,4 +297,4 @@ static inline long plapr_set_watchpoint0(unsigned long dawr0, unsigned long dawr
 	return plpar_set_mode(0, 2, dawr0, dawrx0);
 }
 
-#endif /* _PSERIES_PLPAR_WRAPPERS_H */
+#endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index eccfc161e58e..d7fe9f5b46d4 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -81,6 +81,53 @@
 #define __REGA0_R30	30
 #define __REGA0_R31	31
 
+/* opcode and xopcode for instructions */
+#define OP_TRAP 3
+#define OP_TRAP_64 2
+
+#define OP_31_XOP_TRAP      4
+#define OP_31_XOP_LWZX      23
+#define OP_31_XOP_DCBST     54
+#define OP_31_XOP_LWZUX     55
+#define OP_31_XOP_TRAP_64   68
+#define OP_31_XOP_DCBF      86
+#define OP_31_XOP_LBZX      87
+#define OP_31_XOP_STWX      151
+#define OP_31_XOP_STBX      215
+#define OP_31_XOP_LBZUX     119
+#define OP_31_XOP_STBUX     247
+#define OP_31_XOP_LHZX      279
+#define OP_31_XOP_LHZUX     311
+#define OP_31_XOP_MFSPR     339
+#define OP_31_XOP_LHAX      343
+#define OP_31_XOP_LHAUX     375
+#define OP_31_XOP_STHX      407
+#define OP_31_XOP_STHUX     439
+#define OP_31_XOP_MTSPR     467
+#define OP_31_XOP_DCBI      470
+#define OP_31_XOP_LWBRX     534
+#define OP_31_XOP_TLBSYNC   566
+#define OP_31_XOP_STWBRX    662
+#define OP_31_XOP_LHBRX     790
+#define OP_31_XOP_STHBRX    918
+
+#define OP_LWZ  32
+#define OP_LD   58
+#define OP_LWZU 33
+#define OP_LBZ  34
+#define OP_LBZU 35
+#define OP_STW  36
+#define OP_STWU 37
+#define OP_STD  62
+#define OP_STB  38
+#define OP_STBU 39
+#define OP_LHZ  40
+#define OP_LHZU 41
+#define OP_LHA  42
+#define OP_LHAU 43
+#define OP_STH  44
+#define OP_STHU 45
+
 /* sorted alphabetically */
 #define PPC_INST_BHRBE			0x7c00025c
 #define PPC_INST_CLRBHRB		0x7c00035c
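
These OP_* values are the primary opcode, the top six bits of a 32-bit
instruction word, and the OP_31_XOP_* values are the extended opcode used by
the opcode-31 family, taken from bits 1-10 at the low end of the word.
Decoding per the Power ISA encoding then looks like the following sketch
(helper names written out here for illustration):

    #include <stdint.h>

    static inline uint32_t get_op(uint32_t inst)
    {
        return inst >> 26;            /* primary opcode */
    }

    static inline uint32_t get_xop(uint32_t inst)
    {
        return (inst >> 1) & 0x3ff;   /* extended opcode, opcode-31 forms */
    }

    /* e.g. an lwzx satisfies:
     *     get_op(inst) == 31 && get_xop(inst) == OP_31_XOP_LWZX
     */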
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 2f1b6c5f8174..599545738af3 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -54,7 +54,8 @@ BEGIN_FW_FTR_SECTION; \
 	/* from user - see if there are any DTL entries to process */	\
 	ld	r10,PACALPPACAPTR(r13);	/* get ptr to VPA */		\
 	ld	r11,PACA_DTL_RIDX(r13);	/* get log read index */	\
-	ld	r10,LPPACA_DTLIDX(r10);	/* get log write index */	\
+	addi	r10,r10,LPPACA_DTLIDX;					\
+	LDX_BE	r10,0,r10;		/* get log write index */	\
 	cmpd	cr1,r11,r10;						\
 	beq+	cr1,33f;						\
 	bl	.accumulate_stolen_time;				\
@@ -219,19 +220,6 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
 #define REST_8VSRS(n,b,base)	REST_4VSRS(n,b,base); REST_4VSRS(n+4,b,base)
 #define REST_16VSRS(n,b,base)	REST_8VSRS(n,b,base); REST_8VSRS(n+8,b,base)
 #define REST_32VSRS(n,b,base)	REST_16VSRS(n,b,base); REST_16VSRS(n+16,b,base)
-/* Save the upper 32 VSRs (32-63) in the thread VSX region (0-31) */
-#define SAVE_VSRU(n,b,base)	li b,THREAD_VR0+(16*(n));  STXVD2X(n+32,R##base,R##b)
-#define SAVE_2VSRSU(n,b,base)	SAVE_VSRU(n,b,base); SAVE_VSRU(n+1,b,base)
-#define SAVE_4VSRSU(n,b,base)	SAVE_2VSRSU(n,b,base); SAVE_2VSRSU(n+2,b,base)
-#define SAVE_8VSRSU(n,b,base)	SAVE_4VSRSU(n,b,base); SAVE_4VSRSU(n+4,b,base)
-#define SAVE_16VSRSU(n,b,base)	SAVE_8VSRSU(n,b,base); SAVE_8VSRSU(n+8,b,base)
-#define SAVE_32VSRSU(n,b,base)	SAVE_16VSRSU(n,b,base); SAVE_16VSRSU(n+16,b,base)
-#define REST_VSRU(n,b,base)	li b,THREAD_VR0+(16*(n)); LXVD2X(n+32,R##base,R##b)
-#define REST_2VSRSU(n,b,base)	REST_VSRU(n,b,base); REST_VSRU(n+1,b,base)
-#define REST_4VSRSU(n,b,base)	REST_2VSRSU(n,b,base); REST_2VSRSU(n+2,b,base)
-#define REST_8VSRSU(n,b,base)	REST_4VSRSU(n,b,base); REST_4VSRSU(n+4,b,base)
-#define REST_16VSRSU(n,b,base)	REST_8VSRSU(n,b,base); REST_8VSRSU(n+8,b,base)
-#define REST_32VSRSU(n,b,base)	REST_16VSRSU(n,b,base); REST_16VSRSU(n+16,b,base)
 
 /*
  * b = base register for addressing, o = base offset from register of 1st EVR
@@ -443,15 +431,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_601)
 #define ISYNC_601
 #endif
 
-#ifdef CONFIG_PPC_CELL
+#if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_FSL_BOOK3E)
 #define MFTB(dest)			\
-90:	mftb  dest;			\
+90:	mfspr dest, SPRN_TBRL;		\
 BEGIN_FTR_SECTION_NESTED(96);		\
 	cmpwi dest,0;			\
 	beq-  90b;			\
 END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
 #else
-#define MFTB(dest)			mftb dest
+#define MFTB(dest)			mfspr dest, SPRN_TBRL
 #endif
 
 #ifndef CONFIG_SMP
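
The MFTB change above reads the timebase through mfspr with SPRN_TBRL instead of the dedicated mftb mnemonic, which some embedded (Freescale Book3E) cores do not implement. As a hedged illustration of the read this macro performs, here is a user-space sketch of the classic 32-bit two-half timebase read (not code from this patch; SPRs 268/269 are the architected TBL/TBU read aliases):

    #include <stdint.h>

    /* Read the 64-bit timebase on 32-bit PowerPC: if the upper half
     * changed between the two reads, a carry occurred and we retry.
     * SPR 268 = TBL (lower), SPR 269 = TBU (upper), read-only. */
    static inline uint64_t read_timebase32(void)
    {
            uint32_t hi, lo, chk;

            do {
                    asm volatile("mfspr %0, 269" : "=r" (hi));
                    asm volatile("mfspr %0, 268" : "=r" (lo));
                    asm volatile("mfspr %0, 269" : "=r" (chk));
            } while (hi != chk);    /* retry on TBL -> TBU carry */

            return ((uint64_t)hi << 32) | lo;
    }
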
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 47a35b08b963..e378cccfca55 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -247,6 +247,10 @@ struct thread_struct {
 	unsigned long	tm_orig_msr;	/* Thread's MSR on ctx switch */
 	struct pt_regs	ckpt_regs;	/* Checkpointed registers */
 
+	unsigned long	tm_tar;
+	unsigned long	tm_ppr;
+	unsigned long	tm_dscr;
+
 	/*
 	 * Transactional FP and VSX 0-31 register set.
 	 * NOTE: the sense of these is the opposite of the integer ckpt_regs!
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h
index bc2da154f68b..7d0c7f3a7171 100644
--- a/arch/powerpc/include/asm/prom.h
+++ b/arch/powerpc/include/asm/prom.h
@@ -38,14 +38,12 @@ extern unsigned long pci_address_to_pio(phys_addr_t address);
 /* Parse the ibm,dma-window property of an OF node into the busno, phys and
  * size parameters.
  */
-void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
-		unsigned long *busno, unsigned long *phys, unsigned long *size);
+void of_parse_dma_window(struct device_node *dn, const __be32 *dma_window,
+			 unsigned long *busno, unsigned long *phys,
+			 unsigned long *size);
 
 extern void kdump_move_device_tree(void);
 
-/* CPU OF node matching */
-struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
-
 /* cache lookup */
 struct device_node *of_find_next_cache_node(struct device_node *np);
 
@@ -58,6 +56,8 @@ static inline int of_node_to_nid(struct device_node *device) { return 0; }
 
 extern void of_instantiate_rtc(void);
 
+extern int of_get_ibm_chip_id(struct device_node *np);
+
 /* The of_drconf_cell struct defines the layout of the LMB array
  * specified in the device tree property
  * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index a6840e4e24f7..10d1ef016bf1 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -115,10 +115,10 @@
 #define MSR_64BIT	MSR_SF
 
 /* Server variant */
-#define MSR_		MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF |MSR_HV
-#define MSR_KERNEL	MSR_ | MSR_64BIT
-#define MSR_USER32	MSR_ | MSR_PR | MSR_EE
-#define MSR_USER64	MSR_USER32 | MSR_64BIT
+#define MSR_		(MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF |MSR_HV)
+#define MSR_KERNEL	(MSR_ | MSR_64BIT)
+#define MSR_USER32	(MSR_ | MSR_PR | MSR_EE)
+#define MSR_USER64	(MSR_USER32 | MSR_64BIT)
 #elif defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_8xx)
 /* Default MSR for kernel mode. */
 #define MSR_KERNEL	(MSR_ME|MSR_RI|MSR_IR|MSR_DR)
@@ -254,19 +254,28 @@
 #define SPRN_HRMOR	0x139	/* Real mode offset register */
 #define SPRN_HSRR0	0x13A	/* Hypervisor Save/Restore 0 */
 #define SPRN_HSRR1	0x13B	/* Hypervisor Save/Restore 1 */
+/* HFSCR and FSCR bit numbers are the same */
+#define FSCR_TAR_LG	8	/* Enable Target Address Register */
+#define FSCR_EBB_LG	7	/* Enable Event Based Branching */
+#define FSCR_TM_LG	5	/* Enable Transactional Memory */
+#define FSCR_BHRB_LG	4	/* Enable Branch History Rolling Buffer*/
+#define FSCR_PM_LG	3	/* Enable prob/priv access to PMU SPRs */
+#define FSCR_DSCR_LG	2	/* Enable Data Stream Control Register */
+#define FSCR_VECVSX_LG	1	/* Enable VMX/VSX */
+#define FSCR_FP_LG	0	/* Enable Floating Point */
 #define SPRN_FSCR	0x099	/* Facility Status & Control Register */
-#define   FSCR_TAR	(1 << (63-55)) /* Enable Target Address Register */
-#define   FSCR_EBB	(1 << (63-56)) /* Enable Event Based Branching */
-#define   FSCR_DSCR	(1 << (63-61)) /* Enable Data Stream Control Register */
+#define   FSCR_TAR	__MASK(FSCR_TAR_LG)
+#define   FSCR_EBB	__MASK(FSCR_EBB_LG)
+#define   FSCR_DSCR	__MASK(FSCR_DSCR_LG)
 #define SPRN_HFSCR	0xbe	/* HV=1 Facility Status & Control Register */
-#define   HFSCR_TAR	(1 << (63-55)) /* Enable Target Address Register */
-#define   HFSCR_EBB	(1 << (63-56)) /* Enable Event Based Branching */
-#define   HFSCR_TM	(1 << (63-58)) /* Enable Transactional Memory */
-#define   HFSCR_PM	(1 << (63-60)) /* Enable prob/priv access to PMU SPRs */
-#define   HFSCR_BHRB	(1 << (63-59)) /* Enable Branch History Rolling Buffer*/
-#define   HFSCR_DSCR	(1 << (63-61)) /* Enable Data Stream Control Register */
-#define   HFSCR_VECVSX	(1 << (63-62)) /* Enable VMX/VSX */
-#define   HFSCR_FP	(1 << (63-63)) /* Enable Floating Point */
+#define   HFSCR_TAR	__MASK(FSCR_TAR_LG)
+#define   HFSCR_EBB	__MASK(FSCR_EBB_LG)
+#define   HFSCR_TM	__MASK(FSCR_TM_LG)
+#define   HFSCR_PM	__MASK(FSCR_PM_LG)
+#define   HFSCR_BHRB	__MASK(FSCR_BHRB_LG)
+#define   HFSCR_DSCR	__MASK(FSCR_DSCR_LG)
+#define   HFSCR_VECVSX	__MASK(FSCR_VECVSX_LG)
+#define   HFSCR_FP	__MASK(FSCR_FP_LG)
 #define SPRN_TAR	0x32f	/* Target Address Register */
 #define SPRN_LPCR	0x13E	/* LPAR Control Register */
 #define LPCR_VPM0	(1ul << (63-0))
@@ -1117,10 +1126,10 @@
 		: "memory")
 
 #ifdef __powerpc64__
-#ifdef CONFIG_PPC_CELL
+#if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_FSL_BOOK3E)
 #define mftb()		({unsigned long rval;				\
 			asm volatile(					\
-				"90:	mftb %0;\n"			\
+				"90:	mfspr %0, %2;\n"		\
 				"97:	cmpwi %0,0;\n"			\
 				"	beq- 90b;\n"			\
 				"99:\n"					\
@@ -1134,18 +1143,23 @@
1134 " .llong 0\n" \ 1143 " .llong 0\n" \
1135 " .llong 0\n" \ 1144 " .llong 0\n" \
1136 ".previous" \ 1145 ".previous" \
1137 : "=r" (rval) : "i" (CPU_FTR_CELL_TB_BUG)); rval;}) 1146 : "=r" (rval) \
1147 : "i" (CPU_FTR_CELL_TB_BUG), "i" (SPRN_TBRL)); \
1148 rval;})
1138#else 1149#else
1139#define mftb() ({unsigned long rval; \ 1150#define mftb() ({unsigned long rval; \
1140 asm volatile("mftb %0" : "=r" (rval)); rval;}) 1151 asm volatile("mfspr %0, %1" : \
1152 "=r" (rval) : "i" (SPRN_TBRL)); rval;})
1141#endif /* !CONFIG_PPC_CELL */ 1153#endif /* !CONFIG_PPC_CELL */
1142 1154
1143#else /* __powerpc64__ */ 1155#else /* __powerpc64__ */
1144 1156
1145#define mftbl() ({unsigned long rval; \ 1157#define mftbl() ({unsigned long rval; \
1146 asm volatile("mftbl %0" : "=r" (rval)); rval;}) 1158 asm volatile("mfspr %0, %1" : "=r" (rval) : \
1159 "i" (SPRN_TBRL)); rval;})
1147#define mftbu() ({unsigned long rval; \ 1160#define mftbu() ({unsigned long rval; \
1148 asm volatile("mftbu %0" : "=r" (rval)); rval;}) 1161 asm volatile("mfspr %0, %1" : "=r" (rval) : \
1162 "i" (SPRN_TBRU)); rval;})
1149#endif /* !__powerpc64__ */ 1163#endif /* !__powerpc64__ */
1150 1164
1151#define mttbl(v) asm volatile("mttbl %0":: "r"(v)) 1165#define mttbl(v) asm volatile("mttbl %0":: "r"(v))
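
The FSCR/HFSCR rework earlier in this file keys every facility off a single *_LG bit number and derives both registers' masks with __MASK(). Assuming the kernel's __MASK(x) expands to (1UL << (x)), as it does elsewhere in the tree, the new encodings agree exactly with the old MSB-relative ones; a small stand-alone check:

    #include <assert.h>

    #define __MASK(X)       (1UL << (X))
    #define FSCR_TAR_LG     8
    #define FSCR_DSCR_LG    2

    int main(void)
    {
            /* old style counted bits from the MSB of a 64-bit register */
            assert(__MASK(FSCR_TAR_LG)  == (1UL << (63 - 55)));
            assert(__MASK(FSCR_DSCR_LG) == (1UL << (63 - 61)));
            return 0;
    }
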
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index b417de3cc2c4..ed8f836da094 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -29,10 +29,10 @@
 #if defined(CONFIG_PPC_BOOK3E_64)
 #define MSR_64BIT	MSR_CM
 
-#define MSR_		MSR_ME | MSR_CE
-#define MSR_KERNEL	MSR_ | MSR_64BIT
-#define MSR_USER32	MSR_ | MSR_PR | MSR_EE
-#define MSR_USER64	MSR_USER32 | MSR_64BIT
+#define MSR_		(MSR_ME | MSR_CE)
+#define MSR_KERNEL	(MSR_ | MSR_64BIT)
+#define MSR_USER32	(MSR_ | MSR_PR | MSR_EE)
+#define MSR_USER64	(MSR_USER32 | MSR_64BIT)
 #elif defined (CONFIG_40x)
 #define MSR_KERNEL	(MSR_ME|MSR_RI|MSR_IR|MSR_DR|MSR_CE)
 #define MSR_USER	(MSR_KERNEL|MSR_PR|MSR_EE)
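
Wrapping MSR_, MSR_KERNEL, MSR_USER32 and MSR_USER64 in parentheses is a correctness fix, not a style one: without them, any use of the macro next to a higher-precedence operator silently miscomputes. A minimal illustration (constants invented for the demo):

    #include <stdio.h>

    #define MSR_ME      0x1000
    #define MSR_CE      0x0200
    #define MSR_OLD     MSR_ME | MSR_CE   /* unparenthesized */
    #define MSR_NEW     (MSR_ME | MSR_CE)

    int main(void)
    {
            unsigned long msr = 0;

            /* '&' binds tighter than '|': this expands to
             * (msr & MSR_ME) | MSR_CE and is never zero. */
            printf("old: %#lx\n", msr & MSR_OLD);   /* 0x200 */
            printf("new: %#lx\n", msr & MSR_NEW);   /* 0 */
            return 0;
    }
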
diff --git a/arch/powerpc/include/asm/reg_fsl_emb.h b/arch/powerpc/include/asm/reg_fsl_emb.h
index 77bb71cfd991..0e3ddf5177f6 100644
--- a/arch/powerpc/include/asm/reg_fsl_emb.h
+++ b/arch/powerpc/include/asm/reg_fsl_emb.h
@@ -17,12 +17,16 @@
 /* Freescale Book E Performance Monitor APU Registers */
 #define PMRN_PMC0	0x010	/* Performance Monitor Counter 0 */
 #define PMRN_PMC1	0x011	/* Performance Monitor Counter 1 */
-#define PMRN_PMC2	0x012	/* Performance Monitor Counter 1 */
-#define PMRN_PMC3	0x013	/* Performance Monitor Counter 1 */
+#define PMRN_PMC2	0x012	/* Performance Monitor Counter 2 */
+#define PMRN_PMC3	0x013	/* Performance Monitor Counter 3 */
+#define PMRN_PMC4	0x014	/* Performance Monitor Counter 4 */
+#define PMRN_PMC5	0x015	/* Performance Monitor Counter 5 */
 #define PMRN_PMLCA0	0x090	/* PM Local Control A0 */
 #define PMRN_PMLCA1	0x091	/* PM Local Control A1 */
 #define PMRN_PMLCA2	0x092	/* PM Local Control A2 */
 #define PMRN_PMLCA3	0x093	/* PM Local Control A3 */
+#define PMRN_PMLCA4	0x094	/* PM Local Control A4 */
+#define PMRN_PMLCA5	0x095	/* PM Local Control A5 */
 
 #define PMLCA_FC	0x80000000	/* Freeze Counter */
 #define PMLCA_FCS	0x40000000	/* Freeze in Supervisor */
@@ -30,14 +34,18 @@
 #define PMLCA_FCM1	0x10000000	/* Freeze when PMM==1 */
 #define PMLCA_FCM0	0x08000000	/* Freeze when PMM==0 */
 #define PMLCA_CE	0x04000000	/* Condition Enable */
+#define PMLCA_FGCS1	0x00000002	/* Freeze in guest state */
+#define PMLCA_FGCS0	0x00000001	/* Freeze in hypervisor state */
 
-#define PMLCA_EVENT_MASK 0x00ff0000	/* Event field */
+#define PMLCA_EVENT_MASK 0x01ff0000	/* Event field */
 #define PMLCA_EVENT_SHIFT	16
 
 #define PMRN_PMLCB0	0x110	/* PM Local Control B0 */
 #define PMRN_PMLCB1	0x111	/* PM Local Control B1 */
 #define PMRN_PMLCB2	0x112	/* PM Local Control B2 */
 #define PMRN_PMLCB3	0x113	/* PM Local Control B3 */
+#define PMRN_PMLCB4	0x114	/* PM Local Control B4 */
+#define PMRN_PMLCB5	0x115	/* PM Local Control B5 */
 
 #define PMLCB_THRESHMUL_MASK	0x0700	/* Threshold Multiple Field */
 #define PMLCB_THRESHMUL_SHIFT	8
@@ -55,16 +63,22 @@
 
 #define PMRN_UPMC0	0x000	/* User Performance Monitor Counter 0 */
 #define PMRN_UPMC1	0x001	/* User Performance Monitor Counter 1 */
-#define PMRN_UPMC2	0x002	/* User Performance Monitor Counter 1 */
-#define PMRN_UPMC3	0x003	/* User Performance Monitor Counter 1 */
+#define PMRN_UPMC2	0x002	/* User Performance Monitor Counter 2 */
+#define PMRN_UPMC3	0x003	/* User Performance Monitor Counter 3 */
+#define PMRN_UPMC4	0x004	/* User Performance Monitor Counter 4 */
+#define PMRN_UPMC5	0x005	/* User Performance Monitor Counter 5 */
 #define PMRN_UPMLCA0	0x080	/* User PM Local Control A0 */
 #define PMRN_UPMLCA1	0x081	/* User PM Local Control A1 */
 #define PMRN_UPMLCA2	0x082	/* User PM Local Control A2 */
 #define PMRN_UPMLCA3	0x083	/* User PM Local Control A3 */
+#define PMRN_UPMLCA4	0x084	/* User PM Local Control A4 */
+#define PMRN_UPMLCA5	0x085	/* User PM Local Control A5 */
 #define PMRN_UPMLCB0	0x100	/* User PM Local Control B0 */
 #define PMRN_UPMLCB1	0x101	/* User PM Local Control B1 */
 #define PMRN_UPMLCB2	0x102	/* User PM Local Control B2 */
 #define PMRN_UPMLCB3	0x103	/* User PM Local Control B3 */
+#define PMRN_UPMLCB4	0x104	/* User PM Local Control B4 */
+#define PMRN_UPMLCB5	0x105	/* User PM Local Control B5 */
 #define PMRN_UPMGC0	0x180	/* User PM Global Control 0 */
 
 
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index c7a8bfc9f6f5..9bd52c65e66f 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -44,12 +44,12 @@
  *
  */
 
-typedef u32 rtas_arg_t;
+typedef __be32 rtas_arg_t;
 
 struct rtas_args {
-	u32 token;
-	u32 nargs;
-	u32 nret;
+	__be32 token;
+	__be32 nargs;
+	__be32 nret;
 	rtas_arg_t args[16];
 	rtas_arg_t *rets;     /* Pointer to return values in args[]. */
 };
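
Retyping the RTAS argument block as __be32 records that RTAS always speaks big-endian, regardless of the kernel's own endianness, so little-endian callers must convert at the boundary. A user-space sketch of that convention (htonl/ntohl stand in for the kernel's cpu_to_be32/be32_to_cpu; the struct is abridged, and the helper names are illustrative):

    #include <stdint.h>
    #include <arpa/inet.h>

    typedef uint32_t be32;              /* stand-in for __be32 */

    struct rtas_args_sketch {
            be32 token, nargs, nret;
            be32 args[16];
    };

    static void fill_call(struct rtas_args_sketch *a,
                          uint32_t token, uint32_t arg0)
    {
            a->token   = htonl(token);  /* CPU order -> big-endian */
            a->nargs   = htonl(1);
            a->nret    = htonl(1);
            a->args[0] = htonl(arg0);
    }

    static uint32_t first_ret(const struct rtas_args_sketch *a)
    {
            /* returns follow the arguments: args[nargs] is ret 0 */
            return ntohl(a->args[1]);   /* big-endian -> CPU order */
    }
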
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 48cfc858abd6..98da78e0c2c0 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -112,6 +112,7 @@ static inline struct cpumask *cpu_core_mask(int cpu)
 }
 
 extern int cpu_to_core_id(int cpu);
+extern int cpu_to_chip_id(int cpu);
 
 /* Since OpenPIC has only 4 IPIs, we use slightly different message numbers.
  *
@@ -186,6 +187,8 @@ extern int smt_enabled_at_boot;
 extern int smp_mpic_probe(void);
 extern void smp_mpic_setup_cpu(int cpu);
 extern int smp_generic_kick_cpu(int nr);
+extern int smp_generic_cpu_bootable(unsigned int nr);
+
 
 extern void smp_generic_give_timebase(void);
 extern void smp_generic_take_timebase(void);
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index 5b23f910ee57..5f54a744dcc5 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -32,8 +32,12 @@
 
 #ifdef CONFIG_PPC64
 /* use 0x800000yy when locked, where yy == CPU number */
+#ifdef __BIG_ENDIAN__
 #define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
 #else
+#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
+#endif
+#else
 #define LOCK_TOKEN	1
 #endif
39 43
@@ -96,7 +100,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 
 #if defined(CONFIG_PPC_SPLPAR)
 /* We only yield to the hypervisor if we are in shared processor mode */
-#define SHARED_PROCESSOR (local_paca->lppaca_ptr->shared_proc)
+#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr))
 extern void __spin_yield(arch_spinlock_t *lock);
 extern void __rw_yield(arch_rwlock_t *lock);
 #else /* SPLPAR */
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index 49a13e0ef234..2be5618cdec6 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -15,12 +15,18 @@ extern struct task_struct *__switch_to(struct task_struct *,
 struct thread_struct;
 extern struct task_struct *_switch(struct thread_struct *prev,
 				   struct thread_struct *next);
+#ifdef CONFIG_PPC_BOOK3S_64
+static inline void save_tar(struct thread_struct *prev)
+{
+	if (cpu_has_feature(CPU_FTR_ARCH_207S))
+		prev->tar = mfspr(SPRN_TAR);
+}
+#else
+static inline void save_tar(struct thread_struct *prev) {}
+#endif
 
-extern void giveup_fpu(struct task_struct *);
 extern void load_up_fpu(void);
-extern void disable_kernel_fp(void);
 extern void enable_kernel_fp(void);
-extern void flush_fp_to_thread(struct task_struct *);
 extern void enable_kernel_altivec(void);
 extern void load_up_altivec(struct task_struct *);
 extern int emulate_altivec(struct pt_regs *);
@@ -38,6 +44,14 @@ static inline void discard_lazy_cpu_state(void)
 }
 #endif
 
+#ifdef CONFIG_PPC_FPU
+extern void flush_fp_to_thread(struct task_struct *);
+extern void giveup_fpu(struct task_struct *);
+#else
+static inline void flush_fp_to_thread(struct task_struct *t) { }
+static inline void giveup_fpu(struct task_struct *t) { }
+#endif
+
 #ifdef CONFIG_ALTIVEC
 extern void flush_altivec_to_thread(struct task_struct *);
 extern void giveup_altivec(struct task_struct *);
diff --git a/arch/powerpc/include/asm/timex.h b/arch/powerpc/include/asm/timex.h
index c55e14f7ef44..18908caa1f3b 100644
--- a/arch/powerpc/include/asm/timex.h
+++ b/arch/powerpc/include/asm/timex.h
@@ -29,7 +29,7 @@ static inline cycles_t get_cycles(void)
 	ret = 0;
 
 	__asm__ __volatile__(
-		"97:	mftb %0\n"
+		"97:	mfspr %0, %2\n"
 		"99:\n"
 		".section __ftr_fixup,\"a\"\n"
 		".align 2\n"
@@ -41,7 +41,7 @@ static inline cycles_t get_cycles(void)
41 " .long 0\n" 41 " .long 0\n"
42 " .long 0\n" 42 " .long 0\n"
43 ".previous" 43 ".previous"
44 : "=r" (ret) : "i" (CPU_FTR_601)); 44 : "=r" (ret) : "i" (CPU_FTR_601), "i" (SPRN_TBRL));
45 return ret; 45 return ret;
46#endif 46#endif
47} 47}
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 161ab662843b..89e3ef2496ac 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -96,6 +96,7 @@ static inline int prrn_is_enabled(void)
 #ifdef CONFIG_PPC64
 #include <asm/smp.h>
 
+#define topology_physical_package_id(cpu)	(cpu_to_chip_id(cpu))
 #define topology_thread_cpumask(cpu)	(per_cpu(cpu_sibling_map, cpu))
 #define topology_core_cpumask(cpu)	(per_cpu(cpu_core_map, cpu))
 #define topology_core_id(cpu)		(cpu_to_core_id(cpu))
diff --git a/arch/powerpc/include/asm/udbg.h b/arch/powerpc/include/asm/udbg.h
index dc590919f8eb..b51fba10e733 100644
--- a/arch/powerpc/include/asm/udbg.h
+++ b/arch/powerpc/include/asm/udbg.h
@@ -27,10 +27,11 @@ extern void udbg_printf(const char *fmt, ...)
 	__attribute__ ((format (printf, 1, 2)));
 extern void udbg_progress(char *s, unsigned short hex);
 
-extern void udbg_init_uart(void __iomem *comport, unsigned int speed,
-			   unsigned int clock);
-extern unsigned int udbg_probe_uart_speed(void __iomem *comport,
-					  unsigned int clock);
+extern void udbg_uart_init_mmio(void __iomem *addr, unsigned int stride);
+extern void udbg_uart_init_pio(unsigned long port, unsigned int stride);
+
+extern void udbg_uart_setup(unsigned int speed, unsigned int clock);
+extern unsigned int udbg_probe_uart_speed(unsigned int clock);
 
 struct device_node;
 extern void udbg_scc_init(int force_scc);
diff --git a/arch/powerpc/include/uapi/asm/elf.h b/arch/powerpc/include/uapi/asm/elf.h
index 05b8d560cfba..7e39c9146a71 100644
--- a/arch/powerpc/include/uapi/asm/elf.h
+++ b/arch/powerpc/include/uapi/asm/elf.h
@@ -107,26 +107,25 @@ typedef elf_gregset_t32 compat_elf_gregset_t;
 # define ELF_NVRREG	34	/* includes vscr & vrsave in split vectors */
 # define ELF_NVSRHALFREG 32	/* Half the vsx registers */
 # define ELF_GREG_TYPE	elf_greg_t64
+# define ELF_ARCH	EM_PPC64
+# define ELF_CLASS	ELFCLASS64
+typedef elf_greg_t64 elf_greg_t;
+typedef elf_gregset_t64 elf_gregset_t;
 #else
 # define ELF_NEVRREG	34	/* includes acc (as 2) */
 # define ELF_NVRREG	33	/* includes vscr */
 # define ELF_GREG_TYPE	elf_greg_t32
 # define ELF_ARCH	EM_PPC
 # define ELF_CLASS	ELFCLASS32
-# define ELF_DATA	ELFDATA2MSB
+typedef elf_greg_t32 elf_greg_t;
+typedef elf_gregset_t32 elf_gregset_t;
 #endif /* __powerpc64__ */
 
-#ifndef ELF_ARCH
-# define ELF_ARCH	EM_PPC64
-# define ELF_CLASS	ELFCLASS64
-# define ELF_DATA	ELFDATA2MSB
-  typedef elf_greg_t64 elf_greg_t;
-  typedef elf_gregset_t64 elf_gregset_t;
+#ifdef __BIG_ENDIAN__
+#define ELF_DATA	ELFDATA2MSB
 #else
- /* Assumption: ELF_ARCH == EM_PPC and ELF_CLASS == ELFCLASS32 */
-  typedef elf_greg_t32 elf_greg_t;
-  typedef elf_gregset_t32 elf_gregset_t;
-#endif /* ELF_ARCH */
+#define ELF_DATA	ELFDATA2LSB
+#endif
 
 /* Floating point registers */
 typedef double elf_fpreg_t;
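
With ELF_DATA now tracking __BIG_ENDIAN__, the usual elf_check_arch-style identity test compares a binary's e_ident and e_machine against these constants. A hedged user-space equivalent for a big-endian ppc64 build (standard <elf.h> values; this is not the kernel's actual checker):

    #include <elf.h>
    #include <stdint.h>

    /* would this build's ELF loader accept the binary? */
    static int would_accept(const unsigned char *e_ident,
                            uint16_t e_machine)
    {
            return e_ident[EI_CLASS] == ELFCLASS64    /* ELF_CLASS */
                && e_ident[EI_DATA]  == ELFDATA2MSB   /* ELF_DATA  */
                && e_machine         == EM_PPC64;     /* ELF_ARCH  */
    }
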
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index a8619bfe879e..445cb6e39d5b 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -55,7 +55,6 @@ obj-$(CONFIG_PPC_RTAS) += rtas.o rtas-rtc.o $(rtaspci-y-y)
 obj-$(CONFIG_PPC_RTAS_DAEMON)	+= rtasd.o
 obj-$(CONFIG_RTAS_FLASH)	+= rtas_flash.o
 obj-$(CONFIG_RTAS_PROC)	+= rtas-proc.o
-obj-$(CONFIG_LPARCFG)		+= lparcfg.o
 obj-$(CONFIG_IBMVIO)		+= vio.o
 obj-$(CONFIG_IBMEBUS)		+= ibmebus.o
 obj-$(CONFIG_EEH)		+= eeh.o eeh_pe.o eeh_dev.o eeh_cache.o \
@@ -117,9 +116,7 @@ obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER)	+= ftrace.o
 obj-$(CONFIG_FTRACE_SYSCALLS)	+= ftrace.o
 
-obj-$(CONFIG_8XX_MINIMAL_FPEMU) += softemu8xx.o
-
-ifneq ($(CONFIG_PPC_INDIRECT_IO),y)
+ifneq ($(CONFIG_PPC_INDIRECT_PIO),y)
 obj-y				+= iomap.o
 endif
 
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index ee5b690a0bed..a27ccd5dc6b9 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -651,6 +651,10 @@ static int emulate_vsx(unsigned char __user *addr, unsigned int reg,
 	int sw = 0;
 	int i, j;
 
+	/* userland only */
+	if (unlikely(!user_mode(regs)))
+		return 0;
+
 	flush_vsx_to_thread(current);
 
 	if (reg < 32)
@@ -764,6 +768,16 @@ int fix_alignment(struct pt_regs *regs)
 	nb = aligninfo[instr].len;
 	flags = aligninfo[instr].flags;
 
+	/* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */
+	if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) {
+		nb = 8;
+		flags = LD+SW;
+	} else if (IS_XFORM(instruction) &&
+		   ((instruction >> 1) & 0x3ff) == 660) {
+		nb = 8;
+		flags = ST+SW;
+	}
+
 	/* Byteswap little endian loads and stores */
 	swiz = 0;
 	if (regs->msr & MSR_LE) {
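
The new ldbrx/stdbrx special case keys on the X-form extended opcode, which `(instruction >> 1) & 0x3ff` extracts: 532 is ldbrx, 660 is stdbrx, and both need nb = 8 with the extra byteswap flag. A quick stand-alone check of that decode (the instruction encodings here are hand-computed for the demo):

    #include <assert.h>
    #include <stdint.h>

    /* X-form: primary opcode in the top 6 bits, extended opcode
     * in bits 21-30, i.e. (instr >> 1) & 0x3ff */
    static unsigned int xop(uint32_t instr)
    {
            return (instr >> 1) & 0x3ff;
    }

    int main(void)
    {
            uint32_t ldbrx  = 0x7c000428;   /* ldbrx  r0,0,r0 */
            uint32_t stdbrx = 0x7c000528;   /* stdbrx r0,0,r0 */

            assert((ldbrx >> 26) == 31 && xop(ldbrx) == 532);
            assert((stdbrx >> 26) == 31 && xop(stdbrx) == 660);
            return 0;
    }
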
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index c7e8afc2ead0..d8958be5f31a 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -138,6 +138,9 @@ int main(void)
 	DEFINE(THREAD_TM_TFHAR, offsetof(struct thread_struct, tm_tfhar));
 	DEFINE(THREAD_TM_TEXASR, offsetof(struct thread_struct, tm_texasr));
 	DEFINE(THREAD_TM_TFIAR, offsetof(struct thread_struct, tm_tfiar));
+	DEFINE(THREAD_TM_TAR, offsetof(struct thread_struct, tm_tar));
+	DEFINE(THREAD_TM_PPR, offsetof(struct thread_struct, tm_ppr));
+	DEFINE(THREAD_TM_DSCR, offsetof(struct thread_struct, tm_dscr));
 	DEFINE(PT_CKPT_REGS, offsetof(struct thread_struct, ckpt_regs));
 	DEFINE(THREAD_TRANSACT_VR0, offsetof(struct thread_struct,
 					 transact_vr[0]));
@@ -451,6 +454,7 @@ int main(void)
 	DEFINE(VCPU_SPRG2, offsetof(struct kvm_vcpu, arch.shregs.sprg2));
 	DEFINE(VCPU_SPRG3, offsetof(struct kvm_vcpu, arch.shregs.sprg3));
 #endif
+	DEFINE(VCPU_SHARED_SPRG3, offsetof(struct kvm_vcpu_arch_shared, sprg3));
 	DEFINE(VCPU_SHARED_SPRG4, offsetof(struct kvm_vcpu_arch_shared, sprg4));
 	DEFINE(VCPU_SHARED_SPRG5, offsetof(struct kvm_vcpu_arch_shared, sprg5));
 	DEFINE(VCPU_SHARED_SPRG6, offsetof(struct kvm_vcpu_arch_shared, sprg6));
diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c
index ac8f52732fde..41c011cb6070 100644
--- a/arch/powerpc/kernel/btext.c
+++ b/arch/powerpc/kernel/btext.c
@@ -25,11 +25,6 @@
 static void scrollscreen(void);
 #endif
 
-static void draw_byte(unsigned char c, long locX, long locY);
-static void draw_byte_32(unsigned char *bits, unsigned int *base, int rb);
-static void draw_byte_16(unsigned char *bits, unsigned int *base, int rb);
-static void draw_byte_8(unsigned char *bits, unsigned int *base, int rb);
-
 #define __force_data __attribute__((__section__(".data")))
 
 static int g_loc_X __force_data;
@@ -52,6 +47,26 @@ static unsigned char vga_font[cmapsz];
 int boot_text_mapped __force_data = 0;
 int force_printk_to_btext = 0;
 
+extern void rmci_on(void);
+extern void rmci_off(void);
+
+static inline void rmci_maybe_on(void)
+{
+#if defined(CONFIG_PPC_EARLY_DEBUG_BOOTX) && defined(CONFIG_PPC64)
+	if (!(mfmsr() & MSR_DR))
+		rmci_on();
+#endif
+}
+
+static inline void rmci_maybe_off(void)
+{
+#if defined(CONFIG_PPC_EARLY_DEBUG_BOOTX) && defined(CONFIG_PPC64)
+	if (!(mfmsr() & MSR_DR))
+		rmci_off();
+#endif
+}
+
+
 #ifdef CONFIG_PPC32
 /* Calc BAT values for mapping the display and store them
  * in disp_BAT. Those values are then used from head.S to map
@@ -134,7 +149,7 @@ void __init btext_unmap(void)
  * changes.
  */
 
-static void map_boot_text(void)
+void btext_map(void)
 {
 	unsigned long base, offset, size;
 	unsigned char *vbase;
@@ -209,7 +224,7 @@ int btext_initialize(struct device_node *np)
 	dispDeviceRect[2] = width;
 	dispDeviceRect[3] = height;
 
-	map_boot_text();
+	btext_map();
 
 	return 0;
 }
@@ -283,7 +298,7 @@ void btext_update_display(unsigned long phys, int width, int height,
 		iounmap(logicalDisplayBase);
 		boot_text_mapped = 0;
 	}
-	map_boot_text();
+	btext_map();
 	g_loc_X = 0;
 	g_loc_Y = 0;
 	g_max_loc_X = width / 8;
@@ -298,6 +313,7 @@ void btext_clearscreen(void)
 			   (dispDeviceDepth >> 3)) >> 2;
 	int i,j;
 
+	rmci_maybe_on();
 	for (i=0; i<(dispDeviceRect[3] - dispDeviceRect[1]); i++)
 	{
 		unsigned int *ptr	= base;
@@ -305,6 +321,7 @@ void btext_clearscreen(void)
 			*(ptr++) = 0;
 		base += (dispDeviceRowBytes >> 2);
 	}
+	rmci_maybe_off();
 }
 
 void btext_flushscreen(void)
@@ -355,6 +372,8 @@ static void scrollscreen(void)
 			   (dispDeviceDepth >> 3)) >> 2;
 	int i,j;
 
+	rmci_maybe_on();
+
 	for (i=0; i<(dispDeviceRect[3] - dispDeviceRect[1] - 16); i++)
 	{
 		unsigned int *src_ptr = src;
@@ -371,9 +390,116 @@ static void scrollscreen(void)
 			*(dst_ptr++) = 0;
 		dst += (dispDeviceRowBytes >> 2);
 	}
+
+	rmci_maybe_off();
 }
 #endif /* ndef NO_SCROLL */
 
+static unsigned int expand_bits_8[16] = {
+	0x00000000,
+	0x000000ff,
+	0x0000ff00,
+	0x0000ffff,
+	0x00ff0000,
+	0x00ff00ff,
+	0x00ffff00,
+	0x00ffffff,
+	0xff000000,
+	0xff0000ff,
+	0xff00ff00,
+	0xff00ffff,
+	0xffff0000,
+	0xffff00ff,
+	0xffffff00,
+	0xffffffff
+};
+
+static unsigned int expand_bits_16[4] = {
+	0x00000000,
+	0x0000ffff,
+	0xffff0000,
+	0xffffffff
+};
+
+
+static void draw_byte_32(unsigned char *font, unsigned int *base, int rb)
+{
+	int l, bits;
+	int fg = 0xFFFFFFFFUL;
+	int bg = 0x00000000UL;
+
+	for (l = 0; l < 16; ++l)
+	{
+		bits = *font++;
+		base[0] = (-(bits >> 7) & fg) ^ bg;
+		base[1] = (-((bits >> 6) & 1) & fg) ^ bg;
+		base[2] = (-((bits >> 5) & 1) & fg) ^ bg;
+		base[3] = (-((bits >> 4) & 1) & fg) ^ bg;
+		base[4] = (-((bits >> 3) & 1) & fg) ^ bg;
+		base[5] = (-((bits >> 2) & 1) & fg) ^ bg;
+		base[6] = (-((bits >> 1) & 1) & fg) ^ bg;
+		base[7] = (-(bits & 1) & fg) ^ bg;
+		base = (unsigned int *) ((char *)base + rb);
+	}
+}
+
+static inline void draw_byte_16(unsigned char *font, unsigned int *base, int rb)
+{
+	int l, bits;
+	int fg = 0xFFFFFFFFUL;
+	int bg = 0x00000000UL;
+	unsigned int *eb = (int *)expand_bits_16;
+
+	for (l = 0; l < 16; ++l)
+	{
+		bits = *font++;
+		base[0] = (eb[bits >> 6] & fg) ^ bg;
+		base[1] = (eb[(bits >> 4) & 3] & fg) ^ bg;
+		base[2] = (eb[(bits >> 2) & 3] & fg) ^ bg;
+		base[3] = (eb[bits & 3] & fg) ^ bg;
+		base = (unsigned int *) ((char *)base + rb);
+	}
+}
+
+static inline void draw_byte_8(unsigned char *font, unsigned int *base, int rb)
+{
+	int l, bits;
+	int fg = 0x0F0F0F0FUL;
+	int bg = 0x00000000UL;
+	unsigned int *eb = (int *)expand_bits_8;
+
+	for (l = 0; l < 16; ++l)
+	{
+		bits = *font++;
+		base[0] = (eb[bits >> 4] & fg) ^ bg;
+		base[1] = (eb[bits & 0xf] & fg) ^ bg;
+		base = (unsigned int *) ((char *)base + rb);
+	}
+}
+
+static noinline void draw_byte(unsigned char c, long locX, long locY)
+{
+	unsigned char *base	= calc_base(locX << 3, locY << 4);
+	unsigned char *font	= &vga_font[((unsigned int)c) * 16];
+	int rb			= dispDeviceRowBytes;
+
+	rmci_maybe_on();
+	switch(dispDeviceDepth) {
+	case 24:
+	case 32:
+		draw_byte_32(font, (unsigned int *)base, rb);
+		break;
+	case 15:
+	case 16:
+		draw_byte_16(font, (unsigned int *)base, rb);
+		break;
+	case 8:
+		draw_byte_8(font, (unsigned int *)base, rb);
+		break;
+	}
+	rmci_maybe_off();
+}
+
 void btext_drawchar(char c)
 {
 	int cline = 0;
@@ -465,107 +591,12 @@ void btext_drawhex(unsigned long v)
 		btext_drawchar(' ');
 }
 
-static void draw_byte(unsigned char c, long locX, long locY)
-{
-	unsigned char *base	= calc_base(locX << 3, locY << 4);
-	unsigned char *font	= &vga_font[((unsigned int)c) * 16];
-	int rb			= dispDeviceRowBytes;
-
-	switch(dispDeviceDepth) {
-	case 24:
-	case 32:
-		draw_byte_32(font, (unsigned int *)base, rb);
-		break;
-	case 15:
-	case 16:
-		draw_byte_16(font, (unsigned int *)base, rb);
-		break;
-	case 8:
-		draw_byte_8(font, (unsigned int *)base, rb);
-		break;
-	}
-}
-
-static unsigned int expand_bits_8[16] = {
-	0x00000000,
-	0x000000ff,
-	0x0000ff00,
-	0x0000ffff,
-	0x00ff0000,
-	0x00ff00ff,
-	0x00ffff00,
-	0x00ffffff,
-	0xff000000,
-	0xff0000ff,
-	0xff00ff00,
-	0xff00ffff,
-	0xffff0000,
-	0xffff00ff,
-	0xffffff00,
-	0xffffffff
-};
-
-static unsigned int expand_bits_16[4] = {
-	0x00000000,
-	0x0000ffff,
-	0xffff0000,
-	0xffffffff
-};
-
-
-static void draw_byte_32(unsigned char *font, unsigned int *base, int rb)
-{
-	int l, bits;
-	int fg = 0xFFFFFFFFUL;
-	int bg = 0x00000000UL;
-
-	for (l = 0; l < 16; ++l)
-	{
-		bits = *font++;
-		base[0] = (-(bits >> 7) & fg) ^ bg;
-		base[1] = (-((bits >> 6) & 1) & fg) ^ bg;
-		base[2] = (-((bits >> 5) & 1) & fg) ^ bg;
-		base[3] = (-((bits >> 4) & 1) & fg) ^ bg;
-		base[4] = (-((bits >> 3) & 1) & fg) ^ bg;
-		base[5] = (-((bits >> 2) & 1) & fg) ^ bg;
-		base[6] = (-((bits >> 1) & 1) & fg) ^ bg;
-		base[7] = (-(bits & 1) & fg) ^ bg;
-		base = (unsigned int *) ((char *)base + rb);
-	}
-}
-
-static void draw_byte_16(unsigned char *font, unsigned int *base, int rb)
-{
-	int l, bits;
-	int fg = 0xFFFFFFFFUL;
-	int bg = 0x00000000UL;
-	unsigned int *eb = (int *)expand_bits_16;
-
-	for (l = 0; l < 16; ++l)
-	{
-		bits = *font++;
-		base[0] = (eb[bits >> 6] & fg) ^ bg;
-		base[1] = (eb[(bits >> 4) & 3] & fg) ^ bg;
-		base[2] = (eb[(bits >> 2) & 3] & fg) ^ bg;
-		base[3] = (eb[bits & 3] & fg) ^ bg;
-		base = (unsigned int *) ((char *)base + rb);
-	}
-}
-
-static void draw_byte_8(unsigned char *font, unsigned int *base, int rb)
+void __init udbg_init_btext(void)
 {
-	int l, bits;
-	int fg = 0x0F0F0F0FUL;
-	int bg = 0x00000000UL;
-	unsigned int *eb = (int *)expand_bits_8;
-
-	for (l = 0; l < 16; ++l)
-	{
-		bits = *font++;
-		base[0] = (eb[bits >> 4] & fg) ^ bg;
-		base[1] = (eb[bits & 0xf] & fg) ^ bg;
-		base = (unsigned int *) ((char *)base + rb);
-	}
+	/* If btext is enabled, we might have a BAT setup for early display,
+	 * thus we do enable some very basic udbg output
+	 */
+	udbg_putc = btext_drawchar;
 }
 
 static unsigned char vga_font[cmapsz] = {
@@ -913,10 +944,3 @@ static unsigned char vga_font[cmapsz] = {
 0x00, 0x00, 0x00, 0x00,
 };
 
-void __init udbg_init_btext(void)
-{
-	/* If btext is enabled, we might have a BAT setup for early display,
-	 * thus we do enable some very basic udbg output
-	 */
-	udbg_putc = btext_drawchar;
-}
diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
index 9262cf2bec4b..654932727873 100644
--- a/arch/powerpc/kernel/cacheinfo.c
+++ b/arch/powerpc/kernel/cacheinfo.c
@@ -196,7 +196,7 @@ static void cache_cpu_set(struct cache *cache, int cpu)
 static int cache_size(const struct cache *cache, unsigned int *ret)
 {
 	const char *propname;
-	const u32 *cache_size;
+	const __be32 *cache_size;
 
 	propname = cache_type_info[cache->type].size_prop;
 
@@ -204,7 +204,7 @@ static int cache_size(const struct cache *cache, unsigned int *ret)
 	if (!cache_size)
 		return -ENODEV;
 
-	*ret = *cache_size;
+	*ret = of_read_number(cache_size, 1);
 	return 0;
 }
 
@@ -222,7 +222,7 @@ static int cache_size_kb(const struct cache *cache, unsigned int *ret)
 /* not cache_line_size() because that's a macro in include/linux/cache.h */
 static int cache_get_line_size(const struct cache *cache, unsigned int *ret)
 {
-	const u32 *line_size;
+	const __be32 *line_size;
 	int i, lim;
 
 	lim = ARRAY_SIZE(cache_type_info[cache->type].line_size_props);
@@ -239,14 +239,14 @@ static int cache_get_line_size(const struct cache *cache, unsigned int *ret)
 	if (!line_size)
 		return -ENODEV;
 
-	*ret = *line_size;
+	*ret = of_read_number(line_size, 1);
 	return 0;
 }
 
 static int cache_nr_sets(const struct cache *cache, unsigned int *ret)
 {
 	const char *propname;
-	const u32 *nr_sets;
+	const __be32 *nr_sets;
 
 	propname = cache_type_info[cache->type].nr_sets_prop;
 
@@ -254,7 +254,7 @@ static int cache_nr_sets(const struct cache *cache, unsigned int *ret)
 	if (!nr_sets)
 		return -ENODEV;
 
-	*ret = *nr_sets;
+	*ret = of_read_number(nr_sets, 1);
 	return 0;
 }
 
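
of_read_number() is the endian-safe way to pull integers out of device-tree properties: DT cells are big-endian by specification, so dereferencing the pointer directly only worked on big-endian hosts. A simplified model of the single/multi-cell read (not the kernel's exact implementation):

    #include <stdint.h>
    #include <stdio.h>

    /* DT cells are big-endian 32-bit words; fold 'cells' of them
     * into one number, most significant cell first */
    static uint64_t read_dt_number(const uint8_t *p, int cells)
    {
            uint64_t v = 0;

            while (cells--) {
                    v = (v << 32) |
                        ((uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 |
                         (uint32_t)p[2] << 8  | (uint32_t)p[3]);
                    p += 4;
            }
            return v;
    }

    int main(void)
    {
            /* a 32 KiB "d-cache-size" cell as stored in the blob */
            const uint8_t prop[] = { 0x00, 0x00, 0x80, 0x00 };

            printf("%llu\n", (unsigned long long)read_dt_number(prop, 1));
            return 0;
    }
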
diff --git a/arch/powerpc/kernel/cpu_setup_fsl_booke.S b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
index 0b9af015bedc..bfb18c7290b7 100644
--- a/arch/powerpc/kernel/cpu_setup_fsl_booke.S
+++ b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
@@ -75,7 +75,7 @@ _GLOBAL(__setup_cpu_e500v2)
 	bl	__e500_icache_setup
 	bl	__e500_dcache_setup
 	bl	__setup_e500_ivors
-#ifdef CONFIG_FSL_RIO
+#if defined(CONFIG_FSL_RIO) || defined(CONFIG_FSL_PCI)
 	/* Ensure that RFXE is set */
 	mfspr	r3,SPRN_HID1
 	oris	r3,r3,HID1_RFXE@h
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 22973a74df73..597d954e5860 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -2105,7 +2105,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
 			MMU_FTR_USE_TLBILX,
 		.icache_bsize		= 64,
 		.dcache_bsize		= 64,
-		.num_pmcs		= 4,
+		.num_pmcs		= 6,
 		.oprofile_cpu_type	= "ppc/e6500",
 		.oprofile_type		= PPC_OPROFILE_FSL_EMB,
 		.cpu_setup		= __setup_cpu_e6500,
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index ea9414c8088d..55593ee2d5aa 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -1061,7 +1061,7 @@ static const struct file_operations proc_eeh_operations = {
 
 static int __init eeh_init_proc(void)
 {
-	if (machine_is(pseries))
+	if (machine_is(pseries) || machine_is(powernv))
 		proc_create("powerpc/eeh", 0, NULL, &proc_eeh_operations);
 	return 0;
 }
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index ab15b8d057ad..c04cdf70d487 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -102,7 +102,8 @@ BEGIN_FW_FTR_SECTION
 	/* if from user, see if there are any DTL entries to process */
 	ld	r10,PACALPPACAPTR(r13)	/* get ptr to VPA */
 	ld	r11,PACA_DTL_RIDX(r13)	/* get log read index */
-	ld	r10,LPPACA_DTLIDX(r10)	/* get log write index */
+	addi	r10,r10,LPPACA_DTLIDX
+	LDX_BE	r10,0,r10		/* get log write index */
 	cmpd	cr1,r11,r10
 	beq+	cr1,33f
 	bl	.accumulate_stolen_time
@@ -449,15 +450,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
 
 #ifdef CONFIG_PPC_BOOK3S_64
 BEGIN_FTR_SECTION
-	/*
-	 * Back up the TAR across context switches. Note that the TAR is not
-	 * available for use in the kernel. (To provide this, the TAR should
-	 * be backed up/restored on exception entry/exit instead, and be in
-	 * pt_regs. FIXME, this should be in pt_regs anyway (for debug).)
-	 */
-	mfspr	r0,SPRN_TAR
-	std	r0,THREAD_TAR(r3)
-
 	/* Event based branch registers */
 	mfspr	r0, SPRN_BESCR
 	std	r0, THREAD_BESCR(r3)
@@ -531,9 +523,11 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
 	 */
 	ld	r9,PACA_SLBSHADOWPTR(r13)
 	li	r12,0
 	std	r12,SLBSHADOW_STACKESID(r9)	/* Clear ESID */
-	std	r7,SLBSHADOW_STACKVSID(r9)	/* Save VSID */
-	std	r0,SLBSHADOW_STACKESID(r9)	/* Save ESID */
+	li	r12,SLBSHADOW_STACKVSID
+	STDX_BE	r7,r12,r9			/* Save VSID */
+	li	r12,SLBSHADOW_STACKESID
+	STDX_BE	r0,r12,r9			/* Save ESID */
 
 	/* No need to check for MMU_FTR_NO_SLBIE_B here, since when
 	 * we have 1TB segments, the only CPUs known to have the errata
@@ -586,7 +580,13 @@ BEGIN_FTR_SECTION
 	cmpwi	r6,0
 	bne	1f
 	ld	r0,0(r7)
-1:	cmpd	r0,r25
+1:
+BEGIN_FTR_SECTION_NESTED(70)
+	mfspr	r8, SPRN_FSCR
+	rldimi	r8, r6, FSCR_DSCR_LG, (63 - FSCR_DSCR_LG)
+	mtspr	SPRN_FSCR, r8
+END_FTR_SECTION_NESTED(CPU_FTR_ARCH_207S, CPU_FTR_ARCH_207S, 70)
+	cmpd	r0,r25
 	beq	2f
 	mtspr	SPRN_DSCR,r0
 2:
@@ -721,9 +721,9 @@ resume_kernel:
 
 	/*
 	 * Here we are preempting the current task. We want to make
-	 * sure we are soft-disabled first
+	 * sure we are soft-disabled first and reconcile irq state.
 	 */
-	SOFT_DISABLE_INTS(r3,r4)
+	RECONCILE_IRQ_STATE(r3,r4)
 1:	bl	.preempt_schedule_irq
 
 	/* Re-test flags and eventually loop */
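
The nested feature section added above deposits the new thread's DSCR-enable flag into FSCR at bit FSCR_DSCR_LG without touching the other facility bits. rldimi r8,r6,SH,MB with SH = FSCR_DSCR_LG and MB = 63-FSCR_DSCR_LG inserts exactly one bit; roughly, in C (a sketch assuming r6 carries the flag in its low bit):

    #include <stdint.h>

    #define FSCR_DSCR_LG    2

    /* equivalent of: rldimi r8, r6, FSCR_DSCR_LG, (63 - FSCR_DSCR_LG)
     * where the source register holds 0 or 1 in its low bit */
    static uint64_t fscr_set_dscr(uint64_t fscr, uint64_t user_dscr)
    {
            fscr &= ~(1ULL << FSCR_DSCR_LG);             /* clear old bit */
            fscr |= (user_dscr & 1ULL) << FSCR_DSCR_LG;  /* insert new    */
            return fscr;
    }
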
diff --git a/arch/powerpc/kernel/epapr_paravirt.c b/arch/powerpc/kernel/epapr_paravirt.c
index d44a571e45a7..6300c13bbde4 100644
--- a/arch/powerpc/kernel/epapr_paravirt.c
+++ b/arch/powerpc/kernel/epapr_paravirt.c
@@ -30,22 +30,20 @@ extern u32 epapr_ev_idle_start[];
 
 bool epapr_paravirt_enabled;
 
-static int __init epapr_paravirt_init(void)
+static int __init early_init_dt_scan_epapr(unsigned long node,
+					   const char *uname,
+					   int depth, void *data)
 {
-	struct device_node *hyper_node;
 	const u32 *insts;
-	int len, i;
+	unsigned long len;
+	int i;
 
-	hyper_node = of_find_node_by_path("/hypervisor");
-	if (!hyper_node)
-		return -ENODEV;
-
-	insts = of_get_property(hyper_node, "hcall-instructions", &len);
+	insts = of_get_flat_dt_prop(node, "hcall-instructions", &len);
 	if (!insts)
-		return -ENODEV;
+		return 0;
 
 	if (len % 4 || len > (4 * 4))
-		return -ENODEV;
+		return -1;
 
 	for (i = 0; i < (len / 4); i++) {
 		patch_instruction(epapr_hypercall_start + i, insts[i]);
@@ -55,13 +53,19 @@ static int __init epapr_paravirt_init(void)
 	}
 
 #if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
-	if (of_get_property(hyper_node, "has-idle", NULL))
+	if (of_get_flat_dt_prop(node, "has-idle", NULL))
 		ppc_md.power_save = epapr_ev_idle;
 #endif
 
 	epapr_paravirt_enabled = true;
 
+	return 1;
+}
+
+int __init epapr_paravirt_early_init(void)
+{
+	of_scan_flat_dt(early_init_dt_scan_epapr, NULL);
+
 	return 0;
 }
 
-early_initcall(epapr_paravirt_init);
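
Scanning the flattened tree lets this run from early boot, before the device-tree is unflattened; of_scan_flat_dt() invokes the callback for each node and stops as soon as it returns nonzero, which is why early_init_dt_scan_epapr() returns 1 once /hypervisor has been handled. The walker contract, modelled in miniature (mock types, not the kernel API):

    #include <string.h>

    typedef int (*dt_scan_fn)(unsigned long node, const char *uname,
                              int depth, void *data);

    struct mock_node { unsigned long handle; const char *name; };

    /* stop at the first callback that returns nonzero,
     * like of_scan_flat_dt() */
    static int scan_nodes(const struct mock_node *n, int count,
                          dt_scan_fn fn, void *data)
    {
            int i;

            for (i = 0; i < count; i++)
                    if (fn(n[i].handle, n[i].name, 1, data))
                            return 1;
            return 0;
    }

    static int find_hypervisor(unsigned long node, const char *uname,
                               int depth, void *data)
    {
            if (strcmp(uname, "hypervisor") != 0)
                    return 0;               /* keep walking */
            *(unsigned long *)data = node;  /* record and... */
            return 1;                       /* ...stop the scan */
    }

    int main(void)
    {
            const struct mock_node tree[] = {
                    { 0x10, "cpus" }, { 0x20, "hypervisor" },
            };
            unsigned long found = 0;

            scan_nodes(tree, 2, find_hypervisor, &found);
            return found == 0x20 ? 0 : 1;
    }
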
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 645170a07ada..2d067049db27 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -198,9 +198,9 @@ exc_##n##_common: \
 /* This second version is meant for exceptions that don't immediately
  * hard-enable. We set a bit in paca->irq_happened to ensure that
  * a subsequent call to arch_local_irq_restore() will properly
- * hard-enable and avoid the fast-path
+ * hard-enable and avoid the fast-path, and then reconcile irq state.
  */
-#define INTS_DISABLE	SOFT_DISABLE_INTS(r3,r4)
+#define INTS_DISABLE	RECONCILE_IRQ_STATE(r3,r4)
 
 /* This is called by exceptions that used INTS_KEEP (that did not touch
  * irq indicators in the PACA). This will restore MSR:EE to it's previous
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 4e00d223b2e3..3a9ed6ac224b 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -293,27 +293,31 @@ system_call_pSeries:
293 * out of line to handle them 293 * out of line to handle them
294 */ 294 */
295 . = 0xe00 295 . = 0xe00
296hv_exception_trampoline: 296hv_data_storage_trampoline:
297 SET_SCRATCH0(r13) 297 SET_SCRATCH0(r13)
298 EXCEPTION_PROLOG_0(PACA_EXGEN) 298 EXCEPTION_PROLOG_0(PACA_EXGEN)
299 b h_data_storage_hv 299 b h_data_storage_hv
300 300
301 . = 0xe20 301 . = 0xe20
302hv_instr_storage_trampoline:
302 SET_SCRATCH0(r13) 303 SET_SCRATCH0(r13)
303 EXCEPTION_PROLOG_0(PACA_EXGEN) 304 EXCEPTION_PROLOG_0(PACA_EXGEN)
304 b h_instr_storage_hv 305 b h_instr_storage_hv
305 306
306 . = 0xe40 307 . = 0xe40
308emulation_assist_trampoline:
307 SET_SCRATCH0(r13) 309 SET_SCRATCH0(r13)
308 EXCEPTION_PROLOG_0(PACA_EXGEN) 310 EXCEPTION_PROLOG_0(PACA_EXGEN)
309 b emulation_assist_hv 311 b emulation_assist_hv
310 312
311 . = 0xe60 313 . = 0xe60
314hv_exception_trampoline:
312 SET_SCRATCH0(r13) 315 SET_SCRATCH0(r13)
313 EXCEPTION_PROLOG_0(PACA_EXGEN) 316 EXCEPTION_PROLOG_0(PACA_EXGEN)
314 b hmi_exception_hv 317 b hmi_exception_hv
315 318
316 . = 0xe80 319 . = 0xe80
320hv_doorbell_trampoline:
317 SET_SCRATCH0(r13) 321 SET_SCRATCH0(r13)
318 EXCEPTION_PROLOG_0(PACA_EXGEN) 322 EXCEPTION_PROLOG_0(PACA_EXGEN)
319 b h_doorbell_hv 323 b h_doorbell_hv
@@ -323,32 +327,32 @@ hv_exception_trampoline:
  * prolog code of the PerformanceMonitor one. A little
  * trickery is thus necessary
  */
-performance_monitor_pSeries_1:
 	. = 0xf00
+performance_monitor_pseries_trampoline:
 	SET_SCRATCH0(r13)
 	EXCEPTION_PROLOG_0(PACA_EXGEN)
 	b	performance_monitor_pSeries
 
-altivec_unavailable_pSeries_1:
 	. = 0xf20
+altivec_unavailable_pseries_trampoline:
 	SET_SCRATCH0(r13)
 	EXCEPTION_PROLOG_0(PACA_EXGEN)
 	b	altivec_unavailable_pSeries
 
-vsx_unavailable_pSeries_1:
 	. = 0xf40
+vsx_unavailable_pseries_trampoline:
 	SET_SCRATCH0(r13)
 	EXCEPTION_PROLOG_0(PACA_EXGEN)
 	b	vsx_unavailable_pSeries
 
-facility_unavailable_trampoline:
 	. = 0xf60
+facility_unavailable_trampoline:
 	SET_SCRATCH0(r13)
 	EXCEPTION_PROLOG_0(PACA_EXGEN)
 	b	facility_unavailable_pSeries
 
-hv_facility_unavailable_trampoline:
 	. = 0xf80
+hv_facility_unavailable_trampoline:
 	SET_SCRATCH0(r13)
 	EXCEPTION_PROLOG_0(PACA_EXGEN)
 	b	facility_unavailable_hv
@@ -367,11 +371,7 @@ denorm_exception_hv:
 	HMT_MEDIUM_PPR_DISCARD
 	mtspr	SPRN_SPRG_HSCRATCH0,r13
 	EXCEPTION_PROLOG_0(PACA_EXGEN)
-	std	r11,PACA_EXGEN+EX_R11(r13)
-	std	r12,PACA_EXGEN+EX_R12(r13)
-	mfspr	r9,SPRN_SPRG_HSCRATCH0
-	std	r9,PACA_EXGEN+EX_R13(r13)
-	mfcr	r9
+	EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0x1500)
 
 #ifdef CONFIG_PPC_DENORMALISATION
 	mfspr	r10,SPRN_HSRR1
@@ -381,6 +381,7 @@ denorm_exception_hv:
 	bne+	denorm_assist
 #endif
 
+	KVMTEST(0x1500)
 	EXCEPTION_PROLOG_PSERIES_1(denorm_common, EXC_HV)
 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x1500)
 
@@ -501,6 +502,10 @@ denorm_done:
 	mtcrf	0x80,r9
 	ld	r9,PACA_EXGEN+EX_R9(r13)
 	RESTORE_PPR_PACA(PACA_EXGEN, r10)
+BEGIN_FTR_SECTION
+	ld	r10,PACA_EXGEN+EX_CFAR(r13)
+	mtspr	SPRN_CFAR,r10
+END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
 	ld	r10,PACA_EXGEN+EX_R10(r13)
 	ld	r11,PACA_EXGEN+EX_R11(r13)
 	ld	r12,PACA_EXGEN+EX_R12(r13)
@@ -808,6 +813,7 @@ system_call_relon_pSeries:
 	b	.	/* Can't happen, see v2.07 Book III-S section 6.5 */
 
 	. = 0x4e40
+emulation_assist_relon_trampoline:
 	SET_SCRATCH0(r13)
 	EXCEPTION_PROLOG_0(PACA_EXGEN)
 	b	emulation_assist_relon_hv
@@ -816,39 +822,40 @@ system_call_relon_pSeries:
 	b	.	/* Can't happen, see v2.07 Book III-S section 6.5 */
 
 	. = 0x4e80
+h_doorbell_relon_trampoline:
 	SET_SCRATCH0(r13)
 	EXCEPTION_PROLOG_0(PACA_EXGEN)
 	b	h_doorbell_relon_hv
 
-performance_monitor_relon_pSeries_1:
 	. = 0x4f00
+performance_monitor_relon_pseries_trampoline:
 	SET_SCRATCH0(r13)
 	EXCEPTION_PROLOG_0(PACA_EXGEN)
 	b	performance_monitor_relon_pSeries
 
-altivec_unavailable_relon_pSeries_1:
 	. = 0x4f20
+altivec_unavailable_relon_pseries_trampoline:
 	SET_SCRATCH0(r13)
 	EXCEPTION_PROLOG_0(PACA_EXGEN)
 	b	altivec_unavailable_relon_pSeries
 
-vsx_unavailable_relon_pSeries_1:
 	. = 0x4f40
+vsx_unavailable_relon_pseries_trampoline:
 	SET_SCRATCH0(r13)
 	EXCEPTION_PROLOG_0(PACA_EXGEN)
 	b	vsx_unavailable_relon_pSeries
 
-facility_unavailable_relon_trampoline:
 	. = 0x4f60
+facility_unavailable_relon_trampoline:
 	SET_SCRATCH0(r13)
 	EXCEPTION_PROLOG_0(PACA_EXGEN)
 	b	facility_unavailable_relon_pSeries
 
-hv_facility_unavailable_relon_trampoline:
 	. = 0x4f80
+hv_facility_unavailable_relon_trampoline:
 	SET_SCRATCH0(r13)
 	EXCEPTION_PROLOG_0(PACA_EXGEN)
-	b	facility_unavailable_relon_hv
+	b	hv_facility_unavailable_relon_hv
 
 	STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint)
 #ifdef CONFIG_PPC_DENORMALISATION
@@ -1175,6 +1182,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
1175 b .ret_from_except 1182 b .ret_from_except
1176 1183
1177 STD_EXCEPTION_COMMON(0xf60, facility_unavailable, .facility_unavailable_exception) 1184 STD_EXCEPTION_COMMON(0xf60, facility_unavailable, .facility_unavailable_exception)
1185 STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, .facility_unavailable_exception)
1178 1186
1179 .align 7 1187 .align 7
1180 .globl __end_handlers 1188 .globl __end_handlers
@@ -1188,7 +1196,7 @@ __end_handlers:
1188 STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable) 1196 STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
1189 STD_RELON_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable) 1197 STD_RELON_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
1190 STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable) 1198 STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
1191 STD_RELON_EXCEPTION_HV_OOL(0xf80, facility_unavailable) 1199 STD_RELON_EXCEPTION_HV_OOL(0xf80, hv_facility_unavailable)
1192 1200
1193#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) 1201#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
1194/* 1202/*
diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S
index 8a9b6f59822d..67ee0d6c1070 100644
--- a/arch/powerpc/kernel/head_40x.S
+++ b/arch/powerpc/kernel/head_40x.S
@@ -822,14 +822,6 @@ finish_tlb_load:
 	rfi			/* Should sync shadow TLBs */
 	b	.		/* prevent prefetch past rfi */
 
-/* extern void giveup_fpu(struct task_struct *prev)
- *
- * The PowerPC 4xx family of processors do not have an FPU, so this just
- * returns.
- */
-_ENTRY(giveup_fpu)
-	blr
-
 /* This is where the main kernel code starts.
  */
 start_here:
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index 97e2671cde7f..c334f53453f7 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -784,16 +784,6 @@ _GLOBAL(__fixup_440A_mcheck)
 	sync
 	blr
 
-/*
- * extern void giveup_fpu(struct task_struct *prev)
- *
- * The 44x core does not have an FPU.
- */
-#ifndef CONFIG_PPC_FPU
-_GLOBAL(giveup_fpu)
-	blr
-#endif
-
 _GLOBAL(set_context)
 
 #ifdef CONFIG_BDI_SWITCH
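Aside: the giveup_fpu() asm stubs deleted here (and in the sibling head_* files below) are only safe to drop if FPU-less configurations get a stub elsewhere. A minimal sketch of the usual header-level pattern (an assumption about the companion change, not shown in this diff):

    /* sketch: callers may use giveup_fpu() unconditionally; on a kernel
     * built without CONFIG_PPC_FPU it compiles to nothing instead of
     * branching into a per-platform asm stub */
    #ifdef CONFIG_PPC_FPU
    extern void giveup_fpu(struct task_struct *t);
    #else
    static inline void giveup_fpu(struct task_struct *t) { }
    #endif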
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index b61363d557b5..3d11d8038dee 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -703,6 +703,7 @@ _GLOBAL(relative_toc)
 	mtlr	r0
 	blr
 
+.balign 8
p_toc:	.llong	__toc_start + 0x8000 - 0b
 
 /*
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index b2a5860accfb..1b92a97b1b04 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -691,10 +691,6 @@ modified_instr:
 	b	151b
 #endif
 
-	.globl	giveup_fpu
-giveup_fpu:
-	blr
-
 /*
  * This is where the main kernel code starts.
  */
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index d10a7cacccd2..289afaffbbb5 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -948,16 +948,6 @@ _GLOBAL(giveup_spe)
 #endif /* CONFIG_SPE */
 
 /*
- * extern void giveup_fpu(struct task_struct *prev)
- *
- * Not all FSL Book-E cores have an FPU
- */
-#ifndef CONFIG_PPC_FPU
-_GLOBAL(giveup_fpu)
-	blr
-#endif
-
-/*
  * extern void abort(void)
  *
  * At present, this routine just applies a system reset.
diff --git a/arch/powerpc/kernel/io-workarounds.c b/arch/powerpc/kernel/io-workarounds.c
index fa0b54b2a362..24b968f8e4d8 100644
--- a/arch/powerpc/kernel/io-workarounds.c
+++ b/arch/powerpc/kernel/io-workarounds.c
@@ -53,6 +53,7 @@ static struct iowa_bus *iowa_pci_find(unsigned long vaddr, unsigned long paddr)
 	return NULL;
 }
 
+#ifdef CONFIG_PPC_INDIRECT_MMIO
 struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
 {
 	unsigned hugepage_shift;
@@ -90,13 +91,25 @@ struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
 
 	return bus;
 }
+#else /* CONFIG_PPC_INDIRECT_MMIO */
+struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR addr)
+{
+	return NULL;
+}
+#endif /* !CONFIG_PPC_INDIRECT_MMIO */
 
+#ifdef CONFIG_PPC_INDIRECT_PIO
 struct iowa_bus *iowa_pio_find_bus(unsigned long port)
 {
 	unsigned long vaddr = (unsigned long)pci_io_base + port;
 	return iowa_pci_find(vaddr, 0);
 }
-
+#else
+struct iowa_bus *iowa_pio_find_bus(unsigned long port)
+{
+	return NULL;
+}
+#endif
 
 #define DEF_PCI_AC_RET(name, ret, at, al, space, aa)		\
 static ret iowa_##name at					\
@@ -137,6 +150,7 @@ static const struct ppc_pci_io iowa_pci_io = {
 
 };
 
+#ifdef CONFIG_PPC_INDIRECT_MMIO
 static void __iomem *iowa_ioremap(phys_addr_t addr, unsigned long size,
 				  unsigned long flags, void *caller)
 {
@@ -151,6 +165,9 @@ static void __iomem *iowa_ioremap(phys_addr_t addr, unsigned long size,
 	}
 	return res;
 }
+#else /* CONFIG_PPC_INDIRECT_MMIO */
+#define iowa_ioremap NULL
+#endif /* !CONFIG_PPC_INDIRECT_MMIO */
 
 /* Enable IO workaround */
 static void io_workaround_init(void)
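Aside: the `#define iowa_ioremap NULL` fallback above works because iowa_ioremap is only ever stored into a hook slot, never called directly. A minimal sketch of that idiom (names hypothetical, illustration only):

    /* sketch: an optional hook lives in a function-pointer slot, so the
     * compiled-out case can install NULL and callers test before use */
    struct io_hooks {
    	void __iomem *(*ioremap)(phys_addr_t addr, unsigned long size,
    				 unsigned long flags, void *caller);
    };

    static void __iomem *do_ioremap(struct io_hooks *h, phys_addr_t addr,
    				unsigned long size, unsigned long flags,
    				void *caller)
    {
    	if (h->ioremap)			/* hook installed? */
    		return h->ioremap(addr, size, flags, caller);
    	return NULL;			/* placeholder: generic path here */
    }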
diff --git a/arch/powerpc/kernel/io.c b/arch/powerpc/kernel/io.c
index 886381f32c3d..2a2b4aeab80f 100644
--- a/arch/powerpc/kernel/io.c
+++ b/arch/powerpc/kernel/io.c
@@ -25,6 +25,9 @@
 #include <asm/firmware.h>
 #include <asm/bug.h>
 
+/* See definition in io.h */
+bool isa_io_special;
+
 void _insb(const volatile u8 __iomem *port, void *buf, long count)
 {
 	u8 *tbuf = buf;
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index b20ff173a671..0adab06ce5c0 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -105,7 +105,7 @@ static int __init fail_iommu_debugfs(void)
 	struct dentry *dir = fault_create_debugfs_attr("fail_iommu",
 						       NULL, &fail_iommu);
 
-	return PTR_RET(dir);
+	return PTR_ERR_OR_ZERO(dir);
 }
 late_initcall(fail_iommu_debugfs);
 
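Aside: PTR_RET() was an obscure name for a common pattern; PTR_ERR_OR_ZERO() says what it does. Its behaviour is roughly the following (simplified sketch of the <linux/err.h> helper):

    /* rough equivalent of PTR_ERR_OR_ZERO(): collapse "error pointer or
     * valid pointer" into "errno or 0" for init-style return values */
    static inline int ptr_err_or_zero(const void *ptr)
    {
    	if (IS_ERR(ptr))
    		return PTR_ERR(ptr);	/* propagate the encoded errno */
    	return 0;			/* valid pointer: success */
    }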
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
index 0733b05eb856..22e88dd2f34a 100644
--- a/arch/powerpc/kernel/legacy_serial.c
+++ b/arch/powerpc/kernel/legacy_serial.c
@@ -99,7 +99,7 @@ static int __init add_legacy_port(struct device_node *np, int want_index,
 	legacy_serial_count = index + 1;
 
 	/* Check if there is a port who already claimed our slot */
-	if (legacy_serial_infos[index].np != 0) {
+	if (legacy_serial_infos[index].np != NULL) {
 		/* if we still have some room, move it, else override */
 		if (legacy_serial_count < MAX_LEGACY_SERIAL_PORTS) {
 			printk(KERN_DEBUG "Moved legacy port %d -> %d\n",
@@ -152,7 +152,7 @@ static int __init add_legacy_soc_port(struct device_node *np,
 				      struct device_node *soc_dev)
 {
 	u64 addr;
-	const u32 *addrp;
+	const __be32 *addrp;
 	upf_t flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_SHARE_IRQ
 		| UPF_FIXED_PORT;
 	struct device_node *tsi = of_get_parent(np);
@@ -221,14 +221,19 @@ static int __init add_legacy_isa_port(struct device_node *np,
 	/* Translate ISA address. If it fails, we still register the port
 	 * with no translated address so that it can be picked up as an IO
 	 * port later by the serial driver
+	 *
+	 * Note: Don't even try on P8 lpc, we know it's not directly mapped
 	 */
-	taddr = of_translate_address(np, reg);
-	if (taddr == OF_BAD_ADDR)
+	if (!of_device_is_compatible(isa_brg, "ibm,power8-lpc")) {
+		taddr = of_translate_address(np, reg);
+		if (taddr == OF_BAD_ADDR)
+			taddr = 0;
+	} else
 		taddr = 0;
 
 	/* Add port, irq will be dealt with later */
-	return add_legacy_port(np, index, UPIO_PORT, be32_to_cpu(reg[1]), taddr,
-			       NO_IRQ, UPF_BOOT_AUTOCONF, 0);
+	return add_legacy_port(np, index, UPIO_PORT, be32_to_cpu(reg[1]),
+			       taddr, NO_IRQ, UPF_BOOT_AUTOCONF, 0);
 
 }
 
@@ -237,7 +242,7 @@ static int __init add_legacy_pci_port(struct device_node *np,
 				      struct device_node *pci_dev)
 {
 	u64 addr, base;
-	const u32 *addrp;
+	const __be32 *addrp;
 	unsigned int flags;
 	int iotype, index = -1, lindex = 0;
 
@@ -270,7 +275,7 @@ static int __init add_legacy_pci_port(struct device_node *np,
 	if (iotype == UPIO_MEM)
 		base = addr;
 	else
-		base = addrp[2];
+		base = of_read_number(&addrp[2], 1);
 
 	/* Try to guess an index... If we have subdevices of the pci dev,
 	 * we get to their "reg" property
@@ -307,19 +312,31 @@ static int __init add_legacy_pci_port(struct device_node *np,
 
 static void __init setup_legacy_serial_console(int console)
 {
-	struct legacy_serial_info *info =
-		&legacy_serial_infos[console];
+	struct legacy_serial_info *info = &legacy_serial_infos[console];
+	struct plat_serial8250_port *port = &legacy_serial_ports[console];
 	void __iomem *addr;
 
-	if (info->taddr == 0)
-		return;
-	addr = ioremap(info->taddr, 0x1000);
-	if (addr == NULL)
-		return;
+	/* Check if a translated MMIO address has been found */
+	if (info->taddr) {
+		addr = ioremap(info->taddr, 0x1000);
+		if (addr == NULL)
+			return;
+		udbg_uart_init_mmio(addr, 1);
+	} else {
+		/* Check if it's PIO and we support untranslated PIO */
+		if (port->iotype == UPIO_PORT && isa_io_special)
+			udbg_uart_init_pio(port->iobase, 1);
+		else
+			return;
+	}
+
+	/* Try to query the current speed */
 	if (info->speed == 0)
-		info->speed = udbg_probe_uart_speed(addr, info->clock);
+		info->speed = udbg_probe_uart_speed(info->clock);
+
+	/* Set it up */
 	DBG("default console speed = %d\n", info->speed);
-	udbg_init_uart(addr, info->speed, info->clock);
+	udbg_uart_setup(info->speed, info->clock);
 }
 
 /*
@@ -367,10 +384,13 @@ void __init find_legacy_serial_ports(void)
 	/* Next, fill our array with ISA ports */
 	for_each_node_by_type(np, "serial") {
 		struct device_node *isa = of_get_parent(np);
-		if (isa && !strcmp(isa->name, "isa")) {
-			index = add_legacy_isa_port(np, isa);
-			if (index >= 0 && np == stdout)
-				legacy_serial_console = index;
+		if (isa && (!strcmp(isa->name, "isa") ||
+			    !strcmp(isa->name, "lpc"))) {
+			if (of_device_is_available(np)) {
+				index = add_legacy_isa_port(np, isa);
+				if (index >= 0 && np == stdout)
+					legacy_serial_console = index;
+			}
 		}
 		of_node_put(isa);
 	}
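Aside: the u32 to __be32 conversions in this file all follow one rule: device-tree cells are big-endian in memory, so they must be read through of_read_number()/be32_to_cpup() rather than dereferenced directly. A minimal sketch (hypothetical helper, illustration only):

    /* sketch: endian-safe read of the first "reg" cell of a node */
    static u32 read_first_reg_cell(struct device_node *np)
    {
    	const __be32 *reg = of_get_property(np, "reg", NULL);

    	if (!reg)
    		return 0;
    	/* of_read_number(reg, 1) == be32_to_cpup(reg): one 32-bit cell */
    	return of_read_number(reg, 1);
    }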
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index e469f30e6eeb..777d999f563b 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -327,8 +327,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_UNIFIED_ID_CACHE)
 *
 * flush_icache_range(unsigned long start, unsigned long stop)
 */
-_KPROBE(__flush_icache_range)
+_KPROBE(flush_icache_range)
BEGIN_FTR_SECTION
+	isync
 	blr				/* for 601, do nothing */
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
 	li	r5,L1_CACHE_BYTES-1
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 6820e45f557b..971d7e78aff2 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -67,8 +67,10 @@ PPC64_CACHES:
 * flush all bytes from start through stop-1 inclusive
 */
 
-_KPROBE(__flush_icache_range)
-
+_KPROBE(flush_icache_range)
+BEGIN_FTR_SECTION
+	blr
+END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
/*
 * Flush the data cache to memory
 *
@@ -247,6 +249,37 @@ _GLOBAL(__bswapdi2)
 	blr
 
 #if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
+
+_GLOBAL(rmci_on)
+	sync
+	isync
+	li	r3,0x100
+	rldicl	r3,r3,32,0
+	mfspr	r5,SPRN_HID4
+	or	r5,r5,r3
+	sync
+	mtspr	SPRN_HID4,r5
+	isync
+	slbia
+	isync
+	sync
+	blr
+
+_GLOBAL(rmci_off)
+	sync
+	isync
+	li	r3,0x100
+	rldicl	r3,r3,32,0
+	mfspr	r5,SPRN_HID4
+	andc	r5,r5,r3
+	sync
+	mtspr	SPRN_HID4,r5
+	isync
+	slbia
+	isync
+	sync
+	blr
+
 /*
  * Do an IO access in real mode
  */
@@ -416,19 +449,6 @@ _GLOBAL(scom970_write)
 	blr
 #endif /* CONFIG_CPU_FREQ_PMAC64 || CONFIG_CPU_FREQ_MAPLE */
 
-
-/*
- * disable_kernel_fp()
- * Disable the FPU.
- */
-_GLOBAL(disable_kernel_fp)
-	mfmsr	r3
-	rldicl	r0,r3,(63-MSR_FP_LG),1
-	rldicl	r3,r0,(MSR_FP_LG+1),0
-	mtmsrd	r3			/* disable use of fpu now */
-	isync
-	blr
-
 /* kexec_wait(phys_cpu)
  *
  * wait for the flag to change, indicating this kernel is going away but
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index f8f24685f10a..3fc16e3beb9f 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -34,10 +34,10 @@ extern unsigned long __toc_start;
 */
struct lppaca lppaca[] = {
 	[0 ... (NR_LPPACAS-1)] = {
-		.desc = 0xd397d781,	/* "LpPa" */
-		.size = sizeof(struct lppaca),
+		.desc = cpu_to_be32(0xd397d781),	/* "LpPa" */
+		.size = cpu_to_be16(sizeof(struct lppaca)),
 		.fpregs_in_use = 1,
-		.slb_count = 64,
+		.slb_count = cpu_to_be16(64),
 		.vmxregs_in_use = 0,
 		.page_ins = 0,
 	},
@@ -101,8 +101,8 @@ static inline void free_lppacas(void) { }
 */
struct slb_shadow slb_shadow[] __cacheline_aligned = {
 	[0 ... (NR_CPUS-1)] = {
-		.persistent = SLB_NUM_BOLTED,
-		.buffer_length = sizeof(struct slb_shadow),
+		.persistent = cpu_to_be32(SLB_NUM_BOLTED),
+		.buffer_length = cpu_to_be32(sizeof(struct slb_shadow)),
 	},
};
 
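Aside: the lppaca/slb_shadow initialisers switch to cpu_to_be32()/cpu_to_be16() because those structures are shared with the hypervisor in big-endian layout regardless of kernel endianness. The general pattern, as a short sketch (illustrative types and names):

    /* sketch: declare firmware-shared fields with fixed-endian types so
     * sparse can flag any access that forgets the conversion */
    struct fw_shared {
    	__be32	desc;			/* big-endian in memory, always */
    	__be16	size;
    };

    static void fw_shared_init(struct fw_shared *s)
    {
    	s->desc = cpu_to_be32(0xd397d781);	/* explicit conversion */
    	s->size = cpu_to_be16(sizeof(*s));
    }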
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 7d22a675fe1a..905a24bb7acc 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -306,7 +306,7 @@ static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
 	unsigned long io_offset = 0;
 	int i, res_bit;
 
-	if (hose == 0)
+	if (hose == NULL)
 		return NULL;		/* should never happen */
 
 	/* If memory, add on the PCI bridge address offset */
@@ -667,7 +667,7 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
 void pci_process_bridge_OF_ranges(struct pci_controller *hose,
 				  struct device_node *dev, int primary)
 {
-	const u32 *ranges;
+	const __be32 *ranges;
 	int rlen;
 	int pna = of_n_addr_cells(dev);
 	int np = pna + 5;
@@ -687,7 +687,7 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
 	/* Parse it */
 	while ((rlen -= np * 4) >= 0) {
 		/* Read next ranges element */
-		pci_space = ranges[0];
+		pci_space = of_read_number(ranges, 1);
 		pci_addr = of_read_number(ranges + 1, 2);
 		cpu_addr = of_translate_address(dev, ranges + 3);
 		size = of_read_number(ranges + pna + 3, 2);
@@ -704,7 +704,7 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
 		/* Now consume following elements while they are contiguous */
 		for (; rlen >= np * sizeof(u32);
 		     ranges += np, rlen -= np * 4) {
-			if (ranges[0] != pci_space)
+			if (of_read_number(ranges, 1) != pci_space)
 				break;
 			pci_next = of_read_number(ranges + 1, 2);
 			cpu_next = of_translate_address(dev, ranges + 3);
@@ -1055,8 +1055,7 @@ void pcibios_fixup_bus(struct pci_bus *bus)
 	 * bases. This is -not- called when generating the PCI tree from
 	 * the OF device-tree.
 	 */
-	if (bus->self != NULL)
-		pci_read_bridge_bases(bus);
+	pci_read_bridge_bases(bus);
 
 	/* Now fixup the bus bus */
 	pcibios_setup_bus_self(bus);
@@ -1578,7 +1577,7 @@ fake_pci_bus(struct pci_controller *hose, int busnr)
 {
 	static struct pci_bus bus;
 
-	if (hose == 0) {
+	if (hose == NULL) {
 		printk(KERN_ERR "Can't find hose for PCI bus %d!\n", busnr);
 	}
 	bus.number = busnr;
@@ -1674,12 +1673,8 @@ void pcibios_scan_phb(struct pci_controller *hose)
 	/* Configure PCI Express settings */
 	if (bus && !pci_has_flag(PCI_PROBE_ONLY)) {
 		struct pci_bus *child;
-		list_for_each_entry(child, &bus->children, node) {
-			struct pci_dev *self = child->self;
-			if (!self)
-				continue;
-			pcie_bus_configure_settings(child, self->pcie_mpss);
-		}
+		list_for_each_entry(child, &bus->children, node)
+			pcie_bus_configure_settings(child);
 	}
 }
 
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 2e8629654ca8..a9e311f7a9dd 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -109,7 +109,7 @@ int pcibios_unmap_io_space(struct pci_bus *bus)
 	hose = pci_bus_to_host(bus);
 
 	/* Check if we have IOs allocated */
-	if (hose->io_base_alloc == 0)
+	if (hose->io_base_alloc == NULL)
 		return 0;
 
 	pr_debug("IO unmapping for PHB %s\n", hose->dn->full_name);
@@ -272,7 +272,7 @@ static void quirk_radeon_32bit_msi(struct pci_dev *dev)
 	struct pci_dn *pdn = pci_get_pdn(dev);
 
 	if (pdn)
-		pdn->force_32bit_msi = 1;
+		pdn->force_32bit_msi = true;
 }
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x68f2, quirk_radeon_32bit_msi);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0xaa68, quirk_radeon_32bit_msi);
diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c
index df038442548a..1f61fab59d9b 100644
--- a/arch/powerpc/kernel/pci_dn.c
+++ b/arch/powerpc/kernel/pci_dn.c
@@ -47,9 +47,8 @@ struct pci_dn *pci_get_pdn(struct pci_dev *pdev)
 void *update_dn_pci_info(struct device_node *dn, void *data)
 {
 	struct pci_controller *phb = data;
-	const int *type =
-		of_get_property(dn, "ibm,pci-config-space-type", NULL);
-	const u32 *regs;
+	const __be32 *type = of_get_property(dn, "ibm,pci-config-space-type", NULL);
+	const __be32 *regs;
 	struct pci_dn *pdn;
 
 	pdn = zalloc_maybe_bootmem(sizeof(*pdn), GFP_KERNEL);
@@ -63,12 +62,14 @@ void *update_dn_pci_info(struct device_node *dn, void *data)
 #endif
 	regs = of_get_property(dn, "reg", NULL);
 	if (regs) {
+		u32 addr = of_read_number(regs, 1);
+
 		/* First register entry is addr (00BBSS00) */
-		pdn->busno = (regs[0] >> 16) & 0xff;
-		pdn->devfn = (regs[0] >> 8) & 0xff;
+		pdn->busno = (addr >> 16) & 0xff;
+		pdn->devfn = (addr >> 8) & 0xff;
 	}
 
-	pdn->pci_ext_config_space = (type && *type == 1);
+	pdn->pci_ext_config_space = (type && of_read_number(type, 1) == 1);
 	return NULL;
 }
 
@@ -98,12 +99,13 @@ void *traverse_pci_devices(struct device_node *start, traverse_func pre,
 
 	/* We started with a phb, iterate all childs */
 	for (dn = start->child; dn; dn = nextdn) {
-		const u32 *classp;
-		u32 class;
+		const __be32 *classp;
+		u32 class = 0;
 
 		nextdn = NULL;
 		classp = of_get_property(dn, "class-code", NULL);
-		class = classp ? *classp : 0;
+		if (classp)
+			class = of_read_number(classp, 1);
 
 		if (pre && ((ret = pre(dn, data)) != NULL))
 			return ret;
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
index 15d9105323bf..4368ec6fdc8c 100644
--- a/arch/powerpc/kernel/pci_of_scan.c
+++ b/arch/powerpc/kernel/pci_of_scan.c
@@ -24,12 +24,12 @@
  */
 static u32 get_int_prop(struct device_node *np, const char *name, u32 def)
 {
-	const u32 *prop;
+	const __be32 *prop;
 	int len;
 
 	prop = of_get_property(np, name, &len);
 	if (prop && len >= 4)
-		return *prop;
+		return of_read_number(prop, 1);
 	return def;
 }
 
@@ -77,7 +77,7 @@ static void of_pci_parse_addrs(struct device_node *node, struct pci_dev *dev)
 	unsigned int flags;
 	struct pci_bus_region region;
 	struct resource *res;
-	const u32 *addrs;
+	const __be32 *addrs;
 	u32 i;
 	int proplen;
 
@@ -86,14 +86,14 @@ static void of_pci_parse_addrs(struct device_node *node, struct pci_dev *dev)
 		return;
 	pr_debug("  parse addresses (%d bytes) @ %p\n", proplen, addrs);
 	for (; proplen >= 20; proplen -= 20, addrs += 5) {
-		flags = pci_parse_of_flags(addrs[0], 0);
+		flags = pci_parse_of_flags(of_read_number(addrs, 1), 0);
 		if (!flags)
 			continue;
 		base = of_read_number(&addrs[1], 2);
 		size = of_read_number(&addrs[3], 2);
 		if (!size)
 			continue;
-		i = addrs[0] & 0xff;
+		i = of_read_number(addrs, 1) & 0xff;
 		pr_debug("  base: %llx, size: %llx, i: %x\n",
 			 (unsigned long long)base,
 			 (unsigned long long)size, i);
@@ -207,7 +207,7 @@ void of_scan_pci_bridge(struct pci_dev *dev)
 {
 	struct device_node *node = dev->dev.of_node;
 	struct pci_bus *bus;
-	const u32 *busrange, *ranges;
+	const __be32 *busrange, *ranges;
 	int len, i, mode;
 	struct pci_bus_region region;
 	struct resource *res;
@@ -230,9 +230,11 @@ void of_scan_pci_bridge(struct pci_dev *dev)
 		return;
 	}
 
-	bus = pci_find_bus(pci_domain_nr(dev->bus), busrange[0]);
+	bus = pci_find_bus(pci_domain_nr(dev->bus),
+			   of_read_number(busrange, 1));
 	if (!bus) {
-		bus = pci_add_new_bus(dev->bus, dev, busrange[0]);
+		bus = pci_add_new_bus(dev->bus, dev,
+				      of_read_number(busrange, 1));
 		if (!bus) {
 			printk(KERN_ERR "Failed to create pci bus for %s\n",
 			       node->full_name);
@@ -241,7 +243,8 @@ void of_scan_pci_bridge(struct pci_dev *dev)
 	}
 
 	bus->primary = dev->bus->number;
-	pci_bus_insert_busn_res(bus, busrange[0], busrange[1]);
+	pci_bus_insert_busn_res(bus, of_read_number(busrange, 1),
+				of_read_number(busrange+1, 1));
 	bus->bridge_ctl = 0;
 
 	/* parse ranges property */
@@ -254,7 +257,7 @@ void of_scan_pci_bridge(struct pci_dev *dev)
 	}
 	i = 1;
 	for (; len >= 32; len -= 32, ranges += 8) {
-		flags = pci_parse_of_flags(ranges[0], 1);
+		flags = pci_parse_of_flags(of_read_number(ranges, 1), 1);
 		size = of_read_number(&ranges[6], 2);
 		if (flags == 0 || size == 0)
 			continue;
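Aside: the loops above step through OF PCI address entries of five 32-bit cells (20 bytes): phys.hi carries the flags and BAR index, cells 1-2 the 64-bit base, cells 3-4 the 64-bit size. A sketch of one endian-safe decode step (hypothetical helper, illustration only):

    /* sketch: decode one 5-cell "assigned-addresses" entry */
    static void decode_of_pci_addr(const __be32 *cell,
    			       u32 *flags_hi, u64 *base, u64 *size)
    {
    	*flags_hi = of_read_number(cell, 1);	/* phys.hi: flags + BAR */
    	*base = of_read_number(cell + 1, 2);	/* two cells -> 64 bits */
    	*size = of_read_number(cell + 3, 2);
    }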
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index c29666586998..21646dbe1bb3 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -96,7 +96,9 @@ EXPORT_SYMBOL(pci_dram_offset);
 
 EXPORT_SYMBOL(start_thread);
 
+#ifdef CONFIG_PPC_FPU
 EXPORT_SYMBOL(giveup_fpu);
+#endif
 #ifdef CONFIG_ALTIVEC
 EXPORT_SYMBOL(giveup_altivec);
 #endif /* CONFIG_ALTIVEC */
@@ -111,7 +113,6 @@ EXPORT_SYMBOL(giveup_spe);
 #ifndef CONFIG_PPC64
 EXPORT_SYMBOL(flush_instruction_cache);
 #endif
-EXPORT_SYMBOL(__flush_icache_range);
 EXPORT_SYMBOL(flush_dcache_range);
 
 #ifdef CONFIG_SMP
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index c517dbe705fd..6f428da53e20 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -74,6 +74,7 @@ struct task_struct *last_task_used_vsx = NULL;
struct task_struct *last_task_used_spe = NULL;
 #endif
 
+#ifdef CONFIG_PPC_FPU
 /*
  * Make sure the floating-point register state in the
  * the thread_struct is up to date for task tsk.
@@ -107,6 +108,7 @@ void flush_fp_to_thread(struct task_struct *tsk)
 	}
 }
 EXPORT_SYMBOL_GPL(flush_fp_to_thread);
+#endif
 
 void enable_kernel_fp(void)
 {
@@ -600,6 +602,16 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	struct ppc64_tlb_batch *batch;
 #endif
 
+	/* Back up the TAR across context switches.
+	 * Note that the TAR is not available for use in the kernel.  (To
+	 * provide this, the TAR should be backed up/restored on exception
+	 * entry/exit instead, and be in pt_regs.  FIXME, this should be in
+	 * pt_regs anyway (for debug).)
+	 * Save the TAR here before we do treclaim/trecheckpoint as these
+	 * will change the TAR.
+	 */
+	save_tar(&prev->thread);
+
 	__switch_to_tm(prev);
 
 #ifdef CONFIG_SMP
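Aside: save_tar() stashes the Target Address Register before treclaim/trecheckpoint can clobber it; conceptually it is just an SPR read into the thread struct. A rough C rendering (an assumption for illustration; the real helper is assembly):

    /* sketch: conceptual C equivalent of save_tar() */
    static inline void save_tar_sketch(struct thread_struct *t)
    {
    	t->tar = mfspr(SPRN_TAR);	/* TAR is per-thread user state */
    }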
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index eb23ac92abb9..6bfcab97c981 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -215,16 +215,16 @@ static void __init check_cpu_pa_features(unsigned long node)
 #ifdef CONFIG_PPC_STD_MMU_64
 static void __init check_cpu_slb_size(unsigned long node)
 {
-	u32 *slb_size_ptr;
+	__be32 *slb_size_ptr;
 
 	slb_size_ptr = of_get_flat_dt_prop(node, "slb-size", NULL);
 	if (slb_size_ptr != NULL) {
-		mmu_slb_size = *slb_size_ptr;
+		mmu_slb_size = be32_to_cpup(slb_size_ptr);
 		return;
 	}
 	slb_size_ptr = of_get_flat_dt_prop(node, "ibm,slb-size", NULL);
 	if (slb_size_ptr != NULL) {
-		mmu_slb_size = *slb_size_ptr;
+		mmu_slb_size = be32_to_cpup(slb_size_ptr);
 	}
 }
 #else
@@ -279,11 +279,11 @@ static void __init check_cpu_feature_properties(unsigned long node)
 {
 	unsigned long i;
 	struct feature_property *fp = feature_properties;
-	const u32 *prop;
+	const __be32 *prop;
 
 	for (i = 0; i < ARRAY_SIZE(feature_properties); ++i, ++fp) {
 		prop = of_get_flat_dt_prop(node, fp->name, NULL);
-		if (prop && *prop >= fp->min_value) {
+		if (prop && be32_to_cpup(prop) >= fp->min_value) {
 			cur_cpu_spec->cpu_features |= fp->cpu_feature;
 			cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftr;
 		}
@@ -295,8 +295,8 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
 					  void *data)
 {
 	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
-	const u32 *prop;
-	const u32 *intserv;
+	const __be32 *prop;
+	const __be32 *intserv;
 	int i, nthreads;
 	unsigned long len;
 	int found = -1;
@@ -324,8 +324,9 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
 	 * version 2 of the kexec param format adds the phys cpuid of
 	 * booted proc.
 	 */
-	if (initial_boot_params->version >= 2) {
-		if (intserv[i] == initial_boot_params->boot_cpuid_phys) {
+	if (be32_to_cpu(initial_boot_params->version) >= 2) {
+		if (be32_to_cpu(intserv[i]) ==
+		    be32_to_cpu(initial_boot_params->boot_cpuid_phys)) {
 			found = boot_cpu_count;
 			found_thread = i;
 		}
@@ -347,9 +348,10 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
 
 	if (found >= 0) {
 		DBG("boot cpu: logical %d physical %d\n", found,
-			intserv[found_thread]);
+		    be32_to_cpu(intserv[found_thread]));
 		boot_cpuid = found;
-		set_hard_smp_processor_id(found, intserv[found_thread]);
+		set_hard_smp_processor_id(found,
+			be32_to_cpu(intserv[found_thread]));
 
 		/*
 		 * PAPR defines "logical" PVR values for cpus that
@@ -366,8 +368,8 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
 	 * it uses 0x0f000001.
 	 */
 	prop = of_get_flat_dt_prop(node, "cpu-version", NULL);
-	if (prop && (*prop & 0xff000000) == 0x0f000000)
-		identify_cpu(0, *prop);
+	if (prop && (be32_to_cpup(prop) & 0xff000000) == 0x0f000000)
+		identify_cpu(0, be32_to_cpup(prop));
 
 	identical_pvr_fixup(node);
 }
@@ -389,7 +391,7 @@ int __init early_init_dt_scan_chosen_ppc(unsigned long node, const char *uname,
 int __init early_init_dt_scan_chosen_ppc(unsigned long node, const char *uname,
 					 int depth, void *data)
 {
-	unsigned long *lprop;
+	unsigned long *lprop;	/* All these set by kernel, so no need to convert endian */
 
 	/* Use common scan routine to determine if this is the chosen node */
 	if (early_init_dt_scan_chosen(node, uname, depth, data) == 0)
@@ -454,7 +456,7 @@ static int __init early_init_dt_scan_drconf_memory(unsigned long node)
 	if (dm == NULL || l < sizeof(__be32))
 		return 0;
 
-	n = *dm++;	/* number of entries */
+	n = of_read_number(dm++, 1);	/* number of entries */
 	if (l < (n * (dt_root_addr_cells + 4) + 1) * sizeof(__be32))
 		return 0;
 
@@ -466,7 +468,7 @@ static int __init early_init_dt_scan_drconf_memory(unsigned long node)
 
 	for (; n != 0; --n) {
 		base = dt_mem_next_cell(dt_root_addr_cells, &dm);
-		flags = dm[3];
+		flags = of_read_number(&dm[3], 1);
 		/* skip DRC index, pad, assoc. list index, flags */
 		dm += 4;
 		/* skip this block if the reserved bit is set in flags (0x80)
@@ -591,16 +593,16 @@ static void __init early_reserve_mem_dt(void)
 static void __init early_reserve_mem(void)
 {
 	u64 base, size;
-	u64 *reserve_map;
+	__be64 *reserve_map;
 	unsigned long self_base;
 	unsigned long self_size;
 
-	reserve_map = (u64 *)(((unsigned long)initial_boot_params) +
-					initial_boot_params->off_mem_rsvmap);
+	reserve_map = (__be64 *)(((unsigned long)initial_boot_params) +
+			be32_to_cpu(initial_boot_params->off_mem_rsvmap));
 
 	/* before we do anything, lets reserve the dt blob */
 	self_base = __pa((unsigned long)initial_boot_params);
-	self_size = initial_boot_params->totalsize;
+	self_size = be32_to_cpu(initial_boot_params->totalsize);
 	memblock_reserve(self_base, self_size);
 
 	/* Look for the new "reserved-regions" property in the DT */
@@ -620,15 +622,15 @@ static void __init early_reserve_mem(void)
 	 * Handle the case where we might be booting from an old kexec
 	 * image that setup the mem_rsvmap as pairs of 32-bit values
 	 */
-	if (*reserve_map > 0xffffffffull) {
+	if (be64_to_cpup(reserve_map) > 0xffffffffull) {
 		u32 base_32, size_32;
-		u32 *reserve_map_32 = (u32 *)reserve_map;
+		__be32 *reserve_map_32 = (__be32 *)reserve_map;
 
 		DBG("Found old 32-bit reserve map\n");
 
 		while (1) {
-			base_32 = *(reserve_map_32++);
-			size_32 = *(reserve_map_32++);
+			base_32 = be32_to_cpup(reserve_map_32++);
+			size_32 = be32_to_cpup(reserve_map_32++);
 			if (size_32 == 0)
 				break;
 			/* skip if the reservation is for the blob */
@@ -644,8 +646,8 @@ static void __init early_reserve_mem(void)
 
 	/* Handle the reserve map in the fdt blob if it exists */
 	while (1) {
-		base = *(reserve_map++);
-		size = *(reserve_map++);
+		base = be64_to_cpup(reserve_map++);
+		size = be64_to_cpup(reserve_map++);
 		if (size == 0)
 			break;
 		DBG("reserving: %llx -> %llx\n", base, size);
@@ -795,6 +797,32 @@ struct device_node *of_find_next_cache_node(struct device_node *np)
 	return NULL;
 }
 
+/**
+ * of_get_ibm_chip_id - Returns the IBM "chip-id" of a device
+ * @np: device node of the device
+ *
+ * This looks for a property "ibm,chip-id" in the node or any
+ * of its parents and returns its content, or -1 if it cannot
+ * be found.
+ */
+int of_get_ibm_chip_id(struct device_node *np)
+{
+	of_node_get(np);
+	while(np) {
+		struct device_node *old = np;
+		const __be32 *prop;
+
+		prop = of_get_property(np, "ibm,chip-id", NULL);
+		if (prop) {
+			of_node_put(np);
+			return be32_to_cpup(prop);
+		}
+		np = of_get_parent(np);
+		of_node_put(old);
+	}
+	return -1;
+}
+
 #ifdef CONFIG_PPC_PSERIES
 /*
  * Fix up the uninitialized fields in a new device node:
@@ -865,49 +893,10 @@ static int __init prom_reconfig_setup(void)
 __initcall(prom_reconfig_setup);
 #endif
 
-/* Find the device node for a given logical cpu number, also returns the cpu
- * local thread number (index in ibm,interrupt-server#s) if relevant and
- * asked for (non NULL)
- */
-struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
+bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
 {
-	int hardid;
-	struct device_node *np;
-
-	hardid = get_hard_smp_processor_id(cpu);
-
-	for_each_node_by_type(np, "cpu") {
-		const u32 *intserv;
-		unsigned int plen, t;
-
-		/* Check for ibm,ppc-interrupt-server#s. If it doesn't exist
-		 * fallback to "reg" property and assume no threads
-		 */
-		intserv = of_get_property(np, "ibm,ppc-interrupt-server#s",
-				&plen);
-		if (intserv == NULL) {
-			const u32 *reg = of_get_property(np, "reg", NULL);
-			if (reg == NULL)
-				continue;
-			if (*reg == hardid) {
-				if (thread)
-					*thread = 0;
-				return np;
-			}
-		} else {
-			plen /= sizeof(u32);
-			for (t = 0; t < plen; t++) {
-				if (hardid == intserv[t]) {
-					if (thread)
-						*thread = t;
-					return np;
-				}
-			}
-		}
-	}
-	return NULL;
+	return (int)phys_id == get_hard_smp_processor_id(cpu);
 }
-EXPORT_SYMBOL(of_get_cpu_node);
 
 #if defined(CONFIG_DEBUG_FS) && defined(DEBUG)
 static struct debugfs_blob_wrapper flat_dt_blob;
@@ -917,7 +906,7 @@ static int __init export_flat_device_tree(void)
 	struct dentry *d;
 
 	flat_dt_blob.data = initial_boot_params;
-	flat_dt_blob.size = initial_boot_params->totalsize;
+	flat_dt_blob.size = be32_to_cpu(initial_boot_params->totalsize);
 
 	d = debugfs_create_blob("flat-device-tree", S_IFREG | S_IRUSR,
 				powerpc_debugfs_root, &flat_dt_blob);
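Aside: the removed of_get_cpu_node() duplicated logic that now lives in generic OF code; the architecture only supplies the ID matcher. A simplified sketch of the generic side of the hook (based on the drivers/of/base.c pattern, for illustration):

    /* sketch: generic weak default, overridden by the powerpc
     * arch_match_cpu_phys_id() added above */
    bool __weak arch_match_cpu_phys_id(int cpu, u64 phys_id)
    {
    	return (u32)phys_id == cpu;	/* default: logical == physical */
    }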
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 607902424e73..7b6391b68fb8 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -107,10 +107,10 @@ int of_workarounds;
typedef u32 prom_arg_t;
 
struct prom_args {
-	u32 service;
-	u32 nargs;
-	u32 nret;
-	prom_arg_t args[10];
+	__be32 service;
+	__be32 nargs;
+	__be32 nret;
+	__be32 args[10];
};
 
struct prom_t {
@@ -123,11 +123,11 @@ struct prom_t {
};
 
struct mem_map_entry {
-	u64	base;
-	u64	size;
+	__be64	base;
+	__be64	size;
};
 
-typedef u32 cell_t;
+typedef __be32 cell_t;
 
extern void __start(unsigned long r3, unsigned long r4, unsigned long r5,
		    unsigned long r6, unsigned long r7, unsigned long r8,
@@ -219,13 +219,13 @@ static int __init call_prom(const char *service, int nargs, int nret, ...)
 	struct prom_args args;
 	va_list list;
 
-	args.service = ADDR(service);
-	args.nargs = nargs;
-	args.nret = nret;
+	args.service = cpu_to_be32(ADDR(service));
+	args.nargs = cpu_to_be32(nargs);
+	args.nret = cpu_to_be32(nret);
 
 	va_start(list, nret);
 	for (i = 0; i < nargs; i++)
-		args.args[i] = va_arg(list, prom_arg_t);
+		args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
 	va_end(list);
 
 	for (i = 0; i < nret; i++)
@@ -234,7 +234,7 @@ static int __init call_prom(const char *service, int nargs, int nret, ...)
 	if (enter_prom(&args, prom_entry) < 0)
 		return PROM_ERROR;
 
-	return (nret > 0) ? args.args[nargs] : 0;
+	return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
 }
 
 static int __init call_prom_ret(const char *service, int nargs, int nret,
@@ -244,13 +244,13 @@ static int __init call_prom_ret(const char *service, int nargs, int nret,
 	struct prom_args args;
 	va_list list;
 
-	args.service = ADDR(service);
-	args.nargs = nargs;
-	args.nret = nret;
+	args.service = cpu_to_be32(ADDR(service));
+	args.nargs = cpu_to_be32(nargs);
+	args.nret = cpu_to_be32(nret);
 
 	va_start(list, rets);
 	for (i = 0; i < nargs; i++)
-		args.args[i] = va_arg(list, prom_arg_t);
+		args.args[i] = cpu_to_be32(va_arg(list, prom_arg_t));
 	va_end(list);
 
 	for (i = 0; i < nret; i++)
@@ -261,9 +261,9 @@ static int __init call_prom_ret(const char *service, int nargs, int nret,
 
 	if (rets != NULL)
 		for (i = 1; i < nret; ++i)
-			rets[i-1] = args.args[nargs+i];
+			rets[i-1] = be32_to_cpu(args.args[nargs+i]);
 
-	return (nret > 0) ? args.args[nargs] : 0;
+	return (nret > 0) ? be32_to_cpu(args.args[nargs]) : 0;
 }
 
 
@@ -527,7 +527,7 @@ static int __init prom_setprop(phandle node, const char *nodename,
 #define islower(c)	('a' <= (c) && (c) <= 'z')
 #define toupper(c)	(islower(c) ? ((c) - 'a' + 'A') : (c))
 
-unsigned long prom_strtoul(const char *cp, const char **endp)
+static unsigned long prom_strtoul(const char *cp, const char **endp)
 {
 	unsigned long result = 0, base = 10, value;
 
@@ -552,7 +552,7 @@ static unsigned long prom_strtoul(const char *cp, const char **endp)
 	return result;
 }
 
-unsigned long prom_memparse(const char *ptr, const char **retptr)
+static unsigned long prom_memparse(const char *ptr, const char **retptr)
 {
 	unsigned long ret = prom_strtoul(ptr, retptr);
 	int shift = 0;
@@ -724,7 +724,8 @@ unsigned char ibm_architecture_vec[] = {
 
 };
 
-/* Old method - ELF header with PT_NOTE sections */
+/* Old method - ELF header with PT_NOTE sections only works on BE */
+#ifdef __BIG_ENDIAN__
 static struct fake_elf {
 	Elf32_Ehdr	elfhdr;
 	Elf32_Phdr	phdr[2];
@@ -810,6 +811,7 @@ static struct fake_elf {
 		}
 	}
 };
+#endif /* __BIG_ENDIAN__ */
 
 static int __init prom_count_smt_threads(void)
 {
@@ -852,9 +854,9 @@ static int __init prom_count_smt_threads(void)
 
 static void __init prom_send_capabilities(void)
 {
-	ihandle elfloader, root;
+	ihandle root;
 	prom_arg_t ret;
-	u32 *cores;
+	__be32 *cores;
 
 	root = call_prom("open", 1, 1, ADDR("/"));
 	if (root != 0) {
@@ -864,15 +866,15 @@ static void __init prom_send_capabilities(void)
 		 * (we assume this is the same for all cores) and use it to
 		 * divide NR_CPUS.
 		 */
-		cores = (u32 *)&ibm_architecture_vec[IBM_ARCH_VEC_NRCORES_OFFSET];
-		if (*cores != NR_CPUS) {
+		cores = (__be32 *)&ibm_architecture_vec[IBM_ARCH_VEC_NRCORES_OFFSET];
+		if (be32_to_cpup(cores) != NR_CPUS) {
 			prom_printf("WARNING ! "
 				    "ibm_architecture_vec structure inconsistent: %lu!\n",
-				    *cores);
+				    be32_to_cpup(cores));
 		} else {
-			*cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads());
+			*cores = cpu_to_be32(DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads()));
 			prom_printf("Max number of cores passed to firmware: %lu (NR_CPUS = %lu)\n",
-				    *cores, NR_CPUS);
+				    be32_to_cpup(cores), NR_CPUS);
 		}
 
 		/* try calling the ibm,client-architecture-support method */
@@ -893,17 +895,24 @@ static void __init prom_send_capabilities(void)
 		prom_printf(" not implemented\n");
 	}
 
-	/* no ibm,client-architecture-support call, try the old way */
-	elfloader = call_prom("open", 1, 1, ADDR("/packages/elf-loader"));
-	if (elfloader == 0) {
-		prom_printf("couldn't open /packages/elf-loader\n");
-		return;
+#ifdef __BIG_ENDIAN__
+	{
+		ihandle elfloader;
+
+		/* no ibm,client-architecture-support call, try the old way */
+		elfloader = call_prom("open", 1, 1,
+				      ADDR("/packages/elf-loader"));
+		if (elfloader == 0) {
+			prom_printf("couldn't open /packages/elf-loader\n");
+			return;
+		}
+		call_prom("call-method", 3, 1, ADDR("process-elf-header"),
+			  elfloader, ADDR(&fake_elf));
+		call_prom("close", 1, 0, elfloader);
 	}
-	call_prom("call-method", 3, 1, ADDR("process-elf-header"),
-			elfloader, ADDR(&fake_elf));
-	call_prom("close", 1, 0, elfloader);
+#endif /* __BIG_ENDIAN__ */
 }
-#endif
+#endif /* #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */
 
 /*
  * Memory allocation strategy... our layout is normally:
@@ -1050,11 +1059,11 @@ static unsigned long __init prom_next_cell(int s, cell_t **cellp)
 		p++;
 		s--;
 	}
-	r = *p++;
+	r = be32_to_cpu(*p++);
 #ifdef CONFIG_PPC64
 	if (s > 1) {
 		r <<= 32;
-		r |= *(p++);
+		r |= be32_to_cpu(*(p++));
 	}
 #endif
 	*cellp = p;
@@ -1087,8 +1096,8 @@ static void __init reserve_mem(u64 base, u64 size)
 
 	if (cnt >= (MEM_RESERVE_MAP_SIZE - 1))
 		prom_panic("Memory reserve map exhausted !\n");
-	mem_reserve_map[cnt].base = base;
-	mem_reserve_map[cnt].size = size;
+	mem_reserve_map[cnt].base = cpu_to_be64(base);
+	mem_reserve_map[cnt].size = cpu_to_be64(size);
 	mem_reserve_cnt = cnt + 1;
 }
 
@@ -1102,6 +1111,7 @@ static void __init prom_init_mem(void)
 	char *path, type[64];
 	unsigned int plen;
 	cell_t *p, *endp;
+	__be32 val;
 	u32 rac, rsc;
 
 	/*
@@ -1109,12 +1119,14 @@ static void __init prom_init_mem(void)
 	 * 1) top of RMO (first node)
 	 * 2) top of memory
 	 */
-	rac = 2;
-	prom_getprop(prom.root, "#address-cells", &rac, sizeof(rac));
-	rsc = 1;
-	prom_getprop(prom.root, "#size-cells", &rsc, sizeof(rsc));
-	prom_debug("root_addr_cells: %x\n", (unsigned long) rac);
-	prom_debug("root_size_cells: %x\n", (unsigned long) rsc);
+	val = cpu_to_be32(2);
+	prom_getprop(prom.root, "#address-cells", &val, sizeof(val));
+	rac = be32_to_cpu(val);
+	val = cpu_to_be32(1);
+	prom_getprop(prom.root, "#size-cells", &val, sizeof(rsc));
+	rsc = be32_to_cpu(val);
+	prom_debug("root_addr_cells: %x\n", rac);
+	prom_debug("root_size_cells: %x\n", rsc);
 
 	prom_debug("scanning memory:\n");
 	path = prom_scratch;
@@ -1222,25 +1234,23 @@ static void __init prom_init_mem(void)
 
 static void __init prom_close_stdin(void)
 {
-	ihandle val;
+	__be32 val;
+	ihandle stdin;
 
-	if (prom_getprop(prom.chosen, "stdin", &val, sizeof(val)) > 0)
-		call_prom("close", 1, 0, val);
+	if (prom_getprop(prom.chosen, "stdin", &val, sizeof(val)) > 0) {
+		stdin = be32_to_cpu(val);
+		call_prom("close", 1, 0, stdin);
+	}
 }
 
 #ifdef CONFIG_PPC_POWERNV
 
-static u64 __initdata prom_opal_size;
-static u64 __initdata prom_opal_align;
-static int __initdata prom_rtas_start_cpu;
-static u64 __initdata prom_rtas_data;
-static u64 __initdata prom_rtas_entry;
-
 #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
 static u64 __initdata prom_opal_base;
 static u64 __initdata prom_opal_entry;
 #endif
 
+#ifdef __BIG_ENDIAN__
 /* XXX Don't change this structure without updating opal-takeover.S */
 static struct opal_secondary_data {
 	s64	ack;	/* 0 */
@@ -1248,6 +1258,12 @@ static struct opal_secondary_data {
 	struct opal_takeover_args args;	/* 16 */
 } opal_secondary_data;
 
+static u64 __initdata prom_opal_align;
+static u64 __initdata prom_opal_size;
+static int __initdata prom_rtas_start_cpu;
+static u64 __initdata prom_rtas_data;
+static u64 __initdata prom_rtas_entry;
+
 extern char opal_secondary_entry;
 
 static void __init prom_query_opal(void)
@@ -1265,6 +1281,7 @@ static void __init prom_query_opal(void)
1265 } 1281 }
1266 1282
1267 prom_printf("Querying for OPAL presence... "); 1283 prom_printf("Querying for OPAL presence... ");
1284
1268 rc = opal_query_takeover(&prom_opal_size, 1285 rc = opal_query_takeover(&prom_opal_size,
1269 &prom_opal_align); 1286 &prom_opal_align);
1270 prom_debug("(rc = %ld) ", rc); 1287 prom_debug("(rc = %ld) ", rc);
@@ -1425,6 +1442,7 @@ static void __init prom_opal_takeover(void)
1425 for (;;) 1442 for (;;)
1426 opal_do_takeover(args); 1443 opal_do_takeover(args);
1427} 1444}
1445#endif /* __BIG_ENDIAN__ */
1428 1446
1429/* 1447/*
1430 * Allocate room for and instantiate OPAL 1448 * Allocate room for and instantiate OPAL
@@ -1435,6 +1453,7 @@ static void __init prom_instantiate_opal(void)
1435 ihandle opal_inst; 1453 ihandle opal_inst;
1436 u64 base, entry; 1454 u64 base, entry;
1437 u64 size = 0, align = 0x10000; 1455 u64 size = 0, align = 0x10000;
1456 __be64 val64;
1438 u32 rets[2]; 1457 u32 rets[2];
1439 1458
1440 prom_debug("prom_instantiate_opal: start...\n"); 1459 prom_debug("prom_instantiate_opal: start...\n");
@@ -1444,11 +1463,14 @@ static void __init prom_instantiate_opal(void)
1444 if (!PHANDLE_VALID(opal_node)) 1463 if (!PHANDLE_VALID(opal_node))
1445 return; 1464 return;
1446 1465
1447 prom_getprop(opal_node, "opal-runtime-size", &size, sizeof(size)); 1466 val64 = 0;
1467 prom_getprop(opal_node, "opal-runtime-size", &val64, sizeof(val64));
1468 size = be64_to_cpu(val64);
1448 if (size == 0) 1469 if (size == 0)
1449 return; 1470 return;
1450 prom_getprop(opal_node, "opal-runtime-alignment", &align, 1471 val64 = 0;
1451 sizeof(align)); 1472 prom_getprop(opal_node, "opal-runtime-alignment", &val64, sizeof(val64));
1473 align = be64_to_cpu(val64);
1452 1474
1453 base = alloc_down(size, align, 0); 1475 base = alloc_down(size, align, 0);
1454 if (base == 0) { 1476 if (base == 0) {
@@ -1505,6 +1527,7 @@ static void __init prom_instantiate_rtas(void)
1505 phandle rtas_node; 1527 phandle rtas_node;
1506 ihandle rtas_inst; 1528 ihandle rtas_inst;
1507 u32 base, entry = 0; 1529 u32 base, entry = 0;
1530 __be32 val;
1508 u32 size = 0; 1531 u32 size = 0;
1509 1532
1510 prom_debug("prom_instantiate_rtas: start...\n"); 1533 prom_debug("prom_instantiate_rtas: start...\n");
@@ -1514,7 +1537,9 @@ static void __init prom_instantiate_rtas(void)
1514 if (!PHANDLE_VALID(rtas_node)) 1537 if (!PHANDLE_VALID(rtas_node))
1515 return; 1538 return;
1516 1539
1517 prom_getprop(rtas_node, "rtas-size", &size, sizeof(size)); 1540 val = 0;
1541 prom_getprop(rtas_node, "rtas-size", &val, sizeof(val));
1542 size = be32_to_cpu(val);
1518 if (size == 0) 1543 if (size == 0)
1519 return; 1544 return;
1520 1545
@@ -1541,12 +1566,14 @@ static void __init prom_instantiate_rtas(void)
1541 1566
1542 reserve_mem(base, size); 1567 reserve_mem(base, size);
1543 1568
1569 val = cpu_to_be32(base);
1544 prom_setprop(rtas_node, "/rtas", "linux,rtas-base", 1570 prom_setprop(rtas_node, "/rtas", "linux,rtas-base",
1545 &base, sizeof(base)); 1571 &val, sizeof(val));
1572 val = cpu_to_be32(entry);
1546 prom_setprop(rtas_node, "/rtas", "linux,rtas-entry", 1573 prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",
1547 &entry, sizeof(entry)); 1574 &val, sizeof(val));
1548 1575
1549#ifdef CONFIG_PPC_POWERNV 1576#if defined(CONFIG_PPC_POWERNV) && defined(__BIG_ENDIAN__)
1550 /* PowerNV takeover hack */ 1577 /* PowerNV takeover hack */
1551 prom_rtas_data = base; 1578 prom_rtas_data = base;
1552 prom_rtas_entry = entry; 1579 prom_rtas_entry = entry;
@@ -1620,6 +1647,7 @@ static void __init prom_instantiate_sml(void)
1620/* 1647/*
1621 * Allocate room for and initialize TCE tables 1648 * Allocate room for and initialize TCE tables
1622 */ 1649 */
1650#ifdef __BIG_ENDIAN__
1623static void __init prom_initialize_tce_table(void) 1651static void __init prom_initialize_tce_table(void)
1624{ 1652{
1625 phandle node; 1653 phandle node;
@@ -1748,7 +1776,8 @@ static void __init prom_initialize_tce_table(void)
1748 /* Flag the first invalid entry */ 1776 /* Flag the first invalid entry */
1749 prom_debug("ending prom_initialize_tce_table\n"); 1777 prom_debug("ending prom_initialize_tce_table\n");
1750} 1778}
1751#endif 1779#endif /* __BIG_ENDIAN__ */
1780#endif /* CONFIG_PPC64 */
1752 1781
1753/* 1782/*
1754 * With CHRP SMP we need to use the OF to start the other processors. 1783 * With CHRP SMP we need to use the OF to start the other processors.
@@ -1777,7 +1806,6 @@ static void __init prom_initialize_tce_table(void)
1777static void __init prom_hold_cpus(void) 1806static void __init prom_hold_cpus(void)
1778{ 1807{
1779 unsigned long i; 1808 unsigned long i;
1780 unsigned int reg;
1781 phandle node; 1809 phandle node;
1782 char type[64]; 1810 char type[64];
1783 unsigned long *spinloop 1811 unsigned long *spinloop
@@ -1803,6 +1831,9 @@ static void __init prom_hold_cpus(void)
1803 1831
1804 /* look for cpus */ 1832 /* look for cpus */
1805 for (node = 0; prom_next_node(&node); ) { 1833 for (node = 0; prom_next_node(&node); ) {
1834 unsigned int cpu_no;
1835 __be32 reg;
1836
1806 type[0] = 0; 1837 type[0] = 0;
1807 prom_getprop(node, "device_type", type, sizeof(type)); 1838 prom_getprop(node, "device_type", type, sizeof(type));
1808 if (strcmp(type, "cpu") != 0) 1839 if (strcmp(type, "cpu") != 0)
@@ -1813,10 +1844,11 @@ static void __init prom_hold_cpus(void)
1813 if (strcmp(type, "okay") != 0) 1844 if (strcmp(type, "okay") != 0)
1814 continue; 1845 continue;
1815 1846
1816 reg = -1; 1847 reg = cpu_to_be32(-1); /* make sparse happy */
1817 prom_getprop(node, "reg", &reg, sizeof(reg)); 1848 prom_getprop(node, "reg", &reg, sizeof(reg));
1849 cpu_no = be32_to_cpu(reg);
1818 1850
1819 prom_debug("cpu hw idx = %lu\n", reg); 1851 prom_debug("cpu hw idx = %lu\n", cpu_no);
1820 1852
1821 /* Init the acknowledge var which will be reset by 1853 /* Init the acknowledge var which will be reset by
1822 * the secondary cpu when it awakens from its OF 1854 * the secondary cpu when it awakens from its OF
@@ -1824,24 +1856,24 @@ static void __init prom_hold_cpus(void)
1824 */ 1856 */
1825 *acknowledge = (unsigned long)-1; 1857 *acknowledge = (unsigned long)-1;
1826 1858
1827 if (reg != prom.cpu) { 1859 if (cpu_no != prom.cpu) {
1828 /* Primary Thread of non-boot cpu or any thread */ 1860 /* Primary Thread of non-boot cpu or any thread */
1829 prom_printf("starting cpu hw idx %lu... ", reg); 1861 prom_printf("starting cpu hw idx %lu... ", cpu_no);
1830 call_prom("start-cpu", 3, 0, node, 1862 call_prom("start-cpu", 3, 0, node,
1831 secondary_hold, reg); 1863 secondary_hold, cpu_no);
1832 1864
1833 for (i = 0; (i < 100000000) && 1865 for (i = 0; (i < 100000000) &&
1834 (*acknowledge == ((unsigned long)-1)); i++ ) 1866 (*acknowledge == ((unsigned long)-1)); i++ )
1835 mb(); 1867 mb();
1836 1868
1837 if (*acknowledge == reg) 1869 if (*acknowledge == cpu_no)
1838 prom_printf("done\n"); 1870 prom_printf("done\n");
1839 else 1871 else
1840 prom_printf("failed: %x\n", *acknowledge); 1872 prom_printf("failed: %x\n", *acknowledge);
1841 } 1873 }
1842#ifdef CONFIG_SMP 1874#ifdef CONFIG_SMP
1843 else 1875 else
1844 prom_printf("boot cpu hw idx %lu\n", reg); 1876 prom_printf("boot cpu hw idx %lu\n", cpu_no);
1845#endif /* CONFIG_SMP */ 1877#endif /* CONFIG_SMP */
1846 } 1878 }
1847 1879
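The start-cpu handshake above is a plain shared-memory spin: the boot CPU writes -1 into the acknowledge word, kicks the secondary through OF, then polls until the secondary stores its own hardware index there. In rough sketch form (bounded poll and barrier as in the loop above; the helper name is hypothetical):

	/* Sketch: wait for a kicked secondary to report its hw index. */
	static int __init wait_for_secondary(volatile unsigned long *ack,
					     unsigned long cpu_no)
	{
		unsigned long i;

		for (i = 0; i < 100000000 && *ack == (unsigned long)-1; i++)
			mb();			/* force a fresh read */
		return *ack == cpu_no;		/* nonzero on success */
	}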
@@ -1895,6 +1927,7 @@ static void __init prom_find_mmu(void)
1895 prom.memory = call_prom("open", 1, 1, ADDR("/memory")); 1927 prom.memory = call_prom("open", 1, 1, ADDR("/memory"));
1896 prom_getprop(prom.chosen, "mmu", &prom.mmumap, 1928 prom_getprop(prom.chosen, "mmu", &prom.mmumap,
1897 sizeof(prom.mmumap)); 1929 sizeof(prom.mmumap));
1930 prom.mmumap = be32_to_cpu(prom.mmumap);
1898 if (!IHANDLE_VALID(prom.memory) || !IHANDLE_VALID(prom.mmumap)) 1931 if (!IHANDLE_VALID(prom.memory) || !IHANDLE_VALID(prom.mmumap))
1899 of_workarounds &= ~OF_WA_CLAIM; /* hmmm */ 1932 of_workarounds &= ~OF_WA_CLAIM; /* hmmm */
1900} 1933}
@@ -1906,17 +1939,19 @@ static void __init prom_init_stdout(void)
1906{ 1939{
1907 char *path = of_stdout_device; 1940 char *path = of_stdout_device;
1908 char type[16]; 1941 char type[16];
1909 u32 val; 1942 phandle stdout_node;
1943 __be32 val;
1910 1944
1911 if (prom_getprop(prom.chosen, "stdout", &val, sizeof(val)) <= 0) 1945 if (prom_getprop(prom.chosen, "stdout", &val, sizeof(val)) <= 0)
1912 prom_panic("cannot find stdout"); 1946 prom_panic("cannot find stdout");
1913 1947
1914 prom.stdout = val; 1948 prom.stdout = be32_to_cpu(val);
1915 1949
1916 /* Get the full OF pathname of the stdout device */ 1950 /* Get the full OF pathname of the stdout device */
1917 memset(path, 0, 256); 1951 memset(path, 0, 256);
1918 call_prom("instance-to-path", 3, 1, prom.stdout, path, 255); 1952 call_prom("instance-to-path", 3, 1, prom.stdout, path, 255);
1919 val = call_prom("instance-to-package", 1, 1, prom.stdout); 1953 stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout);
1954 val = cpu_to_be32(stdout_node);
1920 prom_setprop(prom.chosen, "/chosen", "linux,stdout-package", 1955 prom_setprop(prom.chosen, "/chosen", "linux,stdout-package",
1921 &val, sizeof(val)); 1956 &val, sizeof(val));
1922 prom_printf("OF stdout device is: %s\n", of_stdout_device); 1957 prom_printf("OF stdout device is: %s\n", of_stdout_device);
@@ -1925,9 +1960,9 @@ static void __init prom_init_stdout(void)
1925 1960
1926 /* If it's a display, note it */ 1961 /* If it's a display, note it */
1927 memset(type, 0, sizeof(type)); 1962 memset(type, 0, sizeof(type));
1928 prom_getprop(val, "device_type", type, sizeof(type)); 1963 prom_getprop(stdout_node, "device_type", type, sizeof(type));
1929 if (strcmp(type, "display") == 0) 1964 if (strcmp(type, "display") == 0)
1930 prom_setprop(val, path, "linux,boot-display", NULL, 0); 1965 prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0);
1931} 1966}
1932 1967
1933static int __init prom_find_machine_type(void) 1968static int __init prom_find_machine_type(void)
@@ -2082,6 +2117,22 @@ static void __init prom_check_displays(void)
2082 clut[2]) != 0) 2117 clut[2]) != 0)
2083 break; 2118 break;
2084#endif /* CONFIG_LOGO_LINUX_CLUT224 */ 2119#endif /* CONFIG_LOGO_LINUX_CLUT224 */
2120
2121#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
2122 if (prom_getprop(node, "linux,boot-display", NULL, 0) !=
2123 PROM_ERROR) {
2124 u32 width, height, pitch, addr;
2125
2126 prom_printf("Setting btext !\n");
2127 prom_getprop(node, "width", &width, 4);
2128 prom_getprop(node, "height", &height, 4);
2129 prom_getprop(node, "linebytes", &pitch, 4);
2130 prom_getprop(node, "address", &addr, 4);
2131 prom_printf("W=%d H=%d LB=%d addr=0x%x\n",
2132 width, height, pitch, addr);
2133 btext_setup_display(width, height, 8, pitch, addr);
2134 }
2135#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
2085 } 2136 }
2086} 2137}
2087 2138
@@ -2117,8 +2168,10 @@ static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
2117 return ret; 2168 return ret;
2118} 2169}
2119 2170
2120#define dt_push_token(token, mem_start, mem_end) \ 2171#define dt_push_token(token, mem_start, mem_end) do { \
2121 do { *((u32 *)make_room(mem_start, mem_end, 4, 4)) = token; } while(0) 2172 void *room = make_room(mem_start, mem_end, 4, 4); \
2173 *(__be32 *)room = cpu_to_be32(token); \
2174 } while(0)
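The flattened device tree is big-endian by definition, so every token written into the blob now goes through cpu_to_be32(). Expanded as a plain function, the reworked macro amounts to roughly the following sketch (make_room() is the allocator defined just above; push_token is an illustrative name):

	/* Sketch: append one 4-byte, 4-aligned FDT token in big-endian. */
	static void __init push_token(u32 token, unsigned long *mem_start,
				      unsigned long *mem_end)
	{
		__be32 *slot = make_room(mem_start, mem_end, 4, 4);

		*slot = cpu_to_be32(token);	/* blob layout is fixed BE */
	}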
2122 2175
2123static unsigned long __init dt_find_string(char *str) 2176static unsigned long __init dt_find_string(char *str)
2124{ 2177{
@@ -2291,7 +2344,7 @@ static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
2291 dt_push_token(4, mem_start, mem_end); 2344 dt_push_token(4, mem_start, mem_end);
2292 dt_push_token(soff, mem_start, mem_end); 2345 dt_push_token(soff, mem_start, mem_end);
2293 valp = make_room(mem_start, mem_end, 4, 4); 2346 valp = make_room(mem_start, mem_end, 4, 4);
2294 *(u32 *)valp = node; 2347 *(__be32 *)valp = cpu_to_be32(node);
2295 } 2348 }
2296 } 2349 }
2297 2350
@@ -2364,16 +2417,16 @@ static void __init flatten_device_tree(void)
2364 dt_struct_end = PAGE_ALIGN(mem_start); 2417 dt_struct_end = PAGE_ALIGN(mem_start);
2365 2418
2366 /* Finish header */ 2419 /* Finish header */
2367 hdr->boot_cpuid_phys = prom.cpu; 2420 hdr->boot_cpuid_phys = cpu_to_be32(prom.cpu);
2368 hdr->magic = OF_DT_HEADER; 2421 hdr->magic = cpu_to_be32(OF_DT_HEADER);
2369 hdr->totalsize = dt_struct_end - dt_header_start; 2422 hdr->totalsize = cpu_to_be32(dt_struct_end - dt_header_start);
2370 hdr->off_dt_struct = dt_struct_start - dt_header_start; 2423 hdr->off_dt_struct = cpu_to_be32(dt_struct_start - dt_header_start);
2371 hdr->off_dt_strings = dt_string_start - dt_header_start; 2424 hdr->off_dt_strings = cpu_to_be32(dt_string_start - dt_header_start);
2372 hdr->dt_strings_size = dt_string_end - dt_string_start; 2425 hdr->dt_strings_size = cpu_to_be32(dt_string_end - dt_string_start);
2373 hdr->off_mem_rsvmap = ((unsigned long)rsvmap) - dt_header_start; 2426 hdr->off_mem_rsvmap = cpu_to_be32(((unsigned long)rsvmap) - dt_header_start);
2374 hdr->version = OF_DT_VERSION; 2427 hdr->version = cpu_to_be32(OF_DT_VERSION);
2375 /* Version 16 is not backward compatible */ 2428 /* Version 16 is not backward compatible */
2376 hdr->last_comp_version = 0x10; 2429 hdr->last_comp_version = cpu_to_be32(0x10);
2377 2430
2378 /* Copy the reserve map in */ 2431 /* Copy the reserve map in */
2379 memcpy(rsvmap, mem_reserve_map, sizeof(mem_reserve_map)); 2432 memcpy(rsvmap, mem_reserve_map, sizeof(mem_reserve_map));
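The same rule applies in reverse for whoever parses the blob: each header field must be converted back to host order before use. A hedged example of the matching check on the consumer side (field and constant names follow the boot_param_header usage in the hunk above; this is not code from the patch):

	/* Sketch: sanity-check a flat device tree header on any endianness. */
	static int fdt_header_ok(const struct boot_param_header *hdr)
	{
		if (be32_to_cpu(hdr->magic) != OF_DT_HEADER)
			return 0;	/* wrong magic, not an FDT blob */
		return be32_to_cpu(hdr->last_comp_version) <= OF_DT_VERSION;
	}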
@@ -2384,8 +2437,8 @@ static void __init flatten_device_tree(void)
2384 prom_printf("reserved memory map:\n"); 2437 prom_printf("reserved memory map:\n");
2385 for (i = 0; i < mem_reserve_cnt; i++) 2438 for (i = 0; i < mem_reserve_cnt; i++)
2386 prom_printf(" %x - %x\n", 2439 prom_printf(" %x - %x\n",
2387 mem_reserve_map[i].base, 2440 be64_to_cpu(mem_reserve_map[i].base),
2388 mem_reserve_map[i].size); 2441 be64_to_cpu(mem_reserve_map[i].size));
2389 } 2442 }
2390#endif 2443#endif
2391 /* Bump mem_reserve_cnt to cause further reservations to fail 2444 /* Bump mem_reserve_cnt to cause further reservations to fail
@@ -2397,7 +2450,6 @@ static void __init flatten_device_tree(void)
2397 dt_string_start, dt_string_end); 2450 dt_string_start, dt_string_end);
2398 prom_printf("Device tree struct 0x%x -> 0x%x\n", 2451 prom_printf("Device tree struct 0x%x -> 0x%x\n",
2399 dt_struct_start, dt_struct_end); 2452 dt_struct_start, dt_struct_end);
2400
2401} 2453}
2402 2454
2403#ifdef CONFIG_PPC_MAPLE 2455#ifdef CONFIG_PPC_MAPLE
@@ -2730,18 +2782,19 @@ static void __init fixup_device_tree(void)
2730 2782
2731static void __init prom_find_boot_cpu(void) 2783static void __init prom_find_boot_cpu(void)
2732{ 2784{
2733 u32 getprop_rval; 2785 __be32 rval;
2734 ihandle prom_cpu; 2786 ihandle prom_cpu;
2735 phandle cpu_pkg; 2787 phandle cpu_pkg;
2736 2788
2737 prom.cpu = 0; 2789 rval = 0;
2738 if (prom_getprop(prom.chosen, "cpu", &prom_cpu, sizeof(prom_cpu)) <= 0) 2790 if (prom_getprop(prom.chosen, "cpu", &rval, sizeof(rval)) <= 0)
2739 return; 2791 return;
2792 prom_cpu = be32_to_cpu(rval);
2740 2793
2741 cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu); 2794 cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);
2742 2795
2743 prom_getprop(cpu_pkg, "reg", &getprop_rval, sizeof(getprop_rval)); 2796 prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
2744 prom.cpu = getprop_rval; 2797 prom.cpu = be32_to_cpu(rval);
2745 2798
2746 prom_debug("Booting CPU hw index = %lu\n", prom.cpu); 2799 prom_debug("Booting CPU hw index = %lu\n", prom.cpu);
2747} 2800}
@@ -2750,15 +2803,15 @@ static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
2750{ 2803{
2751#ifdef CONFIG_BLK_DEV_INITRD 2804#ifdef CONFIG_BLK_DEV_INITRD
2752 if (r3 && r4 && r4 != 0xdeadbeef) { 2805 if (r3 && r4 && r4 != 0xdeadbeef) {
2753 unsigned long val; 2806 __be64 val;
2754 2807
2755 prom_initrd_start = is_kernel_addr(r3) ? __pa(r3) : r3; 2808 prom_initrd_start = is_kernel_addr(r3) ? __pa(r3) : r3;
2756 prom_initrd_end = prom_initrd_start + r4; 2809 prom_initrd_end = prom_initrd_start + r4;
2757 2810
2758 val = prom_initrd_start; 2811 val = cpu_to_be64(prom_initrd_start);
2759 prom_setprop(prom.chosen, "/chosen", "linux,initrd-start", 2812 prom_setprop(prom.chosen, "/chosen", "linux,initrd-start",
2760 &val, sizeof(val)); 2813 &val, sizeof(val));
2761 val = prom_initrd_end; 2814 val = cpu_to_be64(prom_initrd_end);
2762 prom_setprop(prom.chosen, "/chosen", "linux,initrd-end", 2815 prom_setprop(prom.chosen, "/chosen", "linux,initrd-end",
2763 &val, sizeof(val)); 2816 &val, sizeof(val));
2764 2817
@@ -2915,7 +2968,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
2915 */ 2968 */
2916 prom_check_displays(); 2969 prom_check_displays();
2917 2970
2918#ifdef CONFIG_PPC64 2971#if defined(CONFIG_PPC64) && defined(__BIG_ENDIAN__)
2919 /* 2972 /*
2920 * Initialize IOMMU (TCE tables) on pSeries. Do that before anything else 2973 * Initialize IOMMU (TCE tables) on pSeries. Do that before anything else
2921 * that uses the allocator, we need to make sure we get the top of memory 2974 * that uses the allocator, we need to make sure we get the top of memory
@@ -2934,6 +2987,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
2934 prom_instantiate_rtas(); 2987 prom_instantiate_rtas();
2935 2988
2936#ifdef CONFIG_PPC_POWERNV 2989#ifdef CONFIG_PPC_POWERNV
2990#ifdef __BIG_ENDIAN__
2937 /* Detect HAL and try instantiating it & doing takeover */ 2991 /* Detect HAL and try instantiating it & doing takeover */
2938 if (of_platform == PLATFORM_PSERIES_LPAR) { 2992 if (of_platform == PLATFORM_PSERIES_LPAR) {
2939 prom_query_opal(); 2993 prom_query_opal();
@@ -2941,9 +2995,11 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
2941 prom_opal_hold_cpus(); 2995 prom_opal_hold_cpus();
2942 prom_opal_takeover(); 2996 prom_opal_takeover();
2943 } 2997 }
2944 } else if (of_platform == PLATFORM_OPAL) 2998 } else
2999#endif /* __BIG_ENDIAN__ */
3000 if (of_platform == PLATFORM_OPAL)
2945 prom_instantiate_opal(); 3001 prom_instantiate_opal();
2946#endif 3002#endif /* CONFIG_PPC_POWERNV */
2947 3003
2948#ifdef CONFIG_PPC64 3004#ifdef CONFIG_PPC64
2949 /* instantiate sml */ 3005 /* instantiate sml */
@@ -2962,10 +3018,11 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
2962 /* 3018 /*
2963 * Fill in some infos for use by the kernel later on 3019 * Fill in some infos for use by the kernel later on
2964 */ 3020 */
2965 if (prom_memory_limit) 3021 if (prom_memory_limit) {
3022 __be64 val = cpu_to_be64(prom_memory_limit);
2966 prom_setprop(prom.chosen, "/chosen", "linux,memory-limit", 3023 prom_setprop(prom.chosen, "/chosen", "linux,memory-limit",
2967 &prom_memory_limit, 3024 &val, sizeof(val));
2968 sizeof(prom_memory_limit)); 3025 }
2969#ifdef CONFIG_PPC64 3026#ifdef CONFIG_PPC64
2970 if (prom_iommu_off) 3027 if (prom_iommu_off)
2971 prom_setprop(prom.chosen, "/chosen", "linux,iommu-off", 3028 prom_setprop(prom.chosen, "/chosen", "linux,iommu-off",
diff --git a/arch/powerpc/kernel/prom_init_check.sh b/arch/powerpc/kernel/prom_init_check.sh
index 3765da6be4f2..b0c263da219a 100644
--- a/arch/powerpc/kernel/prom_init_check.sh
+++ b/arch/powerpc/kernel/prom_init_check.sh
@@ -22,7 +22,8 @@ __secondary_hold_acknowledge __secondary_hold_spinloop __start
22strcmp strcpy strlcpy strlen strncmp strstr logo_linux_clut224 22strcmp strcpy strlcpy strlen strncmp strstr logo_linux_clut224
23reloc_got2 kernstart_addr memstart_addr linux_banner _stext 23reloc_got2 kernstart_addr memstart_addr linux_banner _stext
24opal_query_takeover opal_do_takeover opal_enter_rtas opal_secondary_entry 24opal_query_takeover opal_do_takeover opal_enter_rtas opal_secondary_entry
25boot_command_line __prom_init_toc_start __prom_init_toc_end" 25boot_command_line __prom_init_toc_start __prom_init_toc_end
26btext_setup_display"
26 27
27NM="$1" 28NM="$1"
28OBJ="$2" 29OBJ="$2"
diff --git a/arch/powerpc/kernel/prom_parse.c b/arch/powerpc/kernel/prom_parse.c
index 4e1331b8eb33..6295e646f78c 100644
--- a/arch/powerpc/kernel/prom_parse.c
+++ b/arch/powerpc/kernel/prom_parse.c
@@ -7,28 +7,27 @@
7#include <linux/of_address.h> 7#include <linux/of_address.h>
8#include <asm/prom.h> 8#include <asm/prom.h>
9 9
10void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop, 10void of_parse_dma_window(struct device_node *dn, const __be32 *dma_window,
11 unsigned long *busno, unsigned long *phys, unsigned long *size) 11 unsigned long *busno, unsigned long *phys,
12 unsigned long *size)
12{ 13{
13 const u32 *dma_window;
14 u32 cells; 14 u32 cells;
15 const unsigned char *prop; 15 const __be32 *prop;
16
17 dma_window = dma_window_prop;
18 16
19 /* busno is always one cell */ 17 /* busno is always one cell */
20 *busno = *(dma_window++); 18 *busno = of_read_number(dma_window, 1);
19 dma_window++;
21 20
22 prop = of_get_property(dn, "ibm,#dma-address-cells", NULL); 21 prop = of_get_property(dn, "ibm,#dma-address-cells", NULL);
23 if (!prop) 22 if (!prop)
24 prop = of_get_property(dn, "#address-cells", NULL); 23 prop = of_get_property(dn, "#address-cells", NULL);
25 24
26 cells = prop ? *(u32 *)prop : of_n_addr_cells(dn); 25 cells = prop ? of_read_number(prop, 1) : of_n_addr_cells(dn);
27 *phys = of_read_number(dma_window, cells); 26 *phys = of_read_number(dma_window, cells);
28 27
29 dma_window += cells; 28 dma_window += cells;
30 29
31 prop = of_get_property(dn, "ibm,#dma-size-cells", NULL); 30 prop = of_get_property(dn, "ibm,#dma-size-cells", NULL);
32 cells = prop ? *(u32 *)prop : of_n_size_cells(dn); 31 cells = prop ? of_read_number(prop, 1) : of_n_size_cells(dn);
33 *size = of_read_number(dma_window, cells); 32 *size = of_read_number(dma_window, cells);
34} 33}
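of_read_number() is what makes the variable cell counts above endian-safe: it folds N big-endian 32-bit cells into a single host-order u64. Its core logic is approximately this (a simplified sketch, not the verbatim kernel helper):

	/* Sketch: combine 'count' big-endian cells into one host-order value. */
	static u64 read_cells(const __be32 *cell, int count)
	{
		u64 r = 0;

		while (count--)
			r = (r << 32) | be32_to_cpu(*cell++);
		return r;
	}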
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 80b5ef403f68..4cf674d7d5ae 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -91,7 +91,7 @@ static void unlock_rtas(unsigned long flags)
91 * are designed only for very early low-level debugging, which 91 * are designed only for very early low-level debugging, which
92 * is why the token is hard-coded to 10. 92 * is why the token is hard-coded to 10.
93 */ 93 */
94static void call_rtas_display_status(char c) 94static void call_rtas_display_status(unsigned char c)
95{ 95{
96 struct rtas_args *args = &rtas.args; 96 struct rtas_args *args = &rtas.args;
97 unsigned long s; 97 unsigned long s;
@@ -100,11 +100,11 @@ static void call_rtas_display_status(char c)
100 return; 100 return;
101 s = lock_rtas(); 101 s = lock_rtas();
102 102
103 args->token = 10; 103 args->token = cpu_to_be32(10);
104 args->nargs = 1; 104 args->nargs = cpu_to_be32(1);
105 args->nret = 1; 105 args->nret = cpu_to_be32(1);
106 args->rets = (rtas_arg_t *)&(args->args[1]); 106 args->rets = &(args->args[1]);
107 args->args[0] = (unsigned char)c; 107 args->args[0] = cpu_to_be32(c);
108 108
109 enter_rtas(__pa(args)); 109 enter_rtas(__pa(args));
110 110
@@ -204,7 +204,7 @@ void rtas_progress(char *s, unsigned short hex)
204{ 204{
205 struct device_node *root; 205 struct device_node *root;
206 int width; 206 int width;
207 const int *p; 207 const __be32 *p;
208 char *os; 208 char *os;
209 static int display_character, set_indicator; 209 static int display_character, set_indicator;
210 static int display_width, display_lines, form_feed; 210 static int display_width, display_lines, form_feed;
@@ -221,13 +221,13 @@ void rtas_progress(char *s, unsigned short hex)
221 if ((root = of_find_node_by_path("/rtas"))) { 221 if ((root = of_find_node_by_path("/rtas"))) {
222 if ((p = of_get_property(root, 222 if ((p = of_get_property(root,
223 "ibm,display-line-length", NULL))) 223 "ibm,display-line-length", NULL)))
224 display_width = *p; 224 display_width = be32_to_cpu(*p);
225 if ((p = of_get_property(root, 225 if ((p = of_get_property(root,
226 "ibm,form-feed", NULL))) 226 "ibm,form-feed", NULL)))
227 form_feed = *p; 227 form_feed = be32_to_cpu(*p);
228 if ((p = of_get_property(root, 228 if ((p = of_get_property(root,
229 "ibm,display-number-of-lines", NULL))) 229 "ibm,display-number-of-lines", NULL)))
230 display_lines = *p; 230 display_lines = be32_to_cpu(*p);
231 row_width = of_get_property(root, 231 row_width = of_get_property(root,
232 "ibm,display-truncation-length", NULL); 232 "ibm,display-truncation-length", NULL);
233 of_node_put(root); 233 of_node_put(root);
@@ -322,11 +322,11 @@ EXPORT_SYMBOL(rtas_progress); /* needed by rtas_flash module */
322 322
323int rtas_token(const char *service) 323int rtas_token(const char *service)
324{ 324{
325 const int *tokp; 325 const __be32 *tokp;
326 if (rtas.dev == NULL) 326 if (rtas.dev == NULL)
327 return RTAS_UNKNOWN_SERVICE; 327 return RTAS_UNKNOWN_SERVICE;
328 tokp = of_get_property(rtas.dev, service, NULL); 328 tokp = of_get_property(rtas.dev, service, NULL);
329 return tokp ? *tokp : RTAS_UNKNOWN_SERVICE; 329 return tokp ? be32_to_cpu(*tokp) : RTAS_UNKNOWN_SERVICE;
330} 330}
331EXPORT_SYMBOL(rtas_token); 331EXPORT_SYMBOL(rtas_token);
332 332
@@ -380,11 +380,11 @@ static char *__fetch_rtas_last_error(char *altbuf)
380 380
381 bufsz = rtas_get_error_log_max(); 381 bufsz = rtas_get_error_log_max();
382 382
383 err_args.token = rtas_last_error_token; 383 err_args.token = cpu_to_be32(rtas_last_error_token);
384 err_args.nargs = 2; 384 err_args.nargs = cpu_to_be32(2);
385 err_args.nret = 1; 385 err_args.nret = cpu_to_be32(1);
386 err_args.args[0] = (rtas_arg_t)__pa(rtas_err_buf); 386 err_args.args[0] = cpu_to_be32(__pa(rtas_err_buf));
387 err_args.args[1] = bufsz; 387 err_args.args[1] = cpu_to_be32(bufsz);
388 err_args.args[2] = 0; 388 err_args.args[2] = 0;
389 389
390 save_args = rtas.args; 390 save_args = rtas.args;
@@ -433,13 +433,13 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
433 s = lock_rtas(); 433 s = lock_rtas();
434 rtas_args = &rtas.args; 434 rtas_args = &rtas.args;
435 435
436 rtas_args->token = token; 436 rtas_args->token = cpu_to_be32(token);
437 rtas_args->nargs = nargs; 437 rtas_args->nargs = cpu_to_be32(nargs);
438 rtas_args->nret = nret; 438 rtas_args->nret = cpu_to_be32(nret);
439 rtas_args->rets = (rtas_arg_t *)&(rtas_args->args[nargs]); 439 rtas_args->rets = &(rtas_args->args[nargs]);
440 va_start(list, outputs); 440 va_start(list, outputs);
441 for (i = 0; i < nargs; ++i) 441 for (i = 0; i < nargs; ++i)
442 rtas_args->args[i] = va_arg(list, rtas_arg_t); 442 rtas_args->args[i] = cpu_to_be32(va_arg(list, __u32));
443 va_end(list); 443 va_end(list);
444 444
445 for (i = 0; i < nret; ++i) 445 for (i = 0; i < nret; ++i)
@@ -449,13 +449,13 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
449 449
450 /* A -1 return code indicates that the last command couldn't 450 /* A -1 return code indicates that the last command couldn't
451 be completed due to a hardware error. */ 451 be completed due to a hardware error. */
452 if (rtas_args->rets[0] == -1) 452 if (be32_to_cpu(rtas_args->rets[0]) == -1)
453 buff_copy = __fetch_rtas_last_error(NULL); 453 buff_copy = __fetch_rtas_last_error(NULL);
454 454
455 if (nret > 1 && outputs != NULL) 455 if (nret > 1 && outputs != NULL)
456 for (i = 0; i < nret-1; ++i) 456 for (i = 0; i < nret-1; ++i)
457 outputs[i] = rtas_args->rets[i+1]; 457 outputs[i] = be32_to_cpu(rtas_args->rets[i+1]);
458 ret = (nret > 0)? rtas_args->rets[0]: 0; 458 ret = (nret > 0)? be32_to_cpu(rtas_args->rets[0]): 0;
459 459
460 unlock_rtas(s); 460 unlock_rtas(s);
461 461
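Taken together, rtas_call() now keeps the whole argument block big-endian, so the 32-bit RTAS firmware sees one fixed layout regardless of kernel endianness. Condensed into a sketch (struct rtas_args fields as used above; locking and the error-log path elided, and fill_rtas_args is an illustrative name):

	/* Sketch: marshal an RTAS call buffer entirely in big-endian. */
	static void fill_rtas_args(struct rtas_args *a, int token,
				   int nargs, int nret, const u32 *in)
	{
		int i;

		a->token = cpu_to_be32(token);
		a->nargs = cpu_to_be32(nargs);
		a->nret  = cpu_to_be32(nret);
		for (i = 0; i < nargs; i++)
			a->args[i] = cpu_to_be32(in[i]);
		for (i = 0; i < nret; i++)
			a->args[nargs + i] = 0;	/* clear return slots */
		a->rets = &a->args[nargs];
	}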
@@ -588,8 +588,8 @@ bool rtas_indicator_present(int token, int *maxindex)
588{ 588{
589 int proplen, count, i; 589 int proplen, count, i;
590 const struct indicator_elem { 590 const struct indicator_elem {
591 u32 token; 591 __be32 token;
592 u32 maxindex; 592 __be32 maxindex;
593 } *indicators; 593 } *indicators;
594 594
595 indicators = of_get_property(rtas.dev, "rtas-indicators", &proplen); 595 indicators = of_get_property(rtas.dev, "rtas-indicators", &proplen);
@@ -599,10 +599,10 @@ bool rtas_indicator_present(int token, int *maxindex)
599 count = proplen / sizeof(struct indicator_elem); 599 count = proplen / sizeof(struct indicator_elem);
600 600
601 for (i = 0; i < count; i++) { 601 for (i = 0; i < count; i++) {
602 if (indicators[i].token != token) 602 if (__be32_to_cpu(indicators[i].token) != token)
603 continue; 603 continue;
604 if (maxindex) 604 if (maxindex)
605 *maxindex = indicators[i].maxindex; 605 *maxindex = __be32_to_cpu(indicators[i].maxindex);
606 return true; 606 return true;
607 } 607 }
608 608
@@ -1097,19 +1097,19 @@ void __init rtas_initialize(void)
1097 */ 1097 */
1098 rtas.dev = of_find_node_by_name(NULL, "rtas"); 1098 rtas.dev = of_find_node_by_name(NULL, "rtas");
1099 if (rtas.dev) { 1099 if (rtas.dev) {
1100 const u32 *basep, *entryp, *sizep; 1100 const __be32 *basep, *entryp, *sizep;
1101 1101
1102 basep = of_get_property(rtas.dev, "linux,rtas-base", NULL); 1102 basep = of_get_property(rtas.dev, "linux,rtas-base", NULL);
1103 sizep = of_get_property(rtas.dev, "rtas-size", NULL); 1103 sizep = of_get_property(rtas.dev, "rtas-size", NULL);
1104 if (basep != NULL && sizep != NULL) { 1104 if (basep != NULL && sizep != NULL) {
1105 rtas.base = *basep; 1105 rtas.base = __be32_to_cpu(*basep);
1106 rtas.size = *sizep; 1106 rtas.size = __be32_to_cpu(*sizep);
1107 entryp = of_get_property(rtas.dev, 1107 entryp = of_get_property(rtas.dev,
1108 "linux,rtas-entry", NULL); 1108 "linux,rtas-entry", NULL);
1109 if (entryp == NULL) /* Ugh */ 1109 if (entryp == NULL) /* Ugh */
1110 rtas.entry = rtas.base; 1110 rtas.entry = rtas.base;
1111 else 1111 else
1112 rtas.entry = *entryp; 1112 rtas.entry = __be32_to_cpu(*entryp);
1113 } else 1113 } else
1114 rtas.dev = NULL; 1114 rtas.dev = NULL;
1115 } 1115 }
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 63d051f5b7a5..3d261c071fc8 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -436,7 +436,8 @@ void __init smp_setup_cpu_maps(void)
436 DBG("smp_setup_cpu_maps()\n"); 436 DBG("smp_setup_cpu_maps()\n");
437 437
438 while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < nr_cpu_ids) { 438 while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < nr_cpu_ids) {
439 const int *intserv; 439 const __be32 *intserv;
440 __be32 cpu_be;
440 int j, len; 441 int j, len;
441 442
442 DBG(" * %s...\n", dn->full_name); 443 DBG(" * %s...\n", dn->full_name);
@@ -450,15 +451,17 @@ void __init smp_setup_cpu_maps(void)
450 } else { 451 } else {
451 DBG(" no ibm,ppc-interrupt-server#s -> 1 thread\n"); 452 DBG(" no ibm,ppc-interrupt-server#s -> 1 thread\n");
452 intserv = of_get_property(dn, "reg", NULL); 453 intserv = of_get_property(dn, "reg", NULL);
453 if (!intserv) 454 if (!intserv) {
454 intserv = &cpu; /* assume logical == phys */ 455 cpu_be = cpu_to_be32(cpu);
456 intserv = &cpu_be; /* assume logical == phys */
457 }
455 } 458 }
456 459
457 for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) { 460 for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) {
458 DBG(" thread %d -> cpu %d (hard id %d)\n", 461 DBG(" thread %d -> cpu %d (hard id %d)\n",
459 j, cpu, intserv[j]); 462 j, cpu, be32_to_cpu(intserv[j]));
460 set_cpu_present(cpu, true); 463 set_cpu_present(cpu, true);
461 set_hard_smp_processor_id(cpu, intserv[j]); 464 set_hard_smp_processor_id(cpu, be32_to_cpu(intserv[j]));
462 set_cpu_possible(cpu, true); 465 set_cpu_possible(cpu, true);
463 cpu++; 466 cpu++;
464 } 467 }
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index a8f54ecb091f..a4bbcae72578 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -38,6 +38,7 @@
38#include <asm/serial.h> 38#include <asm/serial.h>
39#include <asm/udbg.h> 39#include <asm/udbg.h>
40#include <asm/mmu_context.h> 40#include <asm/mmu_context.h>
41#include <asm/epapr_hcalls.h>
41 42
42#include "setup.h" 43#include "setup.h"
43 44
@@ -128,6 +129,8 @@ notrace void __init machine_init(u64 dt_ptr)
128 /* Do some early initialization based on the flat device tree */ 129 /* Do some early initialization based on the flat device tree */
129 early_init_devtree(__va(dt_ptr)); 130 early_init_devtree(__va(dt_ptr));
130 131
132 epapr_paravirt_early_init();
133
131 early_init_mmu(); 134 early_init_mmu();
132 135
133 probe_machine(); 136 probe_machine();
@@ -326,5 +329,4 @@ void __init setup_arch(char **cmdline_p)
326 329
327 /* Initialize the MMU context management stuff */ 330 /* Initialize the MMU context management stuff */
328 mmu_context_init(); 331 mmu_context_init();
329
330} 332}
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 389fb8077cc9..278ca93e1f28 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -10,7 +10,7 @@
10 * 2 of the License, or (at your option) any later version. 10 * 2 of the License, or (at your option) any later version.
11 */ 11 */
12 12
13#undef DEBUG 13#define DEBUG
14 14
15#include <linux/export.h> 15#include <linux/export.h>
16#include <linux/string.h> 16#include <linux/string.h>
@@ -66,6 +66,7 @@
66#include <asm/code-patching.h> 66#include <asm/code-patching.h>
67#include <asm/kvm_ppc.h> 67#include <asm/kvm_ppc.h>
68#include <asm/hugetlb.h> 68#include <asm/hugetlb.h>
69#include <asm/epapr_hcalls.h>
69 70
70#include "setup.h" 71#include "setup.h"
71 72
@@ -215,6 +216,8 @@ void __init early_setup(unsigned long dt_ptr)
215 */ 216 */
216 early_init_devtree(__va(dt_ptr)); 217 early_init_devtree(__va(dt_ptr));
217 218
219 epapr_paravirt_early_init();
220
218 /* Now we know the logical id of our boot cpu, setup the paca. */ 221 /* Now we know the logical id of our boot cpu, setup the paca. */
219 setup_paca(&paca[boot_cpuid]); 222 setup_paca(&paca[boot_cpuid]);
220 fixup_boot_paca(); 223 fixup_boot_paca();
@@ -229,6 +232,8 @@ void __init early_setup(unsigned long dt_ptr)
229 /* Initialize the hash table or TLB handling */ 232 /* Initialize the hash table or TLB handling */
230 early_init_mmu(); 233 early_init_mmu();
231 234
235 kvm_cma_reserve();
236
232 /* 237 /*
233 * Reserve any gigantic pages requested on the command line. 238 * Reserve any gigantic pages requested on the command line.
234 * memblock needs to have been initialized by the time this is 239 * memblock needs to have been initialized by the time this is
@@ -237,6 +242,18 @@ void __init early_setup(unsigned long dt_ptr)
237 reserve_hugetlb_gpages(); 242 reserve_hugetlb_gpages();
238 243
239 DBG(" <- early_setup()\n"); 244 DBG(" <- early_setup()\n");
245
246#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
247 /*
248 * This needs to be done *last* (after the above DBG() even)
249 *
250 * Right after we return from this function, we turn on the MMU
251 * which means the real-mode access trick that btext does will
 252 * no longer work; it needs to switch to using a real MMU
 253 * mapping. This call will ensure that it does.
254 */
255 btext_map();
256#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
240} 257}
241 258
242#ifdef CONFIG_SMP 259#ifdef CONFIG_SMP
@@ -305,14 +322,14 @@ static void __init initialize_cache_info(void)
305 * d-cache and i-cache sizes... -Peter 322 * d-cache and i-cache sizes... -Peter
306 */ 323 */
307 if (num_cpus == 1) { 324 if (num_cpus == 1) {
308 const u32 *sizep, *lsizep; 325 const __be32 *sizep, *lsizep;
309 u32 size, lsize; 326 u32 size, lsize;
310 327
311 size = 0; 328 size = 0;
312 lsize = cur_cpu_spec->dcache_bsize; 329 lsize = cur_cpu_spec->dcache_bsize;
313 sizep = of_get_property(np, "d-cache-size", NULL); 330 sizep = of_get_property(np, "d-cache-size", NULL);
314 if (sizep != NULL) 331 if (sizep != NULL)
315 size = *sizep; 332 size = be32_to_cpu(*sizep);
316 lsizep = of_get_property(np, "d-cache-block-size", 333 lsizep = of_get_property(np, "d-cache-block-size",
317 NULL); 334 NULL);
318 /* fallback if block size missing */ 335 /* fallback if block size missing */
@@ -321,8 +338,8 @@ static void __init initialize_cache_info(void)
321 "d-cache-line-size", 338 "d-cache-line-size",
322 NULL); 339 NULL);
323 if (lsizep != NULL) 340 if (lsizep != NULL)
324 lsize = *lsizep; 341 lsize = be32_to_cpu(*lsizep);
325 if (sizep == 0 || lsizep == 0) 342 if (sizep == NULL || lsizep == NULL)
326 DBG("Argh, can't find dcache properties ! " 343 DBG("Argh, can't find dcache properties ! "
327 "sizep: %p, lsizep: %p\n", sizep, lsizep); 344 "sizep: %p, lsizep: %p\n", sizep, lsizep);
328 345
@@ -335,7 +352,7 @@ static void __init initialize_cache_info(void)
335 lsize = cur_cpu_spec->icache_bsize; 352 lsize = cur_cpu_spec->icache_bsize;
336 sizep = of_get_property(np, "i-cache-size", NULL); 353 sizep = of_get_property(np, "i-cache-size", NULL);
337 if (sizep != NULL) 354 if (sizep != NULL)
338 size = *sizep; 355 size = be32_to_cpu(*sizep);
339 lsizep = of_get_property(np, "i-cache-block-size", 356 lsizep = of_get_property(np, "i-cache-block-size",
340 NULL); 357 NULL);
341 if (lsizep == NULL) 358 if (lsizep == NULL)
@@ -343,8 +360,8 @@ static void __init initialize_cache_info(void)
343 "i-cache-line-size", 360 "i-cache-line-size",
344 NULL); 361 NULL);
345 if (lsizep != NULL) 362 if (lsizep != NULL)
346 lsize = *lsizep; 363 lsize = be32_to_cpu(*lsizep);
347 if (sizep == 0 || lsizep == 0) 364 if (sizep == NULL || lsizep == NULL)
348 DBG("Argh, can't find icache properties ! " 365 DBG("Argh, can't find icache properties ! "
349 "sizep: %p, lsizep: %p\n", sizep, lsizep); 366 "sizep: %p, lsizep: %p\n", sizep, lsizep);
350 367
@@ -609,8 +626,6 @@ void __init setup_arch(char **cmdline_p)
609 /* Initialize the MMU context management stuff */ 626 /* Initialize the MMU context management stuff */
610 mmu_context_init(); 627 mmu_context_init();
611 628
612 kvm_linear_init();
613
614 /* Interrupt code needs to be 64K-aligned */ 629 /* Interrupt code needs to be 64K-aligned */
615 if ((unsigned long)_stext & 0xffff) 630 if ((unsigned long)_stext & 0xffff)
616 panic("Kernelbase not 64K-aligned (0x%lx)!\n", 631 panic("Kernelbase not 64K-aligned (0x%lx)!\n",
@@ -701,8 +716,7 @@ void __init setup_per_cpu_areas(void)
701#endif 716#endif
702 717
703 718
704#ifdef CONFIG_PPC_INDIRECT_IO 719#if defined(CONFIG_PPC_INDIRECT_PIO) || defined(CONFIG_PPC_INDIRECT_MMIO)
705struct ppc_pci_io ppc_pci_io; 720struct ppc_pci_io ppc_pci_io;
706EXPORT_SYMBOL(ppc_pci_io); 721EXPORT_SYMBOL(ppc_pci_io);
707#endif /* CONFIG_PPC_INDIRECT_IO */ 722#endif
708
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 0f83122e6676..bebdf1a1a540 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -436,7 +436,10 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
436 * use altivec. Since VSCR only contains 32 bits saved in the least 436 * use altivec. Since VSCR only contains 32 bits saved in the least
437 * significant bits of a vector, we "cheat" and stuff VRSAVE in the 437 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
438 * most significant bits of that same vector. --BenH 438 * most significant bits of that same vector. --BenH
439 * Note that the current VRSAVE value is in the SPR at this point.
439 */ 440 */
441 if (cpu_has_feature(CPU_FTR_ALTIVEC))
442 current->thread.vrsave = mfspr(SPRN_VRSAVE);
440 if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32])) 443 if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
441 return 1; 444 return 1;
442#endif /* CONFIG_ALTIVEC */ 445#endif /* CONFIG_ALTIVEC */
@@ -557,6 +560,8 @@ static int save_tm_user_regs(struct pt_regs *regs,
557 * significant bits of a vector, we "cheat" and stuff VRSAVE in the 560 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
558 * most significant bits of that same vector. --BenH 561 * most significant bits of that same vector. --BenH
559 */ 562 */
563 if (cpu_has_feature(CPU_FTR_ALTIVEC))
564 current->thread.vrsave = mfspr(SPRN_VRSAVE);
560 if (__put_user(current->thread.vrsave, 565 if (__put_user(current->thread.vrsave,
561 (u32 __user *)&frame->mc_vregs[32])) 566 (u32 __user *)&frame->mc_vregs[32]))
562 return 1; 567 return 1;
@@ -696,6 +701,8 @@ static long restore_user_regs(struct pt_regs *regs,
696 /* Always get VRSAVE back */ 701 /* Always get VRSAVE back */
697 if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32])) 702 if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
698 return 1; 703 return 1;
704 if (cpu_has_feature(CPU_FTR_ALTIVEC))
705 mtspr(SPRN_VRSAVE, current->thread.vrsave);
699#endif /* CONFIG_ALTIVEC */ 706#endif /* CONFIG_ALTIVEC */
700 if (copy_fpr_from_user(current, &sr->mc_fregs)) 707 if (copy_fpr_from_user(current, &sr->mc_fregs))
701 return 1; 708 return 1;
@@ -809,6 +816,8 @@ static long restore_tm_user_regs(struct pt_regs *regs,
809 __get_user(current->thread.transact_vrsave, 816 __get_user(current->thread.transact_vrsave,
810 (u32 __user *)&tm_sr->mc_vregs[32])) 817 (u32 __user *)&tm_sr->mc_vregs[32]))
811 return 1; 818 return 1;
819 if (cpu_has_feature(CPU_FTR_ALTIVEC))
820 mtspr(SPRN_VRSAVE, current->thread.vrsave);
812#endif /* CONFIG_ALTIVEC */ 821#endif /* CONFIG_ALTIVEC */
813 822
814 regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1); 823 regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 887e99d85bc2..f93ec2835a13 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -96,8 +96,6 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
96 unsigned long msr = regs->msr; 96 unsigned long msr = regs->msr;
97 long err = 0; 97 long err = 0;
98 98
99 flush_fp_to_thread(current);
100
101#ifdef CONFIG_ALTIVEC 99#ifdef CONFIG_ALTIVEC
102 err |= __put_user(v_regs, &sc->v_regs); 100 err |= __put_user(v_regs, &sc->v_regs);
103 101
@@ -114,6 +112,8 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
114 /* We always copy to/from vrsave, it's 0 if we don't have or don't 112 /* We always copy to/from vrsave, it's 0 if we don't have or don't
115 * use altivec. 113 * use altivec.
116 */ 114 */
115 if (cpu_has_feature(CPU_FTR_ALTIVEC))
116 current->thread.vrsave = mfspr(SPRN_VRSAVE);
117 err |= __put_user(current->thread.vrsave, (u32 __user *)&v_regs[33]); 117 err |= __put_user(current->thread.vrsave, (u32 __user *)&v_regs[33]);
118#else /* CONFIG_ALTIVEC */ 118#else /* CONFIG_ALTIVEC */
119 err |= __put_user(0, &sc->v_regs); 119 err |= __put_user(0, &sc->v_regs);
@@ -217,6 +217,8 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
217 /* We always copy to/from vrsave, it's 0 if we don't have or don't 217 /* We always copy to/from vrsave, it's 0 if we don't have or don't
218 * use altivec. 218 * use altivec.
219 */ 219 */
220 if (cpu_has_feature(CPU_FTR_ALTIVEC))
221 current->thread.vrsave = mfspr(SPRN_VRSAVE);
220 err |= __put_user(current->thread.vrsave, (u32 __user *)&v_regs[33]); 222 err |= __put_user(current->thread.vrsave, (u32 __user *)&v_regs[33]);
221 if (msr & MSR_VEC) 223 if (msr & MSR_VEC)
222 err |= __put_user(current->thread.transact_vrsave, 224 err |= __put_user(current->thread.transact_vrsave,
@@ -346,16 +348,18 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
346 if (v_regs && !access_ok(VERIFY_READ, v_regs, 34 * sizeof(vector128))) 348 if (v_regs && !access_ok(VERIFY_READ, v_regs, 34 * sizeof(vector128)))
347 return -EFAULT; 349 return -EFAULT;
348 /* Copy 33 vec registers (vr0..31 and vscr) from the stack */ 350 /* Copy 33 vec registers (vr0..31 and vscr) from the stack */
349 if (v_regs != 0 && (msr & MSR_VEC) != 0) 351 if (v_regs != NULL && (msr & MSR_VEC) != 0)
350 err |= __copy_from_user(current->thread.vr, v_regs, 352 err |= __copy_from_user(current->thread.vr, v_regs,
351 33 * sizeof(vector128)); 353 33 * sizeof(vector128));
352 else if (current->thread.used_vr) 354 else if (current->thread.used_vr)
353 memset(current->thread.vr, 0, 33 * sizeof(vector128)); 355 memset(current->thread.vr, 0, 33 * sizeof(vector128));
354 /* Always get VRSAVE back */ 356 /* Always get VRSAVE back */
355 if (v_regs != 0) 357 if (v_regs != NULL)
356 err |= __get_user(current->thread.vrsave, (u32 __user *)&v_regs[33]); 358 err |= __get_user(current->thread.vrsave, (u32 __user *)&v_regs[33]);
357 else 359 else
358 current->thread.vrsave = 0; 360 current->thread.vrsave = 0;
361 if (cpu_has_feature(CPU_FTR_ALTIVEC))
362 mtspr(SPRN_VRSAVE, current->thread.vrsave);
359#endif /* CONFIG_ALTIVEC */ 363#endif /* CONFIG_ALTIVEC */
360 /* restore floating point */ 364 /* restore floating point */
361 err |= copy_fpr_from_user(current, &sc->fp_regs); 365 err |= copy_fpr_from_user(current, &sc->fp_regs);
@@ -463,7 +467,7 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
463 tm_v_regs, 34 * sizeof(vector128))) 467 tm_v_regs, 34 * sizeof(vector128)))
464 return -EFAULT; 468 return -EFAULT;
465 /* Copy 33 vec registers (vr0..31 and vscr) from the stack */ 469 /* Copy 33 vec registers (vr0..31 and vscr) from the stack */
466 if (v_regs != 0 && tm_v_regs != 0 && (msr & MSR_VEC) != 0) { 470 if (v_regs != NULL && tm_v_regs != NULL && (msr & MSR_VEC) != 0) {
467 err |= __copy_from_user(current->thread.vr, v_regs, 471 err |= __copy_from_user(current->thread.vr, v_regs,
468 33 * sizeof(vector128)); 472 33 * sizeof(vector128));
469 err |= __copy_from_user(current->thread.transact_vr, tm_v_regs, 473 err |= __copy_from_user(current->thread.transact_vr, tm_v_regs,
@@ -474,7 +478,7 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
474 memset(current->thread.transact_vr, 0, 33 * sizeof(vector128)); 478 memset(current->thread.transact_vr, 0, 33 * sizeof(vector128));
475 } 479 }
476 /* Always get VRSAVE back */ 480 /* Always get VRSAVE back */
477 if (v_regs != 0 && tm_v_regs != 0) { 481 if (v_regs != NULL && tm_v_regs != NULL) {
478 err |= __get_user(current->thread.vrsave, 482 err |= __get_user(current->thread.vrsave,
479 (u32 __user *)&v_regs[33]); 483 (u32 __user *)&v_regs[33]);
480 err |= __get_user(current->thread.transact_vrsave, 484 err |= __get_user(current->thread.transact_vrsave,
@@ -484,6 +488,8 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
484 current->thread.vrsave = 0; 488 current->thread.vrsave = 0;
485 current->thread.transact_vrsave = 0; 489 current->thread.transact_vrsave = 0;
486 } 490 }
491 if (cpu_has_feature(CPU_FTR_ALTIVEC))
492 mtspr(SPRN_VRSAVE, current->thread.vrsave);
487#endif /* CONFIG_ALTIVEC */ 493#endif /* CONFIG_ALTIVEC */
488 /* restore floating point */ 494 /* restore floating point */
489 err |= copy_fpr_from_user(current, &sc->fp_regs); 495 err |= copy_fpr_from_user(current, &sc->fp_regs);
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 38b0ba65a735..442d8e23f8f4 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -81,6 +81,28 @@ int smt_enabled_at_boot = 1;
81 81
82static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL; 82static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL;
83 83
84/*
85 * Returns 1 if the specified cpu should be brought up during boot.
86 * Used to inhibit booting threads if they've been disabled or
87 * limited on the command line
88 */
89int smp_generic_cpu_bootable(unsigned int nr)
90{
91 /* Special case - we inhibit secondary thread startup
92 * during boot if the user requests it.
93 */
94 if (system_state == SYSTEM_BOOTING && cpu_has_feature(CPU_FTR_SMT)) {
95 if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
96 return 0;
97 if (smt_enabled_at_boot
98 && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
99 return 0;
100 }
101
102 return 1;
103}
104
105
84#ifdef CONFIG_PPC64 106#ifdef CONFIG_PPC64
85int smp_generic_kick_cpu(int nr) 107int smp_generic_kick_cpu(int nr)
86{ 108{
@@ -172,7 +194,7 @@ int smp_request_message_ipi(int virq, int msg)
172#endif 194#endif
173 err = request_irq(virq, smp_ipi_action[msg], 195 err = request_irq(virq, smp_ipi_action[msg],
174 IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND, 196 IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
175 smp_ipi_name[msg], 0); 197 smp_ipi_name[msg], NULL);
176 WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n", 198 WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
177 virq, smp_ipi_name[msg], err); 199 virq, smp_ipi_name[msg], err);
178 200
@@ -210,6 +232,12 @@ void smp_muxed_ipi_message_pass(int cpu, int msg)
210 smp_ops->cause_ipi(cpu, info->data); 232 smp_ops->cause_ipi(cpu, info->data);
211} 233}
212 234
235#ifdef __BIG_ENDIAN__
236#define IPI_MESSAGE(A) (1 << (24 - 8 * (A)))
237#else
238#define IPI_MESSAGE(A) (1 << (8 * (A)))
239#endif
240
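The messages word is really four one-byte flags viewed as a single u32, so the bit position of message A depends on where byte 0 sits. A small sketch of why both definitions pick out the same byte (the union view is an illustration, not code from the patch):

	/* Sketch: setting flag byte A and testing IPI_MESSAGE(A) agree on
	 * both endiannesses, because both name the same byte of the word. */
	static int ipi_bit_matches(unsigned int a)	/* a in 0..3 */
	{
		union { u32 word; u8 byte[4]; } m = { .word = 0 };

		m.byte[a] = 1;
		return (m.word & IPI_MESSAGE(a)) != 0;	/* always true */
	}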
213irqreturn_t smp_ipi_demux(void) 241irqreturn_t smp_ipi_demux(void)
214{ 242{
215 struct cpu_messages *info = &__get_cpu_var(ipi_message); 243 struct cpu_messages *info = &__get_cpu_var(ipi_message);
@@ -219,19 +247,14 @@ irqreturn_t smp_ipi_demux(void)
219 247
220 do { 248 do {
221 all = xchg(&info->messages, 0); 249 all = xchg(&info->messages, 0);
222 250 if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
223#ifdef __BIG_ENDIAN
224 if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNCTION)))
225 generic_smp_call_function_interrupt(); 251 generic_smp_call_function_interrupt();
226 if (all & (1 << (24 - 8 * PPC_MSG_RESCHEDULE))) 252 if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
227 scheduler_ipi(); 253 scheduler_ipi();
228 if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNC_SINGLE))) 254 if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNC_SINGLE))
229 generic_smp_call_function_single_interrupt(); 255 generic_smp_call_function_single_interrupt();
230 if (all & (1 << (24 - 8 * PPC_MSG_DEBUGGER_BREAK))) 256 if (all & IPI_MESSAGE(PPC_MSG_DEBUGGER_BREAK))
231 debug_ipi_action(0, NULL); 257 debug_ipi_action(0, NULL);
232#else
233#error Unsupported ENDIAN
234#endif
235 } while (info->messages); 258 } while (info->messages);
236 259
237 return IRQ_HANDLED; 260 return IRQ_HANDLED;
@@ -574,6 +597,21 @@ out:
574 return id; 597 return id;
575} 598}
576 599
600/* Return the value of the chip-id property corresponding
601 * to the given logical cpu.
602 */
603int cpu_to_chip_id(int cpu)
 604{
 605 struct device_node *np;
 606 int id;
 607 np = of_get_cpu_node(cpu, NULL);
 608 if (!np)
 609 return -1;
 610 id = of_get_ibm_chip_id(np);
 611 of_node_put(np);
 612 return id;
 613}
614
577/* Helper routines for cpu to core mapping */ 615/* Helper routines for cpu to core mapping */
578int cpu_core_index_of_thread(int cpu) 616int cpu_core_index_of_thread(int cpu)
579{ 617{
@@ -587,6 +625,33 @@ int cpu_first_thread_of_core(int core)
587} 625}
588EXPORT_SYMBOL_GPL(cpu_first_thread_of_core); 626EXPORT_SYMBOL_GPL(cpu_first_thread_of_core);
589 627
628static void traverse_siblings_chip_id(int cpu, bool add, int chipid)
629{
630 const struct cpumask *mask;
631 struct device_node *np;
632 int i, plen;
633 const __be32 *prop;
634
635 mask = add ? cpu_online_mask : cpu_present_mask;
636 for_each_cpu(i, mask) {
637 np = of_get_cpu_node(i, NULL);
638 if (!np)
639 continue;
640 prop = of_get_property(np, "ibm,chip-id", &plen);
641 if (prop && plen == sizeof(int) &&
642 of_read_number(prop, 1) == chipid) {
643 if (add) {
644 cpumask_set_cpu(cpu, cpu_core_mask(i));
645 cpumask_set_cpu(i, cpu_core_mask(cpu));
646 } else {
647 cpumask_clear_cpu(cpu, cpu_core_mask(i));
648 cpumask_clear_cpu(i, cpu_core_mask(cpu));
649 }
650 }
651 of_node_put(np);
652 }
653}
654
590/* Must be called when no change can occur to cpu_present_mask, 655/* Must be called when no change can occur to cpu_present_mask,
591 * i.e. during cpu online or offline. 656 * i.e. during cpu online or offline.
592 */ 657 */
@@ -609,11 +674,51 @@ static struct device_node *cpu_to_l2cache(int cpu)
609 return cache; 674 return cache;
610} 675}
611 676
677static void traverse_core_siblings(int cpu, bool add)
678{
679 struct device_node *l2_cache, *np;
680 const struct cpumask *mask;
681 int i, chip, plen;
682 const __be32 *prop;
683
684 /* First see if we have ibm,chip-id properties in cpu nodes */
685 np = of_get_cpu_node(cpu, NULL);
686 if (np) {
687 chip = -1;
688 prop = of_get_property(np, "ibm,chip-id", &plen);
689 if (prop && plen == sizeof(int))
690 chip = of_read_number(prop, 1);
691 of_node_put(np);
692 if (chip >= 0) {
693 traverse_siblings_chip_id(cpu, add, chip);
694 return;
695 }
696 }
697
698 l2_cache = cpu_to_l2cache(cpu);
699 mask = add ? cpu_online_mask : cpu_present_mask;
700 for_each_cpu(i, mask) {
701 np = cpu_to_l2cache(i);
702 if (!np)
703 continue;
704 if (np == l2_cache) {
705 if (add) {
706 cpumask_set_cpu(cpu, cpu_core_mask(i));
707 cpumask_set_cpu(i, cpu_core_mask(cpu));
708 } else {
709 cpumask_clear_cpu(cpu, cpu_core_mask(i));
710 cpumask_clear_cpu(i, cpu_core_mask(cpu));
711 }
712 }
713 of_node_put(np);
714 }
715 of_node_put(l2_cache);
716}
717
612/* Activate a secondary processor. */ 718/* Activate a secondary processor. */
613void start_secondary(void *unused) 719void start_secondary(void *unused)
614{ 720{
615 unsigned int cpu = smp_processor_id(); 721 unsigned int cpu = smp_processor_id();
616 struct device_node *l2_cache;
617 int i, base; 722 int i, base;
618 723
619 atomic_inc(&init_mm.mm_count); 724 atomic_inc(&init_mm.mm_count);
@@ -652,18 +757,7 @@ void start_secondary(void *unused)
652 cpumask_set_cpu(cpu, cpu_core_mask(base + i)); 757 cpumask_set_cpu(cpu, cpu_core_mask(base + i));
653 cpumask_set_cpu(base + i, cpu_core_mask(cpu)); 758 cpumask_set_cpu(base + i, cpu_core_mask(cpu));
654 } 759 }
655 l2_cache = cpu_to_l2cache(cpu); 760 traverse_core_siblings(cpu, true);
656 for_each_online_cpu(i) {
657 struct device_node *np = cpu_to_l2cache(i);
658 if (!np)
659 continue;
660 if (np == l2_cache) {
661 cpumask_set_cpu(cpu, cpu_core_mask(i));
662 cpumask_set_cpu(i, cpu_core_mask(cpu));
663 }
664 of_node_put(np);
665 }
666 of_node_put(l2_cache);
667 761
668 smp_wmb(); 762 smp_wmb();
669 notify_cpu_starting(cpu); 763 notify_cpu_starting(cpu);
@@ -719,7 +813,6 @@ int arch_sd_sibling_asym_packing(void)
719#ifdef CONFIG_HOTPLUG_CPU 813#ifdef CONFIG_HOTPLUG_CPU
720int __cpu_disable(void) 814int __cpu_disable(void)
721{ 815{
722 struct device_node *l2_cache;
723 int cpu = smp_processor_id(); 816 int cpu = smp_processor_id();
724 int base, i; 817 int base, i;
725 int err; 818 int err;
@@ -739,20 +832,7 @@ int __cpu_disable(void)
739 cpumask_clear_cpu(cpu, cpu_core_mask(base + i)); 832 cpumask_clear_cpu(cpu, cpu_core_mask(base + i));
740 cpumask_clear_cpu(base + i, cpu_core_mask(cpu)); 833 cpumask_clear_cpu(base + i, cpu_core_mask(cpu));
741 } 834 }
742 835 traverse_core_siblings(cpu, false);
743 l2_cache = cpu_to_l2cache(cpu);
744 for_each_present_cpu(i) {
745 struct device_node *np = cpu_to_l2cache(i);
746 if (!np)
747 continue;
748 if (np == l2_cache) {
749 cpumask_clear_cpu(cpu, cpu_core_mask(i));
750 cpumask_clear_cpu(i, cpu_core_mask(cpu));
751 }
752 of_node_put(np);
753 }
754 of_node_put(l2_cache);
755
756 836
757 return 0; 837 return 0;
758} 838}
diff --git a/arch/powerpc/kernel/softemu8xx.c b/arch/powerpc/kernel/softemu8xx.c
deleted file mode 100644
index 29b2f81dd709..000000000000
--- a/arch/powerpc/kernel/softemu8xx.c
+++ /dev/null
@@ -1,199 +0,0 @@
1/*
2 * Software emulation of some PPC instructions for the 8xx core.
3 *
4 * Copyright (C) 1998 Dan Malek (dmalek@jlc.net)
5 *
6 * Software floating emulation for the MPC8xx processor. I did this mostly
7 * because it was easier than trying to get the libraries compiled for
8 * software floating point. The goal is still to get the libraries done,
9 * but I lost patience and needed some hacks to at least get init and
10 * shells running. The first problem is the setjmp/longjmp that save
11 * and restore the floating point registers.
12 *
13 * For this emulation, our working registers are found on the register
14 * save area.
15 */
16
17#include <linux/errno.h>
18#include <linux/sched.h>
19#include <linux/kernel.h>
20#include <linux/mm.h>
21#include <linux/stddef.h>
22#include <linux/unistd.h>
23#include <linux/ptrace.h>
24#include <linux/user.h>
25#include <linux/interrupt.h>
26
27#include <asm/pgtable.h>
28#include <asm/uaccess.h>
29#include <asm/io.h>
30
31/* Eventually we may need a look-up table, but this works for now.
32*/
33#define LFS 48
34#define LFD 50
35#define LFDU 51
36#define STFD 54
37#define STFDU 55
38#define FMR 63
39
40void print_8xx_pte(struct mm_struct *mm, unsigned long addr)
41{
42 pgd_t *pgd;
43 pmd_t *pmd;
44 pte_t *pte;
45
46 printk(" pte @ 0x%8lx: ", addr);
47 pgd = pgd_offset(mm, addr & PAGE_MASK);
48 if (pgd) {
49 pmd = pmd_offset(pud_offset(pgd, addr & PAGE_MASK),
50 addr & PAGE_MASK);
51 if (pmd && pmd_present(*pmd)) {
52 pte = pte_offset_kernel(pmd, addr & PAGE_MASK);
53 if (pte) {
54 printk(" (0x%08lx)->(0x%08lx)->0x%08lx\n",
55 (long)pgd, (long)pte, (long)pte_val(*pte));
56#define pp ((long)pte_val(*pte))
57 printk(" RPN: %05lx PP: %lx SPS: %lx SH: %lx "
58 "CI: %lx v: %lx\n",
59 pp>>12, /* rpn */
60 (pp>>10)&3, /* pp */
61 (pp>>3)&1, /* small */
62 (pp>>2)&1, /* shared */
63 (pp>>1)&1, /* cache inhibit */
64 pp&1 /* valid */
65 );
66#undef pp
67 }
68 else {
69 printk("no pte\n");
70 }
71 }
72 else {
73 printk("no pmd\n");
74 }
75 }
76 else {
77 printk("no pgd\n");
78 }
79}
80
81int get_8xx_pte(struct mm_struct *mm, unsigned long addr)
82{
83 pgd_t *pgd;
84 pmd_t *pmd;
85 pte_t *pte;
86 int retval = 0;
87
88 pgd = pgd_offset(mm, addr & PAGE_MASK);
89 if (pgd) {
90 pmd = pmd_offset(pud_offset(pgd, addr & PAGE_MASK),
91 addr & PAGE_MASK);
92 if (pmd && pmd_present(*pmd)) {
93 pte = pte_offset_kernel(pmd, addr & PAGE_MASK);
94 if (pte) {
95 retval = (int)pte_val(*pte);
96 }
97 }
98 }
99 return retval;
100}
101
102/*
103 * We return 0 on success, 1 on unimplemented instruction, and EFAULT
104 * if a load/store faulted.
105 */
106int Soft_emulate_8xx(struct pt_regs *regs)
107{
108 u32 inst, instword;
109 u32 flreg, idxreg, disp;
110 int retval;
111 s16 sdisp;
112 u32 *ea, *ip;
113
114 retval = 0;
115
116 instword = *((u32 *)regs->nip);
117 inst = instword >> 26;
118
119 flreg = (instword >> 21) & 0x1f;
120 idxreg = (instword >> 16) & 0x1f;
121 disp = instword & 0xffff;
122
123 ea = (u32 *)(regs->gpr[idxreg] + disp);
124 ip = (u32 *)&current->thread.TS_FPR(flreg);
125
126 switch ( inst )
127 {
128 case LFD:
129 /* this is a 16 bit quantity that is sign extended
130 * so use a signed short here -- Cort
131 */
132 sdisp = (instword & 0xffff);
133 ea = (u32 *)(regs->gpr[idxreg] + sdisp);
134 if (copy_from_user(ip, ea, sizeof(double)))
135 retval = -EFAULT;
136 break;
137
138 case LFDU:
139 if (copy_from_user(ip, ea, sizeof(double)))
140 retval = -EFAULT;
141 else
142 regs->gpr[idxreg] = (u32)ea;
143 break;
144 case LFS:
145 sdisp = (instword & 0xffff);
146 ea = (u32 *)(regs->gpr[idxreg] + sdisp);
147 if (copy_from_user(ip, ea, sizeof(float)))
148 retval = -EFAULT;
149 break;
150 case STFD:
151 /* this is a 16 bit quantity that is sign extended
152 * so use a signed short here -- Cort
153 */
154 sdisp = (instword & 0xffff);
155 ea = (u32 *)(regs->gpr[idxreg] + sdisp);
156 if (copy_to_user(ea, ip, sizeof(double)))
157 retval = -EFAULT;
158 break;
159
160 case STFDU:
161 if (copy_to_user(ea, ip, sizeof(double)))
162 retval = -EFAULT;
163 else
164 regs->gpr[idxreg] = (u32)ea;
165 break;
166 case FMR:
167 /* assume this is a fp move -- Cort */
168 memcpy(ip, &current->thread.TS_FPR((instword>>11)&0x1f),
169 sizeof(double));
170 break;
171 default:
172 retval = 1;
173 printk("Bad emulation %s/%d\n"
174 " NIP: %08lx instruction: %08x opcode: %x "
175 "A: %x B: %x C: %x code: %x rc: %x\n",
176 current->comm,current->pid,
177 regs->nip,
178 instword,inst,
179 (instword>>16)&0x1f,
180 (instword>>11)&0x1f,
181 (instword>>6)&0x1f,
182 (instword>>1)&0x3ff,
183 instword&1);
184 {
185 int pa;
186 print_8xx_pte(current->mm,regs->nip);
187 pa = get_8xx_pte(current->mm,regs->nip) & PAGE_MASK;
188 pa |= (regs->nip & ~PAGE_MASK);
189 pa = (unsigned long)__va(pa);
190 printk("Kernel VA for NIP %x ", pa);
191 print_8xx_pte(current->mm,pa);
192 }
193 }
194
195 if (retval == 0)
196 regs->nip += 4;
197
198 return retval;
199}
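
The emulator deleted above leans entirely on PowerPC's fixed-width instruction encoding: the primary opcode lives in the top six bits, and the FP register, base register, and displacement sit at fixed offsets below it. A standalone sketch of that D-form decode follows, using a sample encoding constructed for the demo rather than anything taken from the file:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* lfd f3, 8(r5): opcode 50 (LFD), FRT=3, RA=5, D=8 */
	uint32_t instword = 0xC8650008;
	uint32_t opcode = instword >> 26;		/* primary opcode */
	uint32_t flreg = (instword >> 21) & 0x1f;	/* FP target reg */
	uint32_t idxreg = (instword >> 16) & 0x1f;	/* base register */
	int16_t sdisp = (int16_t)(instword & 0xffff);	/* sign-extends */

	printf("op=%u fr=%u ra=%u d=%d\n", opcode, flreg, idxreg, sdisp);
	return 0;
}

The sign extension through int16_t is the same trick the LFD/LFS/STFD cases perform with their s16 sdisp variable.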
diff --git a/arch/powerpc/kernel/swsusp_asm64.S b/arch/powerpc/kernel/swsusp_asm64.S
index 86ac1d90d02b..22045984835f 100644
--- a/arch/powerpc/kernel/swsusp_asm64.S
+++ b/arch/powerpc/kernel/swsusp_asm64.S
@@ -46,10 +46,19 @@
46#define SL_r29 0xe8 46#define SL_r29 0xe8
47#define SL_r30 0xf0 47#define SL_r30 0xf0
48#define SL_r31 0xf8 48#define SL_r31 0xf8
49#define SL_SIZE SL_r31+8 49#define SL_SPRG1 0x100
50#define SL_TCR 0x108
51#define SL_SIZE SL_TCR+8
50 52
51/* these macros rely on the save area being 53/* these macros rely on the save area being
52 * pointed to by r11 */ 54 * pointed to by r11 */
55
56#define SAVE_SPR(register) \
57 mfspr r0, SPRN_##register ;\
58 std r0, SL_##register(r11)
59#define RESTORE_SPR(register) \
60 ld r0, SL_##register(r11) ;\
61 mtspr SPRN_##register, r0
53#define SAVE_SPECIAL(special) \ 62#define SAVE_SPECIAL(special) \
54 mf##special r0 ;\ 63 mf##special r0 ;\
55 std r0, SL_##special(r11) 64 std r0, SL_##special(r11)
@@ -103,8 +112,15 @@ _GLOBAL(swsusp_arch_suspend)
103 SAVE_REGISTER(r30) 112 SAVE_REGISTER(r30)
104 SAVE_REGISTER(r31) 113 SAVE_REGISTER(r31)
105 SAVE_SPECIAL(MSR) 114 SAVE_SPECIAL(MSR)
106 SAVE_SPECIAL(SDR1)
107 SAVE_SPECIAL(XER) 115 SAVE_SPECIAL(XER)
116#ifdef CONFIG_PPC_BOOK3S_64
117 SAVE_SPECIAL(SDR1)
118#else
119 SAVE_SPR(TCR)
120
 121	/* Save SPRG1; it is used to save the paca */
122 SAVE_SPR(SPRG1)
123#endif
108 124
109 /* we push the stack up 128 bytes but don't store the 125 /* we push the stack up 128 bytes but don't store the
110 * stack pointer on the stack like a real stackframe */ 126 * stack pointer on the stack like a real stackframe */
@@ -151,6 +167,7 @@ copy_page_loop:
151 bne+ copyloop 167 bne+ copyloop
152nothing_to_copy: 168nothing_to_copy:
153 169
170#ifdef CONFIG_PPC_BOOK3S_64
154 /* flush caches */ 171 /* flush caches */
155 lis r3, 0x10 172 lis r3, 0x10
156 mtctr r3 173 mtctr r3
@@ -167,6 +184,7 @@ nothing_to_copy:
167 sync 184 sync
168 185
169 tlbia 186 tlbia
187#endif
170 188
171 ld r11,swsusp_save_area_ptr@toc(r2) 189 ld r11,swsusp_save_area_ptr@toc(r2)
172 190
@@ -208,16 +226,39 @@ nothing_to_copy:
208 RESTORE_REGISTER(r29) 226 RESTORE_REGISTER(r29)
209 RESTORE_REGISTER(r30) 227 RESTORE_REGISTER(r30)
210 RESTORE_REGISTER(r31) 228 RESTORE_REGISTER(r31)
229
230#ifdef CONFIG_PPC_BOOK3S_64
211 /* can't use RESTORE_SPECIAL(MSR) */ 231 /* can't use RESTORE_SPECIAL(MSR) */
212 ld r0, SL_MSR(r11) 232 ld r0, SL_MSR(r11)
213 mtmsrd r0, 0 233 mtmsrd r0, 0
214 RESTORE_SPECIAL(SDR1) 234 RESTORE_SPECIAL(SDR1)
235#else
 236	/* Restore SPRG1; it is used to save the paca */
237 ld r0, SL_SPRG1(r11)
238 mtsprg 1, r0
239
240 RESTORE_SPECIAL(MSR)
241
242 /* Restore TCR and clear any pending bits in TSR. */
243 RESTORE_SPR(TCR)
244 lis r0, (TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS)@h
245 mtspr SPRN_TSR, r0
246
247 /* Kick decrementer */
248 li r0, 1
249 mtdec r0
250
251 /* Invalidate all tlbs */
252 bl _tlbil_all
253#endif
215 RESTORE_SPECIAL(XER) 254 RESTORE_SPECIAL(XER)
216 255
217 sync 256 sync
218 257
219 addi r1,r1,-128 258 addi r1,r1,-128
259#ifdef CONFIG_PPC_BOOK3S_64
220 bl slb_flush_and_rebolt 260 bl slb_flush_and_rebolt
261#endif
221 bl do_after_copyback 262 bl do_after_copyback
222 addi r1,r1,128 263 addi r1,r1,128
223 264
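
The SAVE_SPR/RESTORE_SPR macros added above work by token pasting: the single argument names both the SPRN_* register constant and the SL_* save-area slot, so each special register costs one macro invocation. The same idiom in plain C, with the numeric values treated as illustrative stand-ins:

#include <stdio.h>

#define SPRN_TCR	0x154	/* illustrative values for the demo */
#define SPRN_SPRG1	0x111
#define SL_TCR		0x108
#define SL_SPRG1	0x100

#define SHOW_SPR(reg) \
	printf("SPR 0x%x saved at offset 0x%x\n", SPRN_##reg, SL_##reg)

int main(void)
{
	SHOW_SPR(TCR);		/* expands via SPRN_TCR / SL_TCR */
	SHOW_SPR(SPRG1);
	return 0;
}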
diff --git a/arch/powerpc/kernel/swsusp_booke.S b/arch/powerpc/kernel/swsusp_booke.S
index 11a39307dd71..0f204053e5b5 100644
--- a/arch/powerpc/kernel/swsusp_booke.S
+++ b/arch/powerpc/kernel/swsusp_booke.S
@@ -141,6 +141,14 @@ _GLOBAL(swsusp_arch_resume)
141 lis r11,swsusp_save_area@h 141 lis r11,swsusp_save_area@h
142 ori r11,r11,swsusp_save_area@l 142 ori r11,r11,swsusp_save_area@l
143 143
144 /*
145 * Mappings from virtual addresses to physical addresses may be
146 * different than they were prior to restoring hibernation state.
147 * Invalidate the TLB so that the boot CPU is using the new
148 * mappings.
149 */
150 bl _tlbil_all
151
144 lwz r4,SL_SPRG0(r11) 152 lwz r4,SL_SPRG0(r11)
145 mtsprg 0,r4 153 mtsprg 0,r4
146 lwz r4,SL_SPRG1(r11) 154 lwz r4,SL_SPRG1(r11)
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 65ab9e909377..192b051df97e 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -210,18 +210,18 @@ static u64 scan_dispatch_log(u64 stop_tb)
210 if (!dtl) 210 if (!dtl)
211 return 0; 211 return 0;
212 212
213 if (i == vpa->dtl_idx) 213 if (i == be64_to_cpu(vpa->dtl_idx))
214 return 0; 214 return 0;
215 while (i < vpa->dtl_idx) { 215 while (i < be64_to_cpu(vpa->dtl_idx)) {
216 if (dtl_consumer) 216 if (dtl_consumer)
217 dtl_consumer(dtl, i); 217 dtl_consumer(dtl, i);
218 dtb = dtl->timebase; 218 dtb = be64_to_cpu(dtl->timebase);
219 tb_delta = dtl->enqueue_to_dispatch_time + 219 tb_delta = be32_to_cpu(dtl->enqueue_to_dispatch_time) +
220 dtl->ready_to_enqueue_time; 220 be32_to_cpu(dtl->ready_to_enqueue_time);
221 barrier(); 221 barrier();
222 if (i + N_DISPATCH_LOG < vpa->dtl_idx) { 222 if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) {
223 /* buffer has overflowed */ 223 /* buffer has overflowed */
224 i = vpa->dtl_idx - N_DISPATCH_LOG; 224 i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG;
225 dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG); 225 dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
226 continue; 226 continue;
227 } 227 }
@@ -269,7 +269,7 @@ static inline u64 calculate_stolen_time(u64 stop_tb)
269{ 269{
270 u64 stolen = 0; 270 u64 stolen = 0;
271 271
272 if (get_paca()->dtl_ridx != get_paca()->lppaca_ptr->dtl_idx) { 272 if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx)) {
273 stolen = scan_dispatch_log(stop_tb); 273 stolen = scan_dispatch_log(stop_tb);
274 get_paca()->system_time -= stolen; 274 get_paca()->system_time -= stolen;
275 } 275 }
@@ -612,7 +612,7 @@ unsigned long long sched_clock(void)
612static int __init get_freq(char *name, int cells, unsigned long *val) 612static int __init get_freq(char *name, int cells, unsigned long *val)
613{ 613{
614 struct device_node *cpu; 614 struct device_node *cpu;
615 const unsigned int *fp; 615 const __be32 *fp;
616 int found = 0; 616 int found = 0;
617 617
618 /* The cpu node should have timebase and clock frequency properties */ 618 /* The cpu node should have timebase and clock frequency properties */
@@ -1049,7 +1049,7 @@ static int __init rtc_init(void)
1049 1049
1050 pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0); 1050 pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0);
1051 1051
1052 return PTR_RET(pdev); 1052 return PTR_ERR_OR_ZERO(pdev);
1053} 1053}
1054 1054
1055module_init(rtc_init); 1055module_init(rtc_init);
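
The scan_dispatch_log() changes above exist because the dispatch trace log and lppaca are shared with firmware in big-endian layout, so a little-endian host must convert every field on read. A userspace model of the same rule, using ntohl() as a stand-in for be32_to_cpu():

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>	/* ntohl()/htonl() stand in for the be32 helpers */

struct fake_dtl_entry {
	uint32_t enqueue_to_dispatch_time;	/* stored big-endian */
	uint32_t ready_to_enqueue_time;		/* stored big-endian */
};

int main(void)
{
	struct fake_dtl_entry e = { htonl(100), htonl(25) };
	uint32_t tb_delta = ntohl(e.enqueue_to_dispatch_time) +
			    ntohl(e.ready_to_enqueue_time);

	printf("tb_delta = %u\n", tb_delta);	/* 125 on any host */
	return 0;
}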
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index 51be8fb24803..7b60b9851469 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -155,10 +155,10 @@ _GLOBAL(tm_reclaim)
155 mfvscr vr0 155 mfvscr vr0
156 li r6, THREAD_TRANSACT_VSCR 156 li r6, THREAD_TRANSACT_VSCR
157 stvx vr0, r3, r6 157 stvx vr0, r3, r6
158dont_backup_vec:
158 mfspr r0, SPRN_VRSAVE 159 mfspr r0, SPRN_VRSAVE
159 std r0, THREAD_TRANSACT_VRSAVE(r3) 160 std r0, THREAD_TRANSACT_VRSAVE(r3)
160 161
161dont_backup_vec:
162 andi. r0, r4, MSR_FP 162 andi. r0, r4, MSR_FP
163 beq dont_backup_fp 163 beq dont_backup_fp
164 164
@@ -233,6 +233,16 @@ dont_backup_fp:
233 std r5, _CCR(r7) 233 std r5, _CCR(r7)
234 std r6, _XER(r7) 234 std r6, _XER(r7)
235 235
236
237 /* ******************** TAR, PPR, DSCR ********** */
238 mfspr r3, SPRN_TAR
239 mfspr r4, SPRN_PPR
240 mfspr r5, SPRN_DSCR
241
242 std r3, THREAD_TM_TAR(r12)
243 std r4, THREAD_TM_PPR(r12)
244 std r5, THREAD_TM_DSCR(r12)
245
236 /* MSR and flags: We don't change CRs, and we don't need to alter 246 /* MSR and flags: We don't change CRs, and we don't need to alter
237 * MSR. 247 * MSR.
238 */ 248 */
@@ -331,11 +341,11 @@ _GLOBAL(tm_recheckpoint)
331 lvx vr0, r3, r5 341 lvx vr0, r3, r5
332 mtvscr vr0 342 mtvscr vr0
333 REST_32VRS(0, r5, r3) /* r5 scratch, r3 THREAD ptr */ 343 REST_32VRS(0, r5, r3) /* r5 scratch, r3 THREAD ptr */
344dont_restore_vec:
334 ld r5, THREAD_VRSAVE(r3) 345 ld r5, THREAD_VRSAVE(r3)
335 mtspr SPRN_VRSAVE, r5 346 mtspr SPRN_VRSAVE, r5
336#endif 347#endif
337 348
338dont_restore_vec:
339 andi. r0, r4, MSR_FP 349 andi. r0, r4, MSR_FP
340 beq dont_restore_fp 350 beq dont_restore_fp
341 351
@@ -347,6 +357,16 @@ dont_restore_fp:
347 mtmsr r6 /* FP/Vec off again! */ 357 mtmsr r6 /* FP/Vec off again! */
348 358
349restore_gprs: 359restore_gprs:
360
361 /* ******************** TAR, PPR, DSCR ********** */
362 ld r4, THREAD_TM_TAR(r3)
363 ld r5, THREAD_TM_PPR(r3)
364 ld r6, THREAD_TM_DSCR(r3)
365
366 mtspr SPRN_TAR, r4
367 mtspr SPRN_PPR, r5
368 mtspr SPRN_DSCR, r6
369
350 /* ******************** CR,LR,CCR,MSR ********** */ 370 /* ******************** CR,LR,CCR,MSR ********** */
351 ld r3, _CTR(r7) 371 ld r3, _CTR(r7)
352 ld r4, _LINK(r7) 372 ld r4, _LINK(r7)
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index bf33c22e38a4..f783c932faeb 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -44,9 +44,7 @@
44#include <asm/machdep.h> 44#include <asm/machdep.h>
45#include <asm/rtas.h> 45#include <asm/rtas.h>
46#include <asm/pmc.h> 46#include <asm/pmc.h>
47#ifdef CONFIG_PPC32
48#include <asm/reg.h> 47#include <asm/reg.h>
49#endif
50#ifdef CONFIG_PMAC_BACKLIGHT 48#ifdef CONFIG_PMAC_BACKLIGHT
51#include <asm/backlight.h> 49#include <asm/backlight.h>
52#endif 50#endif
@@ -62,6 +60,7 @@
62#include <asm/switch_to.h> 60#include <asm/switch_to.h>
63#include <asm/tm.h> 61#include <asm/tm.h>
64#include <asm/debug.h> 62#include <asm/debug.h>
63#include <sysdev/fsl_pci.h>
65 64
66#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) 65#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
67int (*__debugger)(struct pt_regs *regs) __read_mostly; 66int (*__debugger)(struct pt_regs *regs) __read_mostly;
@@ -567,6 +566,8 @@ int machine_check_e500(struct pt_regs *regs)
567 if (reason & MCSR_BUS_RBERR) { 566 if (reason & MCSR_BUS_RBERR) {
568 if (fsl_rio_mcheck_exception(regs)) 567 if (fsl_rio_mcheck_exception(regs))
569 return 1; 568 return 1;
569 if (fsl_pci_mcheck_exception(regs))
570 return 1;
570 } 571 }
571 572
572 printk("Machine check in kernel mode.\n"); 573 printk("Machine check in kernel mode.\n");
@@ -964,7 +965,7 @@ static int emulate_instruction(struct pt_regs *regs)
964 u32 instword; 965 u32 instword;
965 u32 rd; 966 u32 rd;
966 967
967 if (!user_mode(regs) || (regs->msr & MSR_LE)) 968 if (!user_mode(regs))
968 return -EINVAL; 969 return -EINVAL;
969 CHECK_FULL_REGS(regs); 970 CHECK_FULL_REGS(regs);
970 971
@@ -1052,11 +1053,41 @@ int is_valid_bugaddr(unsigned long addr)
1052 return is_kernel_addr(addr); 1053 return is_kernel_addr(addr);
1053} 1054}
1054 1055
1056#ifdef CONFIG_MATH_EMULATION
1057static int emulate_math(struct pt_regs *regs)
1058{
1059 int ret;
1060 extern int do_mathemu(struct pt_regs *regs);
1061
1062 ret = do_mathemu(regs);
1063 if (ret >= 0)
1064 PPC_WARN_EMULATED(math, regs);
1065
1066 switch (ret) {
1067 case 0:
1068 emulate_single_step(regs);
1069 return 0;
1070 case 1: {
1071 int code = 0;
1072 code = __parse_fpscr(current->thread.fpscr.val);
1073 _exception(SIGFPE, regs, code, regs->nip);
1074 return 0;
1075 }
1076 case -EFAULT:
1077 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
1078 return 0;
1079 }
1080
1081 return -1;
1082}
1083#else
1084static inline int emulate_math(struct pt_regs *regs) { return -1; }
1085#endif
1086
1055void __kprobes program_check_exception(struct pt_regs *regs) 1087void __kprobes program_check_exception(struct pt_regs *regs)
1056{ 1088{
1057 enum ctx_state prev_state = exception_enter(); 1089 enum ctx_state prev_state = exception_enter();
1058 unsigned int reason = get_reason(regs); 1090 unsigned int reason = get_reason(regs);
1059 extern int do_mathemu(struct pt_regs *regs);
1060 1091
1061 /* We can now get here via a FP Unavailable exception if the core 1092 /* We can now get here via a FP Unavailable exception if the core
1062 * has no FPU, in that case the reason flags will be 0 */ 1093 * has no FPU, in that case the reason flags will be 0 */
@@ -1118,11 +1149,20 @@ void __kprobes program_check_exception(struct pt_regs *regs)
1118 } 1149 }
1119#endif 1150#endif
1120 1151
1152 /*
 1153	 * If we took the program check in the kernel, skip down to sending a
 1154	 * SIGILL. The subsequent cases all relate to emulating instructions
 1155	 * which we should only do for userspace. We also do not want to enable
 1156	 * interrupts for kernel faults because that might lead to further
 1157	 * faults, and lose the context of the original exception.
1158 */
1159 if (!user_mode(regs))
1160 goto sigill;
1161
1121 /* We restore the interrupt state now */ 1162 /* We restore the interrupt state now */
1122 if (!arch_irq_disabled_regs(regs)) 1163 if (!arch_irq_disabled_regs(regs))
1123 local_irq_enable(); 1164 local_irq_enable();
1124 1165
1125#ifdef CONFIG_MATH_EMULATION
1126 /* (reason & REASON_ILLEGAL) would be the obvious thing here, 1166 /* (reason & REASON_ILLEGAL) would be the obvious thing here,
1127 * but there seems to be a hardware bug on the 405GP (RevD) 1167 * but there seems to be a hardware bug on the 405GP (RevD)
1128 * that means ESR is sometimes set incorrectly - either to 1168 * that means ESR is sometimes set incorrectly - either to
@@ -1131,31 +1171,8 @@ void __kprobes program_check_exception(struct pt_regs *regs)
1131 * instruction or only on FP instructions, whether there is a 1171 * instruction or only on FP instructions, whether there is a
1132 * pattern to occurrences etc. -dgibson 31/Mar/2003 1172 * pattern to occurrences etc. -dgibson 31/Mar/2003
1133 */ 1173 */
1134 1174 if (!emulate_math(regs))
1135 /*
1136 * If we support a HW FPU, we need to ensure the FP state
 1137	 * is flushed into the thread_struct before attempting
1138 * emulation
1139 */
1140#ifdef CONFIG_PPC_FPU
1141 flush_fp_to_thread(current);
1142#endif
1143 switch (do_mathemu(regs)) {
1144 case 0:
1145 emulate_single_step(regs);
1146 goto bail; 1175 goto bail;
1147 case 1: {
1148 int code = 0;
1149 code = __parse_fpscr(current->thread.fpscr.val);
1150 _exception(SIGFPE, regs, code, regs->nip);
1151 goto bail;
1152 }
1153 case -EFAULT:
1154 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
1155 goto bail;
1156 }
1157 /* fall through on any other errors */
1158#endif /* CONFIG_MATH_EMULATION */
1159 1176
1160 /* Try to emulate it if we should. */ 1177 /* Try to emulate it if we should. */
1161 if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) { 1178 if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
@@ -1170,6 +1187,7 @@ void __kprobes program_check_exception(struct pt_regs *regs)
1170 } 1187 }
1171 } 1188 }
1172 1189
1190sigill:
1173 if (reason & REASON_PRIVILEGED) 1191 if (reason & REASON_PRIVILEGED)
1174 _exception(SIGILL, regs, ILL_PRVOPC, regs->nip); 1192 _exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
1175 else 1193 else
@@ -1296,43 +1314,51 @@ void vsx_unavailable_exception(struct pt_regs *regs)
1296 die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT); 1314 die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
1297} 1315}
1298 1316
1317#ifdef CONFIG_PPC64
1299void facility_unavailable_exception(struct pt_regs *regs) 1318void facility_unavailable_exception(struct pt_regs *regs)
1300{ 1319{
1301 static char *facility_strings[] = { 1320 static char *facility_strings[] = {
1302 "FPU", 1321 [FSCR_FP_LG] = "FPU",
1303 "VMX/VSX", 1322 [FSCR_VECVSX_LG] = "VMX/VSX",
1304 "DSCR", 1323 [FSCR_DSCR_LG] = "DSCR",
1305 "PMU SPRs", 1324 [FSCR_PM_LG] = "PMU SPRs",
1306 "BHRB", 1325 [FSCR_BHRB_LG] = "BHRB",
1307 "TM", 1326 [FSCR_TM_LG] = "TM",
1308 "AT", 1327 [FSCR_EBB_LG] = "EBB",
1309 "EBB", 1328 [FSCR_TAR_LG] = "TAR",
1310 "TAR",
1311 }; 1329 };
1312 char *facility, *prefix; 1330 char *facility = "unknown";
1313 u64 value; 1331 u64 value;
1332 u8 status;
1333 bool hv;
1314 1334
1315 if (regs->trap == 0xf60) { 1335 hv = (regs->trap == 0xf80);
1316 value = mfspr(SPRN_FSCR); 1336 if (hv)
1317 prefix = "";
1318 } else {
1319 value = mfspr(SPRN_HFSCR); 1337 value = mfspr(SPRN_HFSCR);
1320 prefix = "Hypervisor "; 1338 else
1339 value = mfspr(SPRN_FSCR);
1340
1341 status = value >> 56;
1342 if (status == FSCR_DSCR_LG) {
 1343	 /* User is accessing the DSCR. Set the inherit bit and allow
1344 * the user to set it directly in future by setting via the
1345 * FSCR DSCR bit. We always leave HFSCR DSCR set.
1346 */
1347 current->thread.dscr_inherit = 1;
1348 mtspr(SPRN_FSCR, value | FSCR_DSCR);
1349 return;
1321 } 1350 }
1322 1351
1323 value = value >> 56; 1352 if ((status < ARRAY_SIZE(facility_strings)) &&
1353 facility_strings[status])
1354 facility = facility_strings[status];
1324 1355
1325 /* We restore the interrupt state now */ 1356 /* We restore the interrupt state now */
1326 if (!arch_irq_disabled_regs(regs)) 1357 if (!arch_irq_disabled_regs(regs))
1327 local_irq_enable(); 1358 local_irq_enable();
1328 1359
1329 if (value < ARRAY_SIZE(facility_strings))
1330 facility = facility_strings[value];
1331 else
1332 facility = "unknown";
1333
1334 pr_err("%sFacility '%s' unavailable, exception at 0x%lx, MSR=%lx\n", 1360 pr_err("%sFacility '%s' unavailable, exception at 0x%lx, MSR=%lx\n",
1335 prefix, facility, regs->nip, regs->msr); 1361 hv ? "Hypervisor " : "", facility, regs->nip, regs->msr);
1336 1362
1337 if (user_mode(regs)) { 1363 if (user_mode(regs)) {
1338 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1364 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
@@ -1341,6 +1367,7 @@ void facility_unavailable_exception(struct pt_regs *regs)
1341 1367
1342 die("Unexpected facility unavailable exception", regs, SIGABRT); 1368 die("Unexpected facility unavailable exception", regs, SIGABRT);
1343} 1369}
1370#endif
1344 1371
1345#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1372#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1346 1373
@@ -1434,11 +1461,6 @@ void performance_monitor_exception(struct pt_regs *regs)
1434#ifdef CONFIG_8xx 1461#ifdef CONFIG_8xx
1435void SoftwareEmulation(struct pt_regs *regs) 1462void SoftwareEmulation(struct pt_regs *regs)
1436{ 1463{
1437 extern int do_mathemu(struct pt_regs *);
1438#if defined(CONFIG_MATH_EMULATION)
1439 int errcode;
1440#endif
1441
1442 CHECK_FULL_REGS(regs); 1464 CHECK_FULL_REGS(regs);
1443 1465
1444 if (!user_mode(regs)) { 1466 if (!user_mode(regs)) {
@@ -1446,31 +1468,10 @@ void SoftwareEmulation(struct pt_regs *regs)
1446 die("Kernel Mode Software FPU Emulation", regs, SIGFPE); 1468 die("Kernel Mode Software FPU Emulation", regs, SIGFPE);
1447 } 1469 }
1448 1470
1449#ifdef CONFIG_MATH_EMULATION 1471 if (!emulate_math(regs))
1450 errcode = do_mathemu(regs);
1451 if (errcode >= 0)
1452 PPC_WARN_EMULATED(math, regs);
1453
1454 switch (errcode) {
1455 case 0:
1456 emulate_single_step(regs);
1457 return;
1458 case 1: {
1459 int code = 0;
1460 code = __parse_fpscr(current->thread.fpscr.val);
1461 _exception(SIGFPE, regs, code, regs->nip);
1462 return;
1463 }
1464 case -EFAULT:
1465 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
1466 return;
1467 default:
1468 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1469 return; 1472 return;
1470 } 1473
1471#else
1472 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1474 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1473#endif
1474} 1475}
1475#endif /* CONFIG_8xx */ 1476#endif /* CONFIG_8xx */
1476 1477
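
The facility_strings rework above swaps positional initializers for designated ones, so the FSCR status byte indexes the table directly and any unlisted status falls through to "unknown". A self-contained sketch of that lookup shape; the FSCR_*_LG values here are stand-ins, not the kernel's definitions:

#include <stdio.h>

#define FSCR_FP_LG	0
#define FSCR_DSCR_LG	2
#define FSCR_TAR_LG	8

static const char *facility_strings[] = {
	[FSCR_FP_LG]	= "FPU",
	[FSCR_DSCR_LG]	= "DSCR",
	[FSCR_TAR_LG]	= "TAR",
};

static const char *facility_name(unsigned int status)
{
	if (status < sizeof(facility_strings) / sizeof(facility_strings[0]) &&
	    facility_strings[status])
		return facility_strings[status];
	return "unknown";	/* gaps stay NULL, so this catches them too */
}

int main(void)
{
	/* prints: DSCR unknown unknown */
	printf("%s %s %s\n", facility_name(2), facility_name(1),
	       facility_name(42));
	return 0;
}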
diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c
index 6837f839ab78..75702e207b29 100644
--- a/arch/powerpc/kernel/udbg_16550.c
+++ b/arch/powerpc/kernel/udbg_16550.c
@@ -18,23 +18,19 @@ extern void real_writeb(u8 data, volatile u8 __iomem *addr);
18extern u8 real_205_readb(volatile u8 __iomem *addr); 18extern u8 real_205_readb(volatile u8 __iomem *addr);
19extern void real_205_writeb(u8 data, volatile u8 __iomem *addr); 19extern void real_205_writeb(u8 data, volatile u8 __iomem *addr);
20 20
21struct NS16550 { 21#define UART_RBR 0
22 /* this struct must be packed */ 22#define UART_IER 1
23 unsigned char rbr; /* 0 */ 23#define UART_FCR 2
24 unsigned char ier; /* 1 */ 24#define UART_LCR 3
25 unsigned char fcr; /* 2 */ 25#define UART_MCR 4
26 unsigned char lcr; /* 3 */ 26#define UART_LSR 5
27 unsigned char mcr; /* 4 */ 27#define UART_MSR 6
28 unsigned char lsr; /* 5 */ 28#define UART_SCR 7
29 unsigned char msr; /* 6 */ 29#define UART_THR UART_RBR
30 unsigned char scr; /* 7 */ 30#define UART_IIR UART_FCR
31}; 31#define UART_DLL UART_RBR
32 32#define UART_DLM UART_IER
33#define thr rbr 33#define UART_DLAB UART_LCR
34#define iir fcr
35#define dll rbr
36#define dlm ier
37#define dlab lcr
38 34
39#define LSR_DR 0x01 /* Data ready */ 35#define LSR_DR 0x01 /* Data ready */
40#define LSR_OE 0x02 /* Overrun */ 36#define LSR_OE 0x02 /* Overrun */
@@ -47,52 +43,62 @@ struct NS16550 {
47 43
48#define LCR_DLAB 0x80 44#define LCR_DLAB 0x80
49 45
50static struct NS16550 __iomem *udbg_comport; 46static u8 (*udbg_uart_in)(unsigned int reg);
47static void (*udbg_uart_out)(unsigned int reg, u8 data);
51 48
52static void udbg_550_flush(void) 49static void udbg_uart_flush(void)
53{ 50{
54 if (udbg_comport) { 51 if (!udbg_uart_in)
55 while ((in_8(&udbg_comport->lsr) & LSR_THRE) == 0) 52 return;
56 /* wait for idle */; 53
57 } 54 /* wait for idle */
55 while ((udbg_uart_in(UART_LSR) & LSR_THRE) == 0)
56 cpu_relax();
58} 57}
59 58
60static void udbg_550_putc(char c) 59static void udbg_uart_putc(char c)
61{ 60{
62 if (udbg_comport) { 61 if (!udbg_uart_out)
63 if (c == '\n') 62 return;
64 udbg_550_putc('\r'); 63
65 udbg_550_flush(); 64 if (c == '\n')
66 out_8(&udbg_comport->thr, c); 65 udbg_uart_putc('\r');
67 } 66 udbg_uart_flush();
67 udbg_uart_out(UART_THR, c);
68} 68}
69 69
70static int udbg_550_getc_poll(void) 70static int udbg_uart_getc_poll(void)
71{ 71{
 72 if (udbg_comport) { 72	if (udbg_uart_in && (udbg_uart_in(UART_LSR) & LSR_DR))
73 if ((in_8(&udbg_comport->lsr) & LSR_DR) != 0) 73 return udbg_uart_in(UART_RBR);
74 return in_8(&udbg_comport->rbr);
75 else
76 return -1;
77 }
78 return -1; 74 return -1;
79} 75}
80 76
81static int udbg_550_getc(void) 77static int udbg_uart_getc(void)
82{ 78{
83 if (udbg_comport) { 79 if (!udbg_uart_in)
84 while ((in_8(&udbg_comport->lsr) & LSR_DR) == 0) 80 return -1;
85 /* wait for char */; 81 /* wait for char */
86 return in_8(&udbg_comport->rbr); 82 while (!(udbg_uart_in(UART_LSR) & LSR_DR))
87 } 83 cpu_relax();
88 return -1; 84 return udbg_uart_in(UART_RBR);
85}
86
87static void udbg_use_uart(void)
88{
89 udbg_putc = udbg_uart_putc;
90 udbg_flush = udbg_uart_flush;
91 udbg_getc = udbg_uart_getc;
92 udbg_getc_poll = udbg_uart_getc_poll;
89} 93}
90 94
91void udbg_init_uart(void __iomem *comport, unsigned int speed, 95void udbg_uart_setup(unsigned int speed, unsigned int clock)
92 unsigned int clock)
93{ 96{
94 unsigned int dll, base_bauds; 97 unsigned int dll, base_bauds;
95 98
99 if (!udbg_uart_out)
100 return;
101
96 if (clock == 0) 102 if (clock == 0)
97 clock = 1843200; 103 clock = 1843200;
98 if (speed == 0) 104 if (speed == 0)
@@ -101,51 +107,43 @@ void udbg_init_uart(void __iomem *comport, unsigned int speed,
101 base_bauds = clock / 16; 107 base_bauds = clock / 16;
102 dll = base_bauds / speed; 108 dll = base_bauds / speed;
103 109
104 if (comport) { 110 udbg_uart_out(UART_LCR, 0x00);
105 udbg_comport = (struct NS16550 __iomem *)comport; 111 udbg_uart_out(UART_IER, 0xff);
106 out_8(&udbg_comport->lcr, 0x00); 112 udbg_uart_out(UART_IER, 0x00);
107 out_8(&udbg_comport->ier, 0xff); 113 udbg_uart_out(UART_LCR, LCR_DLAB);
108 out_8(&udbg_comport->ier, 0x00); 114 udbg_uart_out(UART_DLL, dll & 0xff);
109 out_8(&udbg_comport->lcr, LCR_DLAB); 115 udbg_uart_out(UART_DLM, dll >> 8);
110 out_8(&udbg_comport->dll, dll & 0xff); 116 /* 8 data, 1 stop, no parity */
111 out_8(&udbg_comport->dlm, dll >> 8); 117 udbg_uart_out(UART_LCR, 0x3);
112 /* 8 data, 1 stop, no parity */ 118 /* RTS/DTR */
113 out_8(&udbg_comport->lcr, 0x03); 119 udbg_uart_out(UART_MCR, 0x3);
114 /* RTS/DTR */ 120 /* Clear & enable FIFOs */
115 out_8(&udbg_comport->mcr, 0x03); 121 udbg_uart_out(UART_FCR, 0x7);
116 /* Clear & enable FIFOs */
117 out_8(&udbg_comport->fcr ,0x07);
118 udbg_putc = udbg_550_putc;
119 udbg_flush = udbg_550_flush;
120 udbg_getc = udbg_550_getc;
121 udbg_getc_poll = udbg_550_getc_poll;
122 }
123} 122}
124 123
125unsigned int udbg_probe_uart_speed(void __iomem *comport, unsigned int clock) 124unsigned int udbg_probe_uart_speed(unsigned int clock)
126{ 125{
127 unsigned int dll, dlm, divisor, prescaler, speed; 126 unsigned int dll, dlm, divisor, prescaler, speed;
128 u8 old_lcr; 127 u8 old_lcr;
129 struct NS16550 __iomem *port = comport;
130 128
131 old_lcr = in_8(&port->lcr); 129 old_lcr = udbg_uart_in(UART_LCR);
132 130
133 /* select divisor latch registers. */ 131 /* select divisor latch registers. */
134 out_8(&port->lcr, LCR_DLAB); 132 udbg_uart_out(UART_LCR, old_lcr | LCR_DLAB);
135 133
136 /* now, read the divisor */ 134 /* now, read the divisor */
137 dll = in_8(&port->dll); 135 dll = udbg_uart_in(UART_DLL);
138 dlm = in_8(&port->dlm); 136 dlm = udbg_uart_in(UART_DLM);
139 divisor = dlm << 8 | dll; 137 divisor = dlm << 8 | dll;
140 138
141 /* check prescaling */ 139 /* check prescaling */
142 if (in_8(&port->mcr) & 0x80) 140 if (udbg_uart_in(UART_MCR) & 0x80)
143 prescaler = 4; 141 prescaler = 4;
144 else 142 else
145 prescaler = 1; 143 prescaler = 1;
146 144
147 /* restore the LCR */ 145 /* restore the LCR */
148 out_8(&port->lcr, old_lcr); 146 udbg_uart_out(UART_LCR, old_lcr);
149 147
150 /* calculate speed */ 148 /* calculate speed */
151 speed = (clock / prescaler) / (divisor * 16); 149 speed = (clock / prescaler) / (divisor * 16);
@@ -157,195 +155,155 @@ unsigned int udbg_probe_uart_speed(void __iomem *comport, unsigned int clock)
157 return speed; 155 return speed;
158} 156}
159 157
160#ifdef CONFIG_PPC_MAPLE 158static union {
161void udbg_maple_real_flush(void) 159 unsigned char __iomem *mmio_base;
160 unsigned long pio_base;
161} udbg_uart;
162
163static unsigned int udbg_uart_stride = 1;
164
165static u8 udbg_uart_in_pio(unsigned int reg)
162{ 166{
163 if (udbg_comport) { 167 return inb(udbg_uart.pio_base + (reg * udbg_uart_stride));
164 while ((real_readb(&udbg_comport->lsr) & LSR_THRE) == 0)
165 /* wait for idle */;
166 }
167} 168}
168 169
169void udbg_maple_real_putc(char c) 170static void udbg_uart_out_pio(unsigned int reg, u8 data)
170{ 171{
171 if (udbg_comport) { 172 outb(data, udbg_uart.pio_base + (reg * udbg_uart_stride));
172 if (c == '\n')
173 udbg_maple_real_putc('\r');
174 udbg_maple_real_flush();
175 real_writeb(c, &udbg_comport->thr); eieio();
176 }
177} 173}
178 174
179void __init udbg_init_maple_realmode(void) 175void udbg_uart_init_pio(unsigned long port, unsigned int stride)
180{ 176{
181 udbg_comport = (struct NS16550 __iomem *)0xf40003f8; 177 if (!port)
182 178 return;
183 udbg_putc = udbg_maple_real_putc; 179 udbg_uart.pio_base = port;
184 udbg_flush = udbg_maple_real_flush; 180 udbg_uart_stride = stride;
185 udbg_getc = NULL; 181 udbg_uart_in = udbg_uart_in_pio;
186 udbg_getc_poll = NULL; 182 udbg_uart_out = udbg_uart_out_pio;
183 udbg_use_uart();
187} 184}
188#endif /* CONFIG_PPC_MAPLE */
189 185
190#ifdef CONFIG_PPC_PASEMI 186static u8 udbg_uart_in_mmio(unsigned int reg)
191void udbg_pas_real_flush(void)
192{ 187{
193 if (udbg_comport) { 188 return in_8(udbg_uart.mmio_base + (reg * udbg_uart_stride));
194 while ((real_205_readb(&udbg_comport->lsr) & LSR_THRE) == 0)
195 /* wait for idle */;
196 }
197} 189}
198 190
199void udbg_pas_real_putc(char c) 191static void udbg_uart_out_mmio(unsigned int reg, u8 data)
200{ 192{
201 if (udbg_comport) { 193 out_8(udbg_uart.mmio_base + (reg * udbg_uart_stride), data);
202 if (c == '\n')
203 udbg_pas_real_putc('\r');
204 udbg_pas_real_flush();
205 real_205_writeb(c, &udbg_comport->thr); eieio();
206 }
207} 194}
208 195
209void udbg_init_pas_realmode(void)
210{
211 udbg_comport = (struct NS16550 __iomem *)0xfcff03f8UL;
212 196
213 udbg_putc = udbg_pas_real_putc; 197void udbg_uart_init_mmio(void __iomem *addr, unsigned int stride)
214 udbg_flush = udbg_pas_real_flush; 198{
215 udbg_getc = NULL; 199 if (!addr)
216 udbg_getc_poll = NULL; 200 return;
201 udbg_uart.mmio_base = addr;
202 udbg_uart_stride = stride;
203 udbg_uart_in = udbg_uart_in_mmio;
204 udbg_uart_out = udbg_uart_out_mmio;
205 udbg_use_uart();
217} 206}
218#endif /* CONFIG_PPC_MAPLE */
219 207
220#ifdef CONFIG_PPC_EARLY_DEBUG_44x 208#ifdef CONFIG_PPC_MAPLE
221#include <platforms/44x/44x.h> 209
210#define UDBG_UART_MAPLE_ADDR ((void __iomem *)0xf40003f8)
222 211
223static void udbg_44x_as1_flush(void) 212static u8 udbg_uart_in_maple(unsigned int reg)
224{ 213{
225 if (udbg_comport) { 214 return real_readb(UDBG_UART_MAPLE_ADDR + reg);
226 while ((as1_readb(&udbg_comport->lsr) & LSR_THRE) == 0)
227 /* wait for idle */;
228 }
229} 215}
230 216
231static void udbg_44x_as1_putc(char c) 217static void udbg_uart_out_maple(unsigned int reg, u8 val)
232{ 218{
233 if (udbg_comport) { 219 real_writeb(val, UDBG_UART_MAPLE_ADDR + reg);
234 if (c == '\n')
235 udbg_44x_as1_putc('\r');
236 udbg_44x_as1_flush();
237 as1_writeb(c, &udbg_comport->thr); eieio();
238 }
239} 220}
240 221
241static int udbg_44x_as1_getc(void) 222void __init udbg_init_maple_realmode(void)
242{ 223{
243 if (udbg_comport) { 224 udbg_uart_in = udbg_uart_in_maple;
244 while ((as1_readb(&udbg_comport->lsr) & LSR_DR) == 0) 225 udbg_uart_out = udbg_uart_out_maple;
245 ; /* wait for char */ 226 udbg_use_uart();
246 return as1_readb(&udbg_comport->rbr);
247 }
248 return -1;
249} 227}
250 228
251void __init udbg_init_44x_as1(void) 229#endif /* CONFIG_PPC_MAPLE */
252{
253 udbg_comport =
254 (struct NS16550 __iomem *)PPC44x_EARLY_DEBUG_VIRTADDR;
255 230
256 udbg_putc = udbg_44x_as1_putc; 231#ifdef CONFIG_PPC_PASEMI
257 udbg_flush = udbg_44x_as1_flush;
258 udbg_getc = udbg_44x_as1_getc;
259}
260#endif /* CONFIG_PPC_EARLY_DEBUG_44x */
261 232
262#ifdef CONFIG_PPC_EARLY_DEBUG_40x 233#define UDBG_UART_PAS_ADDR ((void __iomem *)0xfcff03f8UL)
263static void udbg_40x_real_flush(void) 234
235static u8 udbg_uart_in_pas(unsigned int reg)
264{ 236{
265 if (udbg_comport) { 237 return real_205_readb(UDBG_UART_PAS_ADDR + reg);
266 while ((real_readb(&udbg_comport->lsr) & LSR_THRE) == 0)
267 /* wait for idle */;
268 }
269} 238}
270 239
271static void udbg_40x_real_putc(char c) 240static void udbg_uart_out_pas(unsigned int reg, u8 val)
272{ 241{
273 if (udbg_comport) { 242 real_205_writeb(val, UDBG_UART_PAS_ADDR + reg);
274 if (c == '\n')
275 udbg_40x_real_putc('\r');
276 udbg_40x_real_flush();
277 real_writeb(c, &udbg_comport->thr); eieio();
278 }
279} 243}
280 244
281static int udbg_40x_real_getc(void) 245void __init udbg_init_pas_realmode(void)
282{ 246{
283 if (udbg_comport) { 247 udbg_uart_in = udbg_uart_in_pas;
284 while ((real_readb(&udbg_comport->lsr) & LSR_DR) == 0) 248 udbg_uart_out = udbg_uart_out_pas;
285 ; /* wait for char */ 249 udbg_use_uart();
286 return real_readb(&udbg_comport->rbr);
287 }
288 return -1;
289} 250}
290 251
291void __init udbg_init_40x_realmode(void) 252#endif /* CONFIG_PPC_PASEMI */
292{ 253
293 udbg_comport = (struct NS16550 __iomem *) 254#ifdef CONFIG_PPC_EARLY_DEBUG_44x
294 CONFIG_PPC_EARLY_DEBUG_40x_PHYSADDR;
295 255
296 udbg_putc = udbg_40x_real_putc; 256#include <platforms/44x/44x.h>
297 udbg_flush = udbg_40x_real_flush; 257
298 udbg_getc = udbg_40x_real_getc; 258static u8 udbg_uart_in_44x_as1(unsigned int reg)
299 udbg_getc_poll = NULL; 259{
260 return as1_readb((void __iomem *)PPC44x_EARLY_DEBUG_VIRTADDR + reg);
300} 261}
301#endif /* CONFIG_PPC_EARLY_DEBUG_40x */
302 262
303#ifdef CONFIG_PPC_EARLY_DEBUG_WSP 263static void udbg_uart_out_44x_as1(unsigned int reg, u8 val)
304static void udbg_wsp_flush(void)
305{ 264{
306 if (udbg_comport) { 265 as1_writeb(val, (void __iomem *)PPC44x_EARLY_DEBUG_VIRTADDR + reg);
307 while ((readb(&udbg_comport->lsr) & LSR_THRE) == 0)
308 /* wait for idle */;
309 }
310} 266}
311 267
312static void udbg_wsp_putc(char c) 268void __init udbg_init_44x_as1(void)
313{ 269{
314 if (udbg_comport) { 270 udbg_uart_in = udbg_uart_in_44x_as1;
315 if (c == '\n') 271 udbg_uart_out = udbg_uart_out_44x_as1;
316 udbg_wsp_putc('\r'); 272 udbg_use_uart();
317 udbg_wsp_flush();
318 writeb(c, &udbg_comport->thr); eieio();
319 }
320} 273}
321 274
322static int udbg_wsp_getc(void) 275#endif /* CONFIG_PPC_EARLY_DEBUG_44x */
276
277#ifdef CONFIG_PPC_EARLY_DEBUG_40x
278
279static u8 udbg_uart_in_40x(unsigned int reg)
323{ 280{
324 if (udbg_comport) { 281 return real_readb((void __iomem *)CONFIG_PPC_EARLY_DEBUG_40x_PHYSADDR
325 while ((readb(&udbg_comport->lsr) & LSR_DR) == 0) 282 + reg);
326 ; /* wait for char */
327 return readb(&udbg_comport->rbr);
328 }
329 return -1;
330} 283}
331 284
332static int udbg_wsp_getc_poll(void) 285static void udbg_uart_out_40x(unsigned int reg, u8 val)
333{ 286{
334 if (udbg_comport) 287 real_writeb(val, (void __iomem *)CONFIG_PPC_EARLY_DEBUG_40x_PHYSADDR
335 if (readb(&udbg_comport->lsr) & LSR_DR) 288 + reg);
336 return readb(&udbg_comport->rbr);
337 return -1;
338} 289}
339 290
340void __init udbg_init_wsp(void) 291void __init udbg_init_40x_realmode(void)
341{ 292{
342 udbg_comport = (struct NS16550 __iomem *)WSP_UART_VIRT; 293 udbg_uart_in = udbg_uart_in_40x;
294 udbg_uart_out = udbg_uart_out_40x;
295 udbg_use_uart();
296}
343 297
344 udbg_init_uart(udbg_comport, 57600, 50000000); 298#endif /* CONFIG_PPC_EARLY_DEBUG_40x */
299
300
301#ifdef CONFIG_PPC_EARLY_DEBUG_WSP
345 302
346 udbg_putc = udbg_wsp_putc; 303void __init udbg_init_wsp(void)
347 udbg_flush = udbg_wsp_flush; 304{
348 udbg_getc = udbg_wsp_getc; 305 udbg_uart_init_mmio((void *)WSP_UART_VIRT, 1);
349 udbg_getc_poll = udbg_wsp_getc_poll; 306 udbg_uart_setup(57600, 50000000);
350} 307}
308
351#endif /* CONFIG_PPC_EARLY_DEBUG_WSP */ 309#endif /* CONFIG_PPC_EARLY_DEBUG_WSP */
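
The udbg_16550.c rewrite above replaces a packed register struct with a pair of in/out function pointers plus a register stride, so one driver body serves MMIO, PIO, and firmware real-mode accessors alike. A minimal userspace model of that indirection, with a fake register file in place of hardware:

#include <stdint.h>
#include <stdio.h>

#define UART_THR 0
#define UART_LSR 5
#define LSR_THRE 0x20

static uint8_t fake_regs[8 * 4];	/* pretend 4-byte register stride */
static unsigned int stride = 4;

static uint8_t demo_in(unsigned int reg)
{
	return fake_regs[reg * stride];
}

static void demo_out(unsigned int reg, uint8_t data)
{
	fake_regs[reg * stride] = data;
}

/* the driver body only ever sees these two pointers */
static uint8_t (*uart_in)(unsigned int) = demo_in;
static void (*uart_out)(unsigned int, uint8_t) = demo_out;

int main(void)
{
	fake_regs[UART_LSR * stride] = LSR_THRE;	/* transmitter idle */
	while (!(uart_in(UART_LSR) & LSR_THRE))
		;					/* wait for idle */
	uart_out(UART_THR, 'x');
	printf("wrote %c\n", fake_regs[UART_THR * stride]);
	return 0;
}

Swapping platforms then reduces to assigning a different accessor pair, which is exactly what the per-platform init functions above do.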
diff --git a/arch/powerpc/kernel/vdso32/gettimeofday.S b/arch/powerpc/kernel/vdso32/gettimeofday.S
index 27e2f623210b..6b1f2a6d5517 100644
--- a/arch/powerpc/kernel/vdso32/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso32/gettimeofday.S
@@ -232,9 +232,9 @@ __do_get_tspec:
232 lwz r6,(CFG_TB_ORIG_STAMP+4)(r9) 232 lwz r6,(CFG_TB_ORIG_STAMP+4)(r9)
233 233
234 /* Get a stable TB value */ 234 /* Get a stable TB value */
2352: mftbu r3 2352: mfspr r3, SPRN_TBRU
236 mftbl r4 236 mfspr r4, SPRN_TBRL
237 mftbu r0 237 mfspr r0, SPRN_TBRU
238 cmplw cr0,r3,r0 238 cmplw cr0,r3,r0
239 bne- 2b 239 bne- 2b
240 240
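
The loop being retargeted above is the classic split 64-bit timebase read: on 32-bit PowerPC the upper and lower halves come from separate registers, so the upper half is read twice and the whole read retried if it moved, catching a carry out of the low word. The same logic in C, with a fake counter standing in for the hardware registers:

#include <stdint.h>
#include <stdio.h>

static uint64_t fake_tb;	/* stand-in for the hardware timebase */

static uint32_t read_tbu(void) { return fake_tb >> 32; }
static uint32_t read_tbl(void) { return (uint32_t)fake_tb++; /* ticks on read, demo only */ }

static uint64_t read_timebase(void)
{
	uint32_t hi, lo;

	do {
		hi = read_tbu();
		lo = read_tbl();
	} while (read_tbu() != hi);	/* retry if the upper half moved */

	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	fake_tb = 0xffffffffULL;	/* next tick carries into the top */
	printf("%llx\n", (unsigned long long)read_timebase());
	return 0;
}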
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 536016d792ba..78a350670de3 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -1153,7 +1153,7 @@ EXPORT_SYMBOL(vio_h_cop_sync);
1153 1153
1154static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev) 1154static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
1155{ 1155{
1156 const unsigned char *dma_window; 1156 const __be32 *dma_window;
1157 struct iommu_table *tbl; 1157 struct iommu_table *tbl;
1158 unsigned long offset, size; 1158 unsigned long offset, size;
1159 1159
@@ -1312,8 +1312,7 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
1312{ 1312{
1313 struct vio_dev *viodev; 1313 struct vio_dev *viodev;
1314 struct device_node *parent_node; 1314 struct device_node *parent_node;
1315 const unsigned int *unit_address; 1315 const __be32 *prop;
1316 const unsigned int *pfo_resid = NULL;
1317 enum vio_dev_family family; 1316 enum vio_dev_family family;
1318 const char *of_node_name = of_node->name ? of_node->name : "<unknown>"; 1317 const char *of_node_name = of_node->name ? of_node->name : "<unknown>";
1319 1318
@@ -1360,6 +1359,8 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
1360 /* we need the 'device_type' property, in order to match with drivers */ 1359 /* we need the 'device_type' property, in order to match with drivers */
1361 viodev->family = family; 1360 viodev->family = family;
1362 if (viodev->family == VDEVICE) { 1361 if (viodev->family == VDEVICE) {
1362 unsigned int unit_address;
1363
1363 if (of_node->type != NULL) 1364 if (of_node->type != NULL)
1364 viodev->type = of_node->type; 1365 viodev->type = of_node->type;
1365 else { 1366 else {
@@ -1368,24 +1369,24 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
1368 goto out; 1369 goto out;
1369 } 1370 }
1370 1371
1371 unit_address = of_get_property(of_node, "reg", NULL); 1372 prop = of_get_property(of_node, "reg", NULL);
1372 if (unit_address == NULL) { 1373 if (prop == NULL) {
1373 pr_warn("%s: node %s missing 'reg'\n", 1374 pr_warn("%s: node %s missing 'reg'\n",
1374 __func__, of_node_name); 1375 __func__, of_node_name);
1375 goto out; 1376 goto out;
1376 } 1377 }
1377 dev_set_name(&viodev->dev, "%x", *unit_address); 1378 unit_address = of_read_number(prop, 1);
1379 dev_set_name(&viodev->dev, "%x", unit_address);
1378 viodev->irq = irq_of_parse_and_map(of_node, 0); 1380 viodev->irq = irq_of_parse_and_map(of_node, 0);
1379 viodev->unit_address = *unit_address; 1381 viodev->unit_address = unit_address;
1380 } else { 1382 } else {
1381 /* PFO devices need their resource_id for submitting COP_OPs 1383 /* PFO devices need their resource_id for submitting COP_OPs
1382 * This is an optional field for devices, but is required when 1384 * This is an optional field for devices, but is required when
1383 * performing synchronous ops */ 1385 * performing synchronous ops */
1384 pfo_resid = of_get_property(of_node, "ibm,resource-id", NULL); 1386 prop = of_get_property(of_node, "ibm,resource-id", NULL);
1385 if (pfo_resid != NULL) 1387 if (prop != NULL)
1386 viodev->resource_id = *pfo_resid; 1388 viodev->resource_id = of_read_number(prop, 1);
1387 1389
1388 unit_address = NULL;
1389 dev_set_name(&viodev->dev, "%s", of_node_name); 1390 dev_set_name(&viodev->dev, "%s", of_node_name);
1390 viodev->type = of_node_name; 1391 viodev->type = of_node_name;
1391 viodev->irq = 0; 1392 viodev->irq = 0;
@@ -1622,7 +1623,6 @@ static struct vio_dev *vio_find_name(const char *name)
1622 */ 1623 */
1623struct vio_dev *vio_find_node(struct device_node *vnode) 1624struct vio_dev *vio_find_node(struct device_node *vnode)
1624{ 1625{
1625 const uint32_t *unit_address;
1626 char kobj_name[20]; 1626 char kobj_name[20];
1627 struct device_node *vnode_parent; 1627 struct device_node *vnode_parent;
1628 const char *dev_type; 1628 const char *dev_type;
@@ -1638,10 +1638,13 @@ struct vio_dev *vio_find_node(struct device_node *vnode)
1638 1638
1639 /* construct the kobject name from the device node */ 1639 /* construct the kobject name from the device node */
1640 if (!strcmp(dev_type, "vdevice")) { 1640 if (!strcmp(dev_type, "vdevice")) {
1641 unit_address = of_get_property(vnode, "reg", NULL); 1641 const __be32 *prop;
1642 if (!unit_address) 1642
1643 prop = of_get_property(vnode, "reg", NULL);
1644 if (!prop)
1643 return NULL; 1645 return NULL;
1644 snprintf(kobj_name, sizeof(kobj_name), "%x", *unit_address); 1646 snprintf(kobj_name, sizeof(kobj_name), "%x",
1647 (uint32_t)of_read_number(prop, 1));
1645 } else if (!strcmp(dev_type, "ibm,platform-facilities")) 1648 } else if (!strcmp(dev_type, "ibm,platform-facilities"))
1646 snprintf(kobj_name, sizeof(kobj_name), "%s", vnode->name); 1649 snprintf(kobj_name, sizeof(kobj_name), "%s", vnode->name);
1647 else 1650 else
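
The vio.c hunks retype raw property pointers as __be32 and funnel them through of_read_number(), because device tree cells are always big-endian 32-bit words regardless of host byte order. A userspace re-implementation of that helper for illustration:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

typedef uint32_t be32;

/* mirrors the shape of the kernel's of_read_number() */
static uint64_t read_number(const be32 *cell, int size)
{
	uint64_t r = 0;

	while (size--)
		r = (r << 32) | ntohl(*cell++);
	return r;
}

int main(void)
{
	be32 reg[] = { htonl(0x30000002) };	/* hypothetical "reg" cell */

	printf("unit address: %x\n", (unsigned int)read_number(reg, 1));
	return 0;
}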
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index eb643f862579..ffaef2cb101a 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -72,6 +72,7 @@ config KVM_BOOK3S_64_HV
72 bool "KVM support for POWER7 and PPC970 using hypervisor mode in host" 72 bool "KVM support for POWER7 and PPC970 using hypervisor mode in host"
73 depends on KVM_BOOK3S_64 73 depends on KVM_BOOK3S_64
74 select MMU_NOTIFIER 74 select MMU_NOTIFIER
75 select CMA
75 ---help--- 76 ---help---
76 Support running unmodified book3s_64 guest kernels in 77 Support running unmodified book3s_64 guest kernels in
77 virtual machines on POWER7 and PPC970 processors that have 78 virtual machines on POWER7 and PPC970 processors that have
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 008cd856c5b5..6646c952c5e3 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -81,6 +81,7 @@ kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HV) := \
81 book3s_64_vio_hv.o \ 81 book3s_64_vio_hv.o \
82 book3s_hv_ras.o \ 82 book3s_hv_ras.o \
83 book3s_hv_builtin.o \ 83 book3s_hv_builtin.o \
84 book3s_hv_cma.o \
84 $(kvm-book3s_64-builtin-xics-objs-y) 85 $(kvm-book3s_64-builtin-xics-objs-y)
85 86
86kvm-book3s_64-objs-$(CONFIG_KVM_XICS) += \ 87kvm-book3s_64-objs-$(CONFIG_KVM_XICS) += \
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index 739bfbadb85e..7e345e00661a 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -182,10 +182,13 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
182 hva_t ptegp; 182 hva_t ptegp;
183 u64 pteg[16]; 183 u64 pteg[16];
184 u64 avpn = 0; 184 u64 avpn = 0;
185 u64 v, r;
186 u64 v_val, v_mask;
187 u64 eaddr_mask;
185 int i; 188 int i;
186 u8 key = 0; 189 u8 pp, key = 0;
187 bool found = false; 190 bool found = false;
188 int second = 0; 191 bool second = false;
189 ulong mp_ea = vcpu->arch.magic_page_ea; 192 ulong mp_ea = vcpu->arch.magic_page_ea;
190 193
191 /* Magic page override */ 194 /* Magic page override */
@@ -208,8 +211,16 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
208 goto no_seg_found; 211 goto no_seg_found;
209 212
210 avpn = kvmppc_mmu_book3s_64_get_avpn(slbe, eaddr); 213 avpn = kvmppc_mmu_book3s_64_get_avpn(slbe, eaddr);
214 v_val = avpn & HPTE_V_AVPN;
215
211 if (slbe->tb) 216 if (slbe->tb)
212 avpn |= SLB_VSID_B_1T; 217 v_val |= SLB_VSID_B_1T;
218 if (slbe->large)
219 v_val |= HPTE_V_LARGE;
220 v_val |= HPTE_V_VALID;
221
222 v_mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_LARGE | HPTE_V_VALID |
223 HPTE_V_SECONDARY;
213 224
214do_second: 225do_second:
215 ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu_book3s, slbe, eaddr, second); 226 ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu_book3s, slbe, eaddr, second);
@@ -227,91 +238,74 @@ do_second:
227 key = 4; 238 key = 4;
228 239
229 for (i=0; i<16; i+=2) { 240 for (i=0; i<16; i+=2) {
230 u64 v = pteg[i]; 241 /* Check all relevant fields of 1st dword */
231 u64 r = pteg[i+1]; 242 if ((pteg[i] & v_mask) == v_val) {
232
233 /* Valid check */
234 if (!(v & HPTE_V_VALID))
235 continue;
236 /* Hash check */
237 if ((v & HPTE_V_SECONDARY) != second)
238 continue;
239
240 /* AVPN compare */
241 if (HPTE_V_COMPARE(avpn, v)) {
242 u8 pp = (r & HPTE_R_PP) | key;
243 int eaddr_mask = 0xFFF;
244
245 gpte->eaddr = eaddr;
246 gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu,
247 eaddr,
248 data);
249 if (slbe->large)
250 eaddr_mask = 0xFFFFFF;
251 gpte->raddr = (r & HPTE_R_RPN) | (eaddr & eaddr_mask);
252 gpte->may_execute = ((r & HPTE_R_N) ? false : true);
253 gpte->may_read = false;
254 gpte->may_write = false;
255
256 switch (pp) {
257 case 0:
258 case 1:
259 case 2:
260 case 6:
261 gpte->may_write = true;
262 /* fall through */
263 case 3:
264 case 5:
265 case 7:
266 gpte->may_read = true;
267 break;
268 }
269
270 dprintk("KVM MMU: Translated 0x%lx [0x%llx] -> 0x%llx "
271 "-> 0x%lx\n",
272 eaddr, avpn, gpte->vpage, gpte->raddr);
273 found = true; 243 found = true;
274 break; 244 break;
275 } 245 }
276 } 246 }
277 247
278 /* Update PTE R and C bits, so the guest's swapper knows we used the 248 if (!found) {
279 * page */ 249 if (second)
280 if (found) { 250 goto no_page_found;
281 u32 oldr = pteg[i+1]; 251 v_val |= HPTE_V_SECONDARY;
252 second = true;
253 goto do_second;
254 }
282 255
283 if (gpte->may_read) { 256 v = pteg[i];
284 /* Set the accessed flag */ 257 r = pteg[i+1];
285 pteg[i+1] |= HPTE_R_R; 258 pp = (r & HPTE_R_PP) | key;
286 } 259 eaddr_mask = 0xFFF;
287 if (gpte->may_write) { 260
288 /* Set the dirty flag */ 261 gpte->eaddr = eaddr;
289 pteg[i+1] |= HPTE_R_C; 262 gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);
290 } else { 263 if (slbe->large)
291 dprintk("KVM: Mapping read-only page!\n"); 264 eaddr_mask = 0xFFFFFF;
292 } 265 gpte->raddr = (r & HPTE_R_RPN & ~eaddr_mask) | (eaddr & eaddr_mask);
266 gpte->may_execute = ((r & HPTE_R_N) ? false : true);
267 gpte->may_read = false;
268 gpte->may_write = false;
269
270 switch (pp) {
271 case 0:
272 case 1:
273 case 2:
274 case 6:
275 gpte->may_write = true;
276 /* fall through */
277 case 3:
278 case 5:
279 case 7:
280 gpte->may_read = true;
281 break;
282 }
293 283
294 /* Write back into the PTEG */ 284 dprintk("KVM MMU: Translated 0x%lx [0x%llx] -> 0x%llx "
295 if (pteg[i+1] != oldr) 285 "-> 0x%lx\n",
296 copy_to_user((void __user *)ptegp, pteg, sizeof(pteg)); 286 eaddr, avpn, gpte->vpage, gpte->raddr);
297 287
298 if (!gpte->may_read) 288 /* Update PTE R and C bits, so the guest's swapper knows we used the
299 return -EPERM; 289 * page */
300 return 0; 290 if (gpte->may_read) {
301 } else { 291 /* Set the accessed flag */
302 dprintk("KVM MMU: No PTE found (ea=0x%lx sdr1=0x%llx " 292 r |= HPTE_R_R;
303 "ptegp=0x%lx)\n", 293 }
304 eaddr, to_book3s(vcpu)->sdr1, ptegp); 294 if (data && gpte->may_write) {
305 for (i = 0; i < 16; i += 2) 295 /* Set the dirty flag -- XXX even if not writing */
306 dprintk(" %02d: 0x%llx - 0x%llx (0x%llx)\n", 296 r |= HPTE_R_C;
307 i, pteg[i], pteg[i+1], avpn); 297 }
308 298
309 if (!second) { 299 /* Write back into the PTEG */
310 second = HPTE_V_SECONDARY; 300 if (pteg[i+1] != r) {
311 goto do_second; 301 pteg[i+1] = r;
312 } 302 copy_to_user((void __user *)ptegp, pteg, sizeof(pteg));
313 } 303 }
314 304
305 if (!gpte->may_read)
306 return -EPERM;
307 return 0;
308
315no_page_found: 309no_page_found:
316 return -ENOENT; 310 return -ENOENT;
317 311
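
The xlate rewrite above folds the valid, secondary-hash, large-page, and AVPN tests into a single mask-and-compare against the first HPTE doubleword, which is both shorter and closer to what the hardware does. The shape of that idiom, with made-up field masks rather than the kernel's HPTE constants:

#include <stdint.h>
#include <stdio.h>

#define HPTE_V_VALID		0x1ULL		/* demo values only */
#define HPTE_V_SECONDARY	0x2ULL
#define HPTE_V_AVPN		0xffffffffff000000ULL

int main(void)
{
	uint64_t avpn = 0x12345000000ULL & HPTE_V_AVPN;
	uint64_t v_val = avpn | HPTE_V_VALID;	/* what a hit must look like */
	uint64_t v_mask = HPTE_V_AVPN | HPTE_V_VALID | HPTE_V_SECONDARY;

	uint64_t pte_hit = avpn | HPTE_V_VALID;		/* primary, valid */
	uint64_t pte_miss = avpn | HPTE_V_VALID | HPTE_V_SECONDARY;

	printf("hit:  %d\n", (pte_hit & v_mask) == v_val);	/* 1 */
	printf("miss: %d\n", (pte_miss & v_mask) == v_val);	/* 0 */
	return 0;
}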
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 710d31317d81..043eec8461e7 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -37,6 +37,8 @@
37#include <asm/ppc-opcode.h> 37#include <asm/ppc-opcode.h>
38#include <asm/cputable.h> 38#include <asm/cputable.h>
39 39
40#include "book3s_hv_cma.h"
41
40/* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */ 42/* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
41#define MAX_LPID_970 63 43#define MAX_LPID_970 63
42 44
@@ -52,8 +54,8 @@ long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
52{ 54{
53 unsigned long hpt; 55 unsigned long hpt;
54 struct revmap_entry *rev; 56 struct revmap_entry *rev;
55 struct kvmppc_linear_info *li; 57 struct page *page = NULL;
56 long order = kvm_hpt_order; 58 long order = KVM_DEFAULT_HPT_ORDER;
57 59
58 if (htab_orderp) { 60 if (htab_orderp) {
59 order = *htab_orderp; 61 order = *htab_orderp;
@@ -61,26 +63,23 @@ long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
61 order = PPC_MIN_HPT_ORDER; 63 order = PPC_MIN_HPT_ORDER;
62 } 64 }
63 65
66 kvm->arch.hpt_cma_alloc = 0;
64 /* 67 /*
65 * If the user wants a different size from default,
66 * try first to allocate it from the kernel page allocator. 68 * try first to allocate it from the kernel page allocator.
 69	 * We keep the CMA region in reserve for the case where that allocation fails.
67 */ 70 */
68 hpt = 0; 71 hpt = __get_free_pages(GFP_KERNEL | __GFP_ZERO | __GFP_REPEAT |
69 if (order != kvm_hpt_order) { 72 __GFP_NOWARN, order - PAGE_SHIFT);
70 hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|
71 __GFP_NOWARN, order - PAGE_SHIFT);
72 if (!hpt)
73 --order;
74 }
75 73
76 /* Next try to allocate from the preallocated pool */ 74 /* Next try to allocate from the preallocated pool */
77 if (!hpt) { 75 if (!hpt) {
78 li = kvm_alloc_hpt(); 76 VM_BUG_ON(order < KVM_CMA_CHUNK_ORDER);
79 if (li) { 77 page = kvm_alloc_hpt(1 << (order - PAGE_SHIFT));
80 hpt = (ulong)li->base_virt; 78 if (page) {
81 kvm->arch.hpt_li = li; 79 hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
82 order = kvm_hpt_order; 80 kvm->arch.hpt_cma_alloc = 1;
83 } 81 } else
82 --order;
84 } 83 }
85 84
86 /* Lastly try successively smaller sizes from the page allocator */ 85 /* Lastly try successively smaller sizes from the page allocator */
@@ -118,8 +117,8 @@ long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
118 return 0; 117 return 0;
119 118
120 out_freehpt: 119 out_freehpt:
121 if (kvm->arch.hpt_li) 120 if (kvm->arch.hpt_cma_alloc)
122 kvm_release_hpt(kvm->arch.hpt_li); 121 kvm_release_hpt(page, 1 << (order - PAGE_SHIFT));
123 else 122 else
124 free_pages(hpt, order - PAGE_SHIFT); 123 free_pages(hpt, order - PAGE_SHIFT);
125 return -ENOMEM; 124 return -ENOMEM;
@@ -165,8 +164,9 @@ void kvmppc_free_hpt(struct kvm *kvm)
165{ 164{
166 kvmppc_free_lpid(kvm->arch.lpid); 165 kvmppc_free_lpid(kvm->arch.lpid);
167 vfree(kvm->arch.revmap); 166 vfree(kvm->arch.revmap);
168 if (kvm->arch.hpt_li) 167 if (kvm->arch.hpt_cma_alloc)
169 kvm_release_hpt(kvm->arch.hpt_li); 168 kvm_release_hpt(virt_to_page(kvm->arch.hpt_virt),
169 1 << (kvm->arch.hpt_order - PAGE_SHIFT));
170 else 170 else
171 free_pages(kvm->arch.hpt_virt, 171 free_pages(kvm->arch.hpt_virt,
172 kvm->arch.hpt_order - PAGE_SHIFT); 172 kvm->arch.hpt_order - PAGE_SHIFT);
@@ -1579,7 +1579,7 @@ int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *ghf)
1579 ctx->first_pass = 1; 1579 ctx->first_pass = 1;
1580 1580
1581 rwflag = (ghf->flags & KVM_GET_HTAB_WRITE) ? O_WRONLY : O_RDONLY; 1581 rwflag = (ghf->flags & KVM_GET_HTAB_WRITE) ? O_WRONLY : O_RDONLY;
1582 ret = anon_inode_getfd("kvm-htab", &kvm_htab_fops, ctx, rwflag); 1582 ret = anon_inode_getfd("kvm-htab", &kvm_htab_fops, ctx, rwflag | O_CLOEXEC);
1583 if (ret < 0) { 1583 if (ret < 0) {
1584 kvm_put_kvm(kvm); 1584 kvm_put_kvm(kvm);
1585 return ret; 1585 return ret;
diff --git a/arch/powerpc/kvm/book3s_64_slb.S b/arch/powerpc/kvm/book3s_64_slb.S
index 4f0caecc0f9d..4f12e8f0c718 100644
--- a/arch/powerpc/kvm/book3s_64_slb.S
+++ b/arch/powerpc/kvm/book3s_64_slb.S
@@ -17,6 +17,10 @@
17 * Authors: Alexander Graf <agraf@suse.de> 17 * Authors: Alexander Graf <agraf@suse.de>
18 */ 18 */
19 19
20#ifdef __LITTLE_ENDIAN__
21#error Need to fix SLB shadow accesses in little endian mode
22#endif
23
20#define SHADOW_SLB_ESID(num) (SLBSHADOW_SAVEAREA + (num * 0x10)) 24#define SHADOW_SLB_ESID(num) (SLBSHADOW_SAVEAREA + (num * 0x10))
21#define SHADOW_SLB_VSID(num) (SLBSHADOW_SAVEAREA + (num * 0x10) + 0x8) 25#define SHADOW_SLB_VSID(num) (SLBSHADOW_SAVEAREA + (num * 0x10) + 0x8)
22#define UNBOLT_SLB_ENTRY(num) \ 26#define UNBOLT_SLB_ENTRY(num) \
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index b2d3f3b2de72..54cf9bc94dad 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -136,7 +136,7 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
136 mutex_unlock(&kvm->lock); 136 mutex_unlock(&kvm->lock);
137 137
138 return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops, 138 return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
139 stt, O_RDWR); 139 stt, O_RDWR | O_CLOEXEC);
140 140
141fail: 141fail:
142 if (stt) { 142 if (stt) {
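
Several hunks in this series append O_CLOEXEC when creating anonymous inodes (kvm-htab, kvm-spapr-tce, kvm-rma), so the descriptors do not leak into programs the holder later exec()s. The flag's effect, demonstrated from userspace:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/null", O_RDONLY | O_CLOEXEC);

	if (fd < 0)
		return 1;
	/* FD_CLOEXEC is set, so exec() would close this fd: prints 1 */
	printf("FD_CLOEXEC set: %d\n",
	       !!(fcntl(fd, F_GETFD) & FD_CLOEXEC));
	close(fd);
	return 0;
}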
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index 1f6344c4408d..360ce68c9809 100644
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -458,6 +458,7 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
458 case SPRN_PMC4_GEKKO: 458 case SPRN_PMC4_GEKKO:
459 case SPRN_WPAR_GEKKO: 459 case SPRN_WPAR_GEKKO:
460 case SPRN_MSSSR0: 460 case SPRN_MSSSR0:
461 case SPRN_DABR:
461 break; 462 break;
462unprivileged: 463unprivileged:
463 default: 464 default:
@@ -555,6 +556,7 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
555 case SPRN_PMC4_GEKKO: 556 case SPRN_PMC4_GEKKO:
556 case SPRN_WPAR_GEKKO: 557 case SPRN_WPAR_GEKKO:
557 case SPRN_MSSSR0: 558 case SPRN_MSSSR0:
559 case SPRN_DABR:
558 *spr_val = 0; 560 *spr_val = 0;
559 break; 561 break;
560 default: 562 default:
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 2efa9dde741a..62a2b5ab08ed 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -217,7 +217,7 @@ struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
217 217
218static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa) 218static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
219{ 219{
220 vpa->shared_proc = 1; 220 vpa->__old_status |= LPPACA_OLD_SHARED_PROC;
221 vpa->yield_count = 1; 221 vpa->yield_count = 1;
222} 222}
223 223
@@ -680,13 +680,12 @@ static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
680} 680}
681 681
682int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, 682int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
683 struct kvm_sregs *sregs) 683 struct kvm_sregs *sregs)
684{ 684{
685 int i; 685 int i;
686 686
687 sregs->pvr = vcpu->arch.pvr;
688
689 memset(sregs, 0, sizeof(struct kvm_sregs)); 687 memset(sregs, 0, sizeof(struct kvm_sregs));
688 sregs->pvr = vcpu->arch.pvr;
690 for (i = 0; i < vcpu->arch.slb_max; i++) { 689 for (i = 0; i < vcpu->arch.slb_max; i++) {
691 sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige; 690 sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige;
692 sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv; 691 sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
@@ -696,7 +695,7 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
696} 695}
697 696
698int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, 697int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
699 struct kvm_sregs *sregs) 698 struct kvm_sregs *sregs)
700{ 699{
701 int i, j; 700 int i, j;
702 701
@@ -1511,10 +1510,10 @@ static inline int lpcr_rmls(unsigned long rma_size)
1511 1510
1512static int kvm_rma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 1511static int kvm_rma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1513{ 1512{
1514 struct kvmppc_linear_info *ri = vma->vm_file->private_data;
1515 struct page *page; 1513 struct page *page;
1514 struct kvm_rma_info *ri = vma->vm_file->private_data;
1516 1515
1517 if (vmf->pgoff >= ri->npages) 1516 if (vmf->pgoff >= kvm_rma_pages)
1518 return VM_FAULT_SIGBUS; 1517 return VM_FAULT_SIGBUS;
1519 1518
1520 page = pfn_to_page(ri->base_pfn + vmf->pgoff); 1519 page = pfn_to_page(ri->base_pfn + vmf->pgoff);
@@ -1536,7 +1535,7 @@ static int kvm_rma_mmap(struct file *file, struct vm_area_struct *vma)
1536 1535
1537static int kvm_rma_release(struct inode *inode, struct file *filp) 1536static int kvm_rma_release(struct inode *inode, struct file *filp)
1538{ 1537{
1539 struct kvmppc_linear_info *ri = filp->private_data; 1538 struct kvm_rma_info *ri = filp->private_data;
1540 1539
1541 kvm_release_rma(ri); 1540 kvm_release_rma(ri);
1542 return 0; 1541 return 0;
@@ -1549,18 +1548,27 @@ static const struct file_operations kvm_rma_fops = {
1549 1548
1550long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret) 1549long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret)
1551{ 1550{
1552 struct kvmppc_linear_info *ri;
1553 long fd; 1551 long fd;
1552 struct kvm_rma_info *ri;
1553 /*
1554 * Only do this on PPC970 in HV mode
1555 */
1556 if (!cpu_has_feature(CPU_FTR_HVMODE) ||
1557 !cpu_has_feature(CPU_FTR_ARCH_201))
1558 return -EINVAL;
1559
1560 if (!kvm_rma_pages)
1561 return -EINVAL;
1554 1562
1555 ri = kvm_alloc_rma(); 1563 ri = kvm_alloc_rma();
1556 if (!ri) 1564 if (!ri)
1557 return -ENOMEM; 1565 return -ENOMEM;
1558 1566
1559 fd = anon_inode_getfd("kvm-rma", &kvm_rma_fops, ri, O_RDWR); 1567 fd = anon_inode_getfd("kvm-rma", &kvm_rma_fops, ri, O_RDWR | O_CLOEXEC);
1560 if (fd < 0) 1568 if (fd < 0)
1561 kvm_release_rma(ri); 1569 kvm_release_rma(ri);
1562 1570
1563 ret->rma_size = ri->npages << PAGE_SHIFT; 1571 ret->rma_size = kvm_rma_pages << PAGE_SHIFT;
1564 return fd; 1572 return fd;
1565} 1573}
1566 1574
@@ -1725,7 +1733,7 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
1725{ 1733{
1726 int err = 0; 1734 int err = 0;
1727 struct kvm *kvm = vcpu->kvm; 1735 struct kvm *kvm = vcpu->kvm;
1728 struct kvmppc_linear_info *ri = NULL; 1736 struct kvm_rma_info *ri = NULL;
1729 unsigned long hva; 1737 unsigned long hva;
1730 struct kvm_memory_slot *memslot; 1738 struct kvm_memory_slot *memslot;
1731 struct vm_area_struct *vma; 1739 struct vm_area_struct *vma;
@@ -1803,13 +1811,13 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
1803 1811
1804 } else { 1812 } else {
1805 /* Set up to use an RMO region */ 1813 /* Set up to use an RMO region */
1806 rma_size = ri->npages; 1814 rma_size = kvm_rma_pages;
1807 if (rma_size > memslot->npages) 1815 if (rma_size > memslot->npages)
1808 rma_size = memslot->npages; 1816 rma_size = memslot->npages;
1809 rma_size <<= PAGE_SHIFT; 1817 rma_size <<= PAGE_SHIFT;
1810 rmls = lpcr_rmls(rma_size); 1818 rmls = lpcr_rmls(rma_size);
1811 err = -EINVAL; 1819 err = -EINVAL;
1812 if (rmls < 0) { 1820 if ((long)rmls < 0) {
1813 pr_err("KVM: Can't use RMA of 0x%lx bytes\n", rma_size); 1821 pr_err("KVM: Can't use RMA of 0x%lx bytes\n", rma_size);
1814 goto out_srcu; 1822 goto out_srcu;
1815 } 1823 }
@@ -1831,14 +1839,14 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
1831 /* POWER7 */ 1839 /* POWER7 */
1832 lpcr &= ~(LPCR_VPM0 | LPCR_VRMA_L); 1840 lpcr &= ~(LPCR_VPM0 | LPCR_VRMA_L);
1833 lpcr |= rmls << LPCR_RMLS_SH; 1841 lpcr |= rmls << LPCR_RMLS_SH;
1834 kvm->arch.rmor = kvm->arch.rma->base_pfn << PAGE_SHIFT; 1842 kvm->arch.rmor = ri->base_pfn << PAGE_SHIFT;
1835 } 1843 }
1836 kvm->arch.lpcr = lpcr; 1844 kvm->arch.lpcr = lpcr;
1837 pr_info("KVM: Using RMO at %lx size %lx (LPCR = %lx)\n", 1845 pr_info("KVM: Using RMO at %lx size %lx (LPCR = %lx)\n",
1838 ri->base_pfn << PAGE_SHIFT, rma_size, lpcr); 1846 ri->base_pfn << PAGE_SHIFT, rma_size, lpcr);
1839 1847
1840 /* Initialize phys addrs of pages in RMO */ 1848 /* Initialize phys addrs of pages in RMO */
1841 npages = ri->npages; 1849 npages = kvm_rma_pages;
1842 porder = __ilog2(npages); 1850 porder = __ilog2(npages);
1843 physp = memslot->arch.slot_phys; 1851 physp = memslot->arch.slot_phys;
1844 if (physp) { 1852 if (physp) {
@@ -1874,7 +1882,7 @@ int kvmppc_core_init_vm(struct kvm *kvm)
1874 /* Allocate the guest's logical partition ID */ 1882 /* Allocate the guest's logical partition ID */
1875 1883
1876 lpid = kvmppc_alloc_lpid(); 1884 lpid = kvmppc_alloc_lpid();
1877 if (lpid < 0) 1885 if ((long)lpid < 0)
1878 return -ENOMEM; 1886 return -ENOMEM;
1879 kvm->arch.lpid = lpid; 1887 kvm->arch.lpid = lpid;
1880 1888
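
The (long)rmls < 0 and (long)lpid < 0 tests above address an unsigned-comparison
pitfall: both variables are unsigned, so a plain "< 0" test is constant-false and
the error path can never fire. A minimal userspace sketch of the bug and the fix
(the lookup() helper is hypothetical, not kernel code):

    #include <stdio.h>

    /* Hypothetical helper that encodes errors as negative values but is
     * declared unsigned, like the variables holding rmls/lpid. */
    static unsigned long lookup(int fail)
    {
        return fail ? (unsigned long)-22 : 7;   /* -EINVAL or a valid value */
    }

    int main(void)
    {
        unsigned long rmls = lookup(1);

        if (rmls < 0)                   /* always false: rmls is unsigned */
            printf("never reached\n");
        if ((long)rmls < 0)             /* the patch's form: compare as signed */
            printf("error detected: %ld\n", (long)rmls);
        return 0;
    }
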
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index ec0a9e5de100..8cd0daebb82d 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -13,33 +13,34 @@
13#include <linux/spinlock.h> 13#include <linux/spinlock.h>
14#include <linux/bootmem.h> 14#include <linux/bootmem.h>
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/memblock.h>
17#include <linux/sizes.h>
16 18
17#include <asm/cputable.h> 19#include <asm/cputable.h>
18#include <asm/kvm_ppc.h> 20#include <asm/kvm_ppc.h>
19#include <asm/kvm_book3s.h> 21#include <asm/kvm_book3s.h>
20 22
21#define KVM_LINEAR_RMA 0 23#include "book3s_hv_cma.h"
22#define KVM_LINEAR_HPT 1 24/*
 23 25 * Hash page table alignment on newer CPUs (CPU_FTR_ARCH_206)
 24static void __init kvm_linear_init_one(ulong size, int count, int type); 26 * should be a power of 2.
25static struct kvmppc_linear_info *kvm_alloc_linear(int type); 27 */
26static void kvm_release_linear(struct kvmppc_linear_info *ri); 28#define HPT_ALIGN_PAGES ((1 << 18) >> PAGE_SHIFT) /* 256k */
27 29/*
28int kvm_hpt_order = KVM_DEFAULT_HPT_ORDER; 30 * By default we reserve 5% of memory for hash pagetable allocation.
29EXPORT_SYMBOL_GPL(kvm_hpt_order); 31 */
30 32static unsigned long kvm_cma_resv_ratio = 5;
31/*************** RMA *************/
32
33/* 33/*
34 * This maintains a list of RMAs (real mode areas) for KVM guests to use. 34 * We allocate RMAs (real mode areas) for KVM guests from the KVM CMA area.
35 * Each RMA has to be physically contiguous and of a size that the 35 * Each RMA has to be physically contiguous and of a size that the
36 * hardware supports. PPC970 and POWER7 support 64MB, 128MB and 256MB, 36 * hardware supports. PPC970 and POWER7 support 64MB, 128MB and 256MB,
 37 * and other larger sizes. Since we are unlikely to be able to allocate that 37 * and other larger sizes. Since we are unlikely to be able to allocate that
38 * much physically contiguous memory after the system is up and running, 38 * much physically contiguous memory after the system is up and running,
39 * we preallocate a set of RMAs in early boot for KVM to use. 39 * we preallocate a set of RMAs in early boot using CMA.
40 * should be power of 2.
40 */ 41 */
41static unsigned long kvm_rma_size = 64 << 20; /* 64MB */ 42unsigned long kvm_rma_pages = (1 << 27) >> PAGE_SHIFT; /* 128MB */
42static unsigned long kvm_rma_count; 43EXPORT_SYMBOL_GPL(kvm_rma_pages);
43 44
44/* Work out RMLS (real mode limit selector) field value for a given RMA size. 45/* Work out RMLS (real mode limit selector) field value for a given RMA size.
45 Assumes POWER7 or PPC970. */ 46 Assumes POWER7 or PPC970. */
@@ -69,165 +70,114 @@ static inline int lpcr_rmls(unsigned long rma_size)
69 70
70static int __init early_parse_rma_size(char *p) 71static int __init early_parse_rma_size(char *p)
71{ 72{
72 if (!p) 73 unsigned long kvm_rma_size;
73 return 1;
74 74
75 pr_debug("%s(%s)\n", __func__, p);
76 if (!p)
77 return -EINVAL;
75 kvm_rma_size = memparse(p, &p); 78 kvm_rma_size = memparse(p, &p);
76 79 /*
80 * Check that the requested size is one supported in hardware
81 */
82 if (lpcr_rmls(kvm_rma_size) < 0) {
83 pr_err("RMA size of 0x%lx not supported\n", kvm_rma_size);
84 return -EINVAL;
85 }
86 kvm_rma_pages = kvm_rma_size >> PAGE_SHIFT;
77 return 0; 87 return 0;
78} 88}
79early_param("kvm_rma_size", early_parse_rma_size); 89early_param("kvm_rma_size", early_parse_rma_size);
80 90
81static int __init early_parse_rma_count(char *p) 91struct kvm_rma_info *kvm_alloc_rma()
82{ 92{
83 if (!p) 93 struct page *page;
84 return 1; 94 struct kvm_rma_info *ri;
85 95
86 kvm_rma_count = simple_strtoul(p, NULL, 0); 96 ri = kmalloc(sizeof(struct kvm_rma_info), GFP_KERNEL);
87 97 if (!ri)
88 return 0; 98 return NULL;
89} 99 page = kvm_alloc_cma(kvm_rma_pages, kvm_rma_pages);
90early_param("kvm_rma_count", early_parse_rma_count); 100 if (!page)
91 101 goto err_out;
92struct kvmppc_linear_info *kvm_alloc_rma(void) 102 atomic_set(&ri->use_count, 1);
93{ 103 ri->base_pfn = page_to_pfn(page);
94 return kvm_alloc_linear(KVM_LINEAR_RMA); 104 return ri;
105err_out:
106 kfree(ri);
107 return NULL;
95} 108}
96EXPORT_SYMBOL_GPL(kvm_alloc_rma); 109EXPORT_SYMBOL_GPL(kvm_alloc_rma);
97 110
98void kvm_release_rma(struct kvmppc_linear_info *ri) 111void kvm_release_rma(struct kvm_rma_info *ri)
99{ 112{
100 kvm_release_linear(ri); 113 if (atomic_dec_and_test(&ri->use_count)) {
114 kvm_release_cma(pfn_to_page(ri->base_pfn), kvm_rma_pages);
115 kfree(ri);
116 }
101} 117}
102EXPORT_SYMBOL_GPL(kvm_release_rma); 118EXPORT_SYMBOL_GPL(kvm_release_rma);
103 119
104/*************** HPT *************/ 120static int __init early_parse_kvm_cma_resv(char *p)
105
106/*
107 * This maintains a list of big linear HPT tables that contain the GVA->HPA
108 * memory mappings. If we don't reserve those early on, we might not be able
109 * to get a big (usually 16MB) linear memory region from the kernel anymore.
110 */
111
112static unsigned long kvm_hpt_count;
113
114static int __init early_parse_hpt_count(char *p)
115{ 121{
122 pr_debug("%s(%s)\n", __func__, p);
116 if (!p) 123 if (!p)
117 return 1; 124 return -EINVAL;
118 125 return kstrtoul(p, 0, &kvm_cma_resv_ratio);
119 kvm_hpt_count = simple_strtoul(p, NULL, 0);
120
121 return 0;
122} 126}
123early_param("kvm_hpt_count", early_parse_hpt_count); 127early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);
124 128
125struct kvmppc_linear_info *kvm_alloc_hpt(void) 129struct page *kvm_alloc_hpt(unsigned long nr_pages)
126{ 130{
127 return kvm_alloc_linear(KVM_LINEAR_HPT); 131 unsigned long align_pages = HPT_ALIGN_PAGES;
132
133 /* Old CPUs require HPT aligned on a multiple of its size */
134 if (!cpu_has_feature(CPU_FTR_ARCH_206))
135 align_pages = nr_pages;
136 return kvm_alloc_cma(nr_pages, align_pages);
128} 137}
129EXPORT_SYMBOL_GPL(kvm_alloc_hpt); 138EXPORT_SYMBOL_GPL(kvm_alloc_hpt);
130 139
131void kvm_release_hpt(struct kvmppc_linear_info *li) 140void kvm_release_hpt(struct page *page, unsigned long nr_pages)
132{ 141{
133 kvm_release_linear(li); 142 kvm_release_cma(page, nr_pages);
134} 143}
135EXPORT_SYMBOL_GPL(kvm_release_hpt); 144EXPORT_SYMBOL_GPL(kvm_release_hpt);
136 145
137/*************** generic *************/ 146/**
138 147 * kvm_cma_reserve() - reserve area for kvm hash pagetable
139static LIST_HEAD(free_linears); 148 *
 140static DEFINE_SPINLOCK(linear_lock); 149 * This function reserves memory from the early allocator. It should be
 141 150 * called by arch-specific code once the early allocator (memblock or bootmem)
142static void __init kvm_linear_init_one(ulong size, int count, int type) 151 * has been activated and all other subsystems have already allocated/reserved
143{ 152 * memory.
144 unsigned long i;
145 unsigned long j, npages;
146 void *linear;
147 struct page *pg;
148 const char *typestr;
149 struct kvmppc_linear_info *linear_info;
150
151 if (!count)
152 return;
153
154 typestr = (type == KVM_LINEAR_RMA) ? "RMA" : "HPT";
155
156 npages = size >> PAGE_SHIFT;
157 linear_info = alloc_bootmem(count * sizeof(struct kvmppc_linear_info));
158 for (i = 0; i < count; ++i) {
159 linear = alloc_bootmem_align(size, size);
160 pr_debug("Allocated KVM %s at %p (%ld MB)\n", typestr, linear,
161 size >> 20);
162 linear_info[i].base_virt = linear;
163 linear_info[i].base_pfn = __pa(linear) >> PAGE_SHIFT;
164 linear_info[i].npages = npages;
165 linear_info[i].type = type;
166 list_add_tail(&linear_info[i].list, &free_linears);
167 atomic_set(&linear_info[i].use_count, 0);
168
169 pg = pfn_to_page(linear_info[i].base_pfn);
170 for (j = 0; j < npages; ++j) {
171 atomic_inc(&pg->_count);
172 ++pg;
173 }
174 }
175}
176
177static struct kvmppc_linear_info *kvm_alloc_linear(int type)
178{
179 struct kvmppc_linear_info *ri, *ret;
180
181 ret = NULL;
182 spin_lock(&linear_lock);
183 list_for_each_entry(ri, &free_linears, list) {
184 if (ri->type != type)
185 continue;
186
187 list_del(&ri->list);
188 atomic_inc(&ri->use_count);
189 memset(ri->base_virt, 0, ri->npages << PAGE_SHIFT);
190 ret = ri;
191 break;
192 }
193 spin_unlock(&linear_lock);
194 return ret;
195}
196
197static void kvm_release_linear(struct kvmppc_linear_info *ri)
198{
199 if (atomic_dec_and_test(&ri->use_count)) {
200 spin_lock(&linear_lock);
201 list_add_tail(&ri->list, &free_linears);
202 spin_unlock(&linear_lock);
203
204 }
205}
206
207/*
208 * Called at boot time while the bootmem allocator is active,
209 * to allocate contiguous physical memory for the hash page
210 * tables for guests.
211 */ 153 */
212void __init kvm_linear_init(void) 154void __init kvm_cma_reserve(void)
213{ 155{
214 /* HPT */ 156 unsigned long align_size;
215 kvm_linear_init_one(1 << kvm_hpt_order, kvm_hpt_count, KVM_LINEAR_HPT); 157 struct memblock_region *reg;
216 158 phys_addr_t selected_size = 0;
217 /* RMA */ 159 /*
218 /* Only do this on PPC970 in HV mode */ 160 * We cannot use memblock_phys_mem_size() here, because
219 if (!cpu_has_feature(CPU_FTR_HVMODE) || 161 * memblock_analyze() has not been called yet.
220 !cpu_has_feature(CPU_FTR_ARCH_201)) 162 */
221 return; 163 for_each_memblock(memory, reg)
222 164 selected_size += memblock_region_memory_end_pfn(reg) -
223 if (!kvm_rma_size || !kvm_rma_count) 165 memblock_region_memory_base_pfn(reg);
224 return; 166
225 167 selected_size = (selected_size * kvm_cma_resv_ratio / 100) << PAGE_SHIFT;
226 /* Check that the requested size is one supported in hardware */ 168 if (selected_size) {
227 if (lpcr_rmls(kvm_rma_size) < 0) { 169 pr_debug("%s: reserving %ld MiB for global area\n", __func__,
228 pr_err("RMA size of 0x%lx not supported\n", kvm_rma_size); 170 (unsigned long)selected_size / SZ_1M);
229 return; 171 /*
 172 * Old CPUs require the HPT to be aligned on a multiple of its size,
 173 * so make the alignment the largest size we could request.
174 */
175 if (!cpu_has_feature(CPU_FTR_ARCH_206))
176 align_size = __rounddown_pow_of_two(selected_size);
177 else
178 align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
179
180 align_size = max(kvm_rma_pages << PAGE_SHIFT, align_size);
181 kvm_cma_declare_contiguous(selected_size, align_size);
230 } 182 }
231
232 kvm_linear_init_one(kvm_rma_size, kvm_rma_count, KVM_LINEAR_RMA);
233} 183}
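
kvm_cma_reserve() above sizes the reservation as a fraction of memory
(kvm_cma_resv_ratio, default 5%) and then chooses an alignment that satisfies
both the HPT and RMA constraints. A standalone model of that arithmetic, where
the page size, memory size, and rounding helper are illustrative assumptions:

    #include <stdio.h>

    #define PAGE_SHIFT      16                              /* assume 64K pages */
    #define SZ_1M           (1UL << 20)
    #define HPT_ALIGN_PAGES ((1UL << 18) >> PAGE_SHIFT)     /* 256K */

    /* Round down to a power of two, like the kernel's
     * __rounddown_pow_of_two(). */
    static unsigned long rounddown_pow_of_two(unsigned long n)
    {
        while (n & (n - 1))
            n &= n - 1;             /* clear lowest set bit */
        return n;
    }

    int main(void)
    {
        unsigned long mem = 8UL << 30;          /* pretend 8 GiB of RAM */
        unsigned long resv_ratio = 5;           /* kvm_cma_resv_ratio */
        unsigned long kvm_rma_pages = (1UL << 27) >> PAGE_SHIFT; /* 128MB */
        unsigned long selected_size = mem * resv_ratio / 100;
        unsigned long align_size;
        int old_cpu = 1;                        /* e.g. PPC970 */

        if (old_cpu)    /* HPT must be aligned on a multiple of its size */
            align_size = rounddown_pow_of_two(selected_size);
        else
            align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;

        if (align_size < (kvm_rma_pages << PAGE_SHIFT))
            align_size = kvm_rma_pages << PAGE_SHIFT;       /* max() */

        printf("reserve %lu MiB, align %lu MiB\n",
               selected_size / SZ_1M, align_size / SZ_1M);
        return 0;
    }
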
diff --git a/arch/powerpc/kvm/book3s_hv_cma.c b/arch/powerpc/kvm/book3s_hv_cma.c
new file mode 100644
index 000000000000..d9d3d8553d51
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv_cma.c
@@ -0,0 +1,240 @@
1/*
2 * Contiguous Memory Allocator for ppc KVM hash pagetable based on CMA
3 * for DMA mapping framework
4 *
5 * Copyright IBM Corporation, 2013
6 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License as
10 * published by the Free Software Foundation; either version 2 of the
 11 * License, or (at your option) any later version.
12 *
13 */
14#define pr_fmt(fmt) "kvm_cma: " fmt
15
16#ifdef CONFIG_CMA_DEBUG
17#ifndef DEBUG
18# define DEBUG
19#endif
20#endif
21
22#include <linux/memblock.h>
23#include <linux/mutex.h>
24#include <linux/sizes.h>
25#include <linux/slab.h>
26
27#include "book3s_hv_cma.h"
28
29struct kvm_cma {
30 unsigned long base_pfn;
31 unsigned long count;
32 unsigned long *bitmap;
33};
34
35static DEFINE_MUTEX(kvm_cma_mutex);
36static struct kvm_cma kvm_cma_area;
37
38/**
39 * kvm_cma_declare_contiguous() - reserve area for contiguous memory handling
40 * for kvm hash pagetable
41 * @size: Size of the reserved memory.
42 * @alignment: Alignment for the contiguous memory area
43 *
 44 * This function reserves memory for the kvm cma area. It should be
 45 * called by arch code while the early allocator (memblock or bootmem)
 46 * is still active.
47 */
48long __init kvm_cma_declare_contiguous(phys_addr_t size, phys_addr_t alignment)
49{
50 long base_pfn;
51 phys_addr_t addr;
52 struct kvm_cma *cma = &kvm_cma_area;
53
54 pr_debug("%s(size %lx)\n", __func__, (unsigned long)size);
55
56 if (!size)
57 return -EINVAL;
58 /*
59 * Sanitise input arguments.
60 * We should be pageblock aligned for CMA.
61 */
62 alignment = max(alignment, (phys_addr_t)(PAGE_SIZE << pageblock_order));
63 size = ALIGN(size, alignment);
64 /*
65 * Reserve memory
66 * Use __memblock_alloc_base() since
67 * memblock_alloc_base() panic()s.
68 */
69 addr = __memblock_alloc_base(size, alignment, 0);
70 if (!addr) {
71 base_pfn = -ENOMEM;
72 goto err;
73 } else
74 base_pfn = PFN_DOWN(addr);
75
76 /*
77 * Each reserved area must be initialised later, when more kernel
78 * subsystems (like slab allocator) are available.
79 */
80 cma->base_pfn = base_pfn;
81 cma->count = size >> PAGE_SHIFT;
82 pr_info("CMA: reserved %ld MiB\n", (unsigned long)size / SZ_1M);
83 return 0;
84err:
85 pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
86 return base_pfn;
87}
88
89/**
90 * kvm_alloc_cma() - allocate pages from contiguous area
91 * @nr_pages: Requested number of pages.
92 * @align_pages: Requested alignment in number of pages
93 *
 94 * This function allocates a memory buffer for the hash pagetable.
95 */
96struct page *kvm_alloc_cma(unsigned long nr_pages, unsigned long align_pages)
97{
98 int ret;
99 struct page *page = NULL;
100 struct kvm_cma *cma = &kvm_cma_area;
101 unsigned long chunk_count, nr_chunk;
102 unsigned long mask, pfn, pageno, start = 0;
103
104
105 if (!cma || !cma->count)
106 return NULL;
107
108 pr_debug("%s(cma %p, count %lu, align pages %lu)\n", __func__,
109 (void *)cma, nr_pages, align_pages);
110
111 if (!nr_pages)
112 return NULL;
113 /*
 114 * Align the mask to the chunk size; each bitmap bit tracks one chunk of pages.
115 */
116 VM_BUG_ON(!is_power_of_2(align_pages));
117 mask = (align_pages >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT)) - 1;
118 BUILD_BUG_ON(PAGE_SHIFT > KVM_CMA_CHUNK_ORDER);
119
120 chunk_count = cma->count >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
121 nr_chunk = nr_pages >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
122
123 mutex_lock(&kvm_cma_mutex);
124 for (;;) {
125 pageno = bitmap_find_next_zero_area(cma->bitmap, chunk_count,
126 start, nr_chunk, mask);
127 if (pageno >= chunk_count)
128 break;
129
130 pfn = cma->base_pfn + (pageno << (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT));
131 ret = alloc_contig_range(pfn, pfn + nr_pages, MIGRATE_CMA);
132 if (ret == 0) {
133 bitmap_set(cma->bitmap, pageno, nr_chunk);
134 page = pfn_to_page(pfn);
135 memset(pfn_to_kaddr(pfn), 0, nr_pages << PAGE_SHIFT);
136 break;
137 } else if (ret != -EBUSY) {
138 break;
139 }
140 pr_debug("%s(): memory range at %p is busy, retrying\n",
141 __func__, pfn_to_page(pfn));
142 /* try again with a bit different memory target */
143 start = pageno + mask + 1;
144 }
145 mutex_unlock(&kvm_cma_mutex);
146 pr_debug("%s(): returned %p\n", __func__, page);
147 return page;
148}
149
150/**
151 * kvm_release_cma() - release allocated pages for hash pagetable
152 * @pages: Allocated pages.
153 * @nr_pages: Number of allocated pages.
154 *
155 * This function releases memory allocated by kvm_alloc_cma().
 156 * It returns false when the provided pages do not belong to the contiguous area and
157 * true otherwise.
158 */
159bool kvm_release_cma(struct page *pages, unsigned long nr_pages)
160{
161 unsigned long pfn;
162 unsigned long nr_chunk;
163 struct kvm_cma *cma = &kvm_cma_area;
164
165 if (!cma || !pages)
166 return false;
167
168 pr_debug("%s(page %p count %lu)\n", __func__, (void *)pages, nr_pages);
169
170 pfn = page_to_pfn(pages);
171
172 if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
173 return false;
174
175 VM_BUG_ON(pfn + nr_pages > cma->base_pfn + cma->count);
176 nr_chunk = nr_pages >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
177
178 mutex_lock(&kvm_cma_mutex);
179 bitmap_clear(cma->bitmap,
180 (pfn - cma->base_pfn) >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT),
181 nr_chunk);
182 free_contig_range(pfn, nr_pages);
183 mutex_unlock(&kvm_cma_mutex);
184
185 return true;
186}
187
188static int __init kvm_cma_activate_area(unsigned long base_pfn,
189 unsigned long count)
190{
191 unsigned long pfn = base_pfn;
192 unsigned i = count >> pageblock_order;
193 struct zone *zone;
194
195 WARN_ON_ONCE(!pfn_valid(pfn));
196 zone = page_zone(pfn_to_page(pfn));
197 do {
198 unsigned j;
199 base_pfn = pfn;
200 for (j = pageblock_nr_pages; j; --j, pfn++) {
201 WARN_ON_ONCE(!pfn_valid(pfn));
202 /*
203 * alloc_contig_range requires the pfn range
204 * specified to be in the same zone. Make this
205 * simple by forcing the entire CMA resv range
206 * to be in the same zone.
207 */
208 if (page_zone(pfn_to_page(pfn)) != zone)
209 return -EINVAL;
210 }
211 init_cma_reserved_pageblock(pfn_to_page(base_pfn));
212 } while (--i);
213 return 0;
214}
215
216static int __init kvm_cma_init_reserved_areas(void)
217{
218 int bitmap_size, ret;
219 unsigned long chunk_count;
220 struct kvm_cma *cma = &kvm_cma_area;
221
222 pr_debug("%s()\n", __func__);
223 if (!cma->count)
224 return 0;
225 chunk_count = cma->count >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
226 bitmap_size = BITS_TO_LONGS(chunk_count) * sizeof(long);
227 cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
228 if (!cma->bitmap)
229 return -ENOMEM;
230
231 ret = kvm_cma_activate_area(cma->base_pfn, cma->count);
232 if (ret)
233 goto error;
234 return 0;
235
236error:
237 kfree(cma->bitmap);
238 return ret;
239}
240core_initcall(kvm_cma_init_reserved_areas);
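
The allocator above tracks the reserved area in 256K chunks
(KVM_CMA_CHUNK_ORDER = 18) rather than individual pages, so every count and
bitmap index is shifted by (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT). A quick worked
example of that conversion, assuming 4K pages for readability:

    #include <stdio.h>

    #define PAGE_SHIFT          12  /* 4K pages for this example */
    #define KVM_CMA_CHUNK_ORDER 18  /* 256K chunks, as in book3s_hv_cma.h */

    int main(void)
    {
        unsigned long shift = KVM_CMA_CHUNK_ORDER - PAGE_SHIFT; /* 6 */
        unsigned long area_pages = 1UL << 16;   /* a 256MB reserved area */
        unsigned long req_pages = 1UL << 12;    /* a 16MB HPT request */

        /* One bitmap bit covers 2^shift = 64 pages. */
        printf("bitmap bits for area: %lu\n", area_pages >> shift); /* 1024 */
        printf("chunks per request:   %lu\n", req_pages >> shift);  /* 64 */
        return 0;
    }
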
diff --git a/arch/powerpc/kvm/book3s_hv_cma.h b/arch/powerpc/kvm/book3s_hv_cma.h
new file mode 100644
index 000000000000..655144f75fa5
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv_cma.h
@@ -0,0 +1,27 @@
1/*
2 * Contiguous Memory Allocator for ppc KVM hash pagetable based on CMA
3 * for DMA mapping framework
4 *
5 * Copyright IBM Corporation, 2013
6 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License as
10 * published by the Free Software Foundation; either version 2 of the
 11 * License, or (at your option) any later version.
12 *
13 */
14
15#ifndef __POWERPC_KVM_CMA_ALLOC_H__
16#define __POWERPC_KVM_CMA_ALLOC_H__
17/*
18 * Both RMA and Hash page allocation will be multiple of 256K.
19 */
20#define KVM_CMA_CHUNK_ORDER 18
21
22extern struct page *kvm_alloc_cma(unsigned long nr_pages,
23 unsigned long align_pages);
24extern bool kvm_release_cma(struct page *pages, unsigned long nr_pages);
25extern long kvm_cma_declare_contiguous(phys_addr_t size,
26 phys_addr_t alignment) __init;
27#endif
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index fc25689a9f35..9c515440ad1a 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -363,7 +363,11 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
363 vcpu->arch.pgdir, true, &vcpu->arch.gpr[4]); 363 vcpu->arch.pgdir, true, &vcpu->arch.gpr[4]);
364} 364}
365 365
366#ifdef __BIG_ENDIAN__
366#define LOCK_TOKEN (*(u32 *)(&get_paca()->lock_token)) 367#define LOCK_TOKEN (*(u32 *)(&get_paca()->lock_token))
368#else
369#define LOCK_TOKEN (*(u32 *)(&get_paca()->paca_index))
370#endif
367 371
368static inline int try_lock_tlbie(unsigned int *lock) 372static inline int try_lock_tlbie(unsigned int *lock)
369{ 373{
@@ -383,6 +387,80 @@ static inline int try_lock_tlbie(unsigned int *lock)
383 return old == 0; 387 return old == 0;
384} 388}
385 389
390/*
391 * tlbie/tlbiel is a bit different on the PPC970 compared to later
 392 * processors such as POWER7; the large page bit is in the instruction,
 393 * not in RB, and the top 16 bits and the bottom 12 bits of the VA
394 * in RB must be 0.
395 */
396static void do_tlbies_970(struct kvm *kvm, unsigned long *rbvalues,
397 long npages, int global, bool need_sync)
398{
399 long i;
400
401 if (global) {
402 while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
403 cpu_relax();
404 if (need_sync)
405 asm volatile("ptesync" : : : "memory");
406 for (i = 0; i < npages; ++i) {
407 unsigned long rb = rbvalues[i];
408
409 if (rb & 1) /* large page */
410 asm volatile("tlbie %0,1" : :
411 "r" (rb & 0x0000fffffffff000ul));
412 else
413 asm volatile("tlbie %0,0" : :
414 "r" (rb & 0x0000fffffffff000ul));
415 }
416 asm volatile("eieio; tlbsync; ptesync" : : : "memory");
417 kvm->arch.tlbie_lock = 0;
418 } else {
419 if (need_sync)
420 asm volatile("ptesync" : : : "memory");
421 for (i = 0; i < npages; ++i) {
422 unsigned long rb = rbvalues[i];
423
424 if (rb & 1) /* large page */
425 asm volatile("tlbiel %0,1" : :
426 "r" (rb & 0x0000fffffffff000ul));
427 else
428 asm volatile("tlbiel %0,0" : :
429 "r" (rb & 0x0000fffffffff000ul));
430 }
431 asm volatile("ptesync" : : : "memory");
432 }
433}
434
435static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
436 long npages, int global, bool need_sync)
437{
438 long i;
439
440 if (cpu_has_feature(CPU_FTR_ARCH_201)) {
441 /* PPC970 tlbie instruction is a bit different */
442 do_tlbies_970(kvm, rbvalues, npages, global, need_sync);
443 return;
444 }
445 if (global) {
446 while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
447 cpu_relax();
448 if (need_sync)
449 asm volatile("ptesync" : : : "memory");
450 for (i = 0; i < npages; ++i)
451 asm volatile(PPC_TLBIE(%1,%0) : :
452 "r" (rbvalues[i]), "r" (kvm->arch.lpid));
453 asm volatile("eieio; tlbsync; ptesync" : : : "memory");
454 kvm->arch.tlbie_lock = 0;
455 } else {
456 if (need_sync)
457 asm volatile("ptesync" : : : "memory");
458 for (i = 0; i < npages; ++i)
459 asm volatile("tlbiel %0" : : "r" (rbvalues[i]));
460 asm volatile("ptesync" : : : "memory");
461 }
462}
463
386long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags, 464long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
387 unsigned long pte_index, unsigned long avpn, 465 unsigned long pte_index, unsigned long avpn,
388 unsigned long *hpret) 466 unsigned long *hpret)
@@ -408,19 +486,7 @@ long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
408 if (v & HPTE_V_VALID) { 486 if (v & HPTE_V_VALID) {
409 hpte[0] &= ~HPTE_V_VALID; 487 hpte[0] &= ~HPTE_V_VALID;
410 rb = compute_tlbie_rb(v, hpte[1], pte_index); 488 rb = compute_tlbie_rb(v, hpte[1], pte_index);
411 if (global_invalidates(kvm, flags)) { 489 do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
412 while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
413 cpu_relax();
414 asm volatile("ptesync" : : : "memory");
415 asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
416 : : "r" (rb), "r" (kvm->arch.lpid));
417 asm volatile("ptesync" : : : "memory");
418 kvm->arch.tlbie_lock = 0;
419 } else {
420 asm volatile("ptesync" : : : "memory");
421 asm volatile("tlbiel %0" : : "r" (rb));
422 asm volatile("ptesync" : : : "memory");
423 }
424 /* Read PTE low word after tlbie to get final R/C values */ 490 /* Read PTE low word after tlbie to get final R/C values */
425 remove_revmap_chain(kvm, pte_index, rev, v, hpte[1]); 491 remove_revmap_chain(kvm, pte_index, rev, v, hpte[1]);
426 } 492 }
@@ -448,12 +514,11 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
448 unsigned long *hp, *hptes[4], tlbrb[4]; 514 unsigned long *hp, *hptes[4], tlbrb[4];
449 long int i, j, k, n, found, indexes[4]; 515 long int i, j, k, n, found, indexes[4];
450 unsigned long flags, req, pte_index, rcbits; 516 unsigned long flags, req, pte_index, rcbits;
451 long int local = 0; 517 int global;
452 long int ret = H_SUCCESS; 518 long int ret = H_SUCCESS;
453 struct revmap_entry *rev, *revs[4]; 519 struct revmap_entry *rev, *revs[4];
454 520
455 if (atomic_read(&kvm->online_vcpus) == 1) 521 global = global_invalidates(kvm, 0);
456 local = 1;
457 for (i = 0; i < 4 && ret == H_SUCCESS; ) { 522 for (i = 0; i < 4 && ret == H_SUCCESS; ) {
458 n = 0; 523 n = 0;
459 for (; i < 4; ++i) { 524 for (; i < 4; ++i) {
@@ -529,22 +594,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
529 break; 594 break;
530 595
531 /* Now that we've collected a batch, do the tlbies */ 596 /* Now that we've collected a batch, do the tlbies */
532 if (!local) { 597 do_tlbies(kvm, tlbrb, n, global, true);
533 while(!try_lock_tlbie(&kvm->arch.tlbie_lock))
534 cpu_relax();
535 asm volatile("ptesync" : : : "memory");
536 for (k = 0; k < n; ++k)
537 asm volatile(PPC_TLBIE(%1,%0) : :
538 "r" (tlbrb[k]),
539 "r" (kvm->arch.lpid));
540 asm volatile("eieio; tlbsync; ptesync" : : : "memory");
541 kvm->arch.tlbie_lock = 0;
542 } else {
543 asm volatile("ptesync" : : : "memory");
544 for (k = 0; k < n; ++k)
545 asm volatile("tlbiel %0" : : "r" (tlbrb[k]));
546 asm volatile("ptesync" : : : "memory");
547 }
548 598
549 /* Read PTE low words after tlbie to get final R/C values */ 599 /* Read PTE low words after tlbie to get final R/C values */
550 for (k = 0; k < n; ++k) { 600 for (k = 0; k < n; ++k) {
@@ -603,19 +653,7 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
603 if (v & HPTE_V_VALID) { 653 if (v & HPTE_V_VALID) {
604 rb = compute_tlbie_rb(v, r, pte_index); 654 rb = compute_tlbie_rb(v, r, pte_index);
605 hpte[0] = v & ~HPTE_V_VALID; 655 hpte[0] = v & ~HPTE_V_VALID;
606 if (global_invalidates(kvm, flags)) { 656 do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
607 while(!try_lock_tlbie(&kvm->arch.tlbie_lock))
608 cpu_relax();
609 asm volatile("ptesync" : : : "memory");
610 asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
611 : : "r" (rb), "r" (kvm->arch.lpid));
612 asm volatile("ptesync" : : : "memory");
613 kvm->arch.tlbie_lock = 0;
614 } else {
615 asm volatile("ptesync" : : : "memory");
616 asm volatile("tlbiel %0" : : "r" (rb));
617 asm volatile("ptesync" : : : "memory");
618 }
619 /* 657 /*
620 * If the host has this page as readonly but the guest 658 * If the host has this page as readonly but the guest
621 * wants to make it read/write, reduce the permissions. 659 * wants to make it read/write, reduce the permissions.
@@ -686,13 +724,7 @@ void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep,
686 724
687 hptep[0] &= ~HPTE_V_VALID; 725 hptep[0] &= ~HPTE_V_VALID;
688 rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index); 726 rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index);
689 while (!try_lock_tlbie(&kvm->arch.tlbie_lock)) 727 do_tlbies(kvm, &rb, 1, 1, true);
690 cpu_relax();
691 asm volatile("ptesync" : : : "memory");
692 asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
693 : : "r" (rb), "r" (kvm->arch.lpid));
694 asm volatile("ptesync" : : : "memory");
695 kvm->arch.tlbie_lock = 0;
696} 728}
697EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte); 729EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte);
698 730
@@ -706,12 +738,7 @@ void kvmppc_clear_ref_hpte(struct kvm *kvm, unsigned long *hptep,
706 rbyte = (hptep[1] & ~HPTE_R_R) >> 8; 738 rbyte = (hptep[1] & ~HPTE_R_R) >> 8;
707 /* modify only the second-last byte, which contains the ref bit */ 739 /* modify only the second-last byte, which contains the ref bit */
708 *((char *)hptep + 14) = rbyte; 740 *((char *)hptep + 14) = rbyte;
709 while (!try_lock_tlbie(&kvm->arch.tlbie_lock)) 741 do_tlbies(kvm, &rb, 1, 1, false);
710 cpu_relax();
711 asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
712 : : "r" (rb), "r" (kvm->arch.lpid));
713 asm volatile("ptesync" : : : "memory");
714 kvm->arch.tlbie_lock = 0;
715} 742}
716EXPORT_SYMBOL_GPL(kvmppc_clear_ref_hpte); 743EXPORT_SYMBOL_GPL(kvmppc_clear_ref_hpte);
717 744
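
do_tlbies() and do_tlbies_970() above fold four open-coded invalidation
sequences into one helper parameterized by whether the flush is global and
whether a leading ptesync is needed. A toy sketch of that consolidation shape
only, with puts()/printf() standing in for the real lock and tlbie sequences:

    #include <stdio.h>
    #include <stdbool.h>

    static void do_flushes(unsigned long *rbvalues, long npages,
                           bool global, bool need_sync)
    {
        if (need_sync)
            puts("ptesync");
        for (long i = 0; i < npages; i++)
            printf("%s %#lx\n", global ? "tlbie" : "tlbiel", rbvalues[i]);
        puts(global ? "eieio; tlbsync; ptesync" : "ptesync");
    }

    int main(void)
    {
        unsigned long rb[2] = { 0x1000, 0x2000 };

        do_flushes(rb, 2, true, true);  /* one sync pair for the whole batch */
        return 0;
    }
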
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index b02f91e4c70d..294b7af28cdd 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -29,6 +29,10 @@
29#include <asm/kvm_book3s_asm.h> 29#include <asm/kvm_book3s_asm.h>
30#include <asm/mmu-hash64.h> 30#include <asm/mmu-hash64.h>
31 31
32#ifdef __LITTLE_ENDIAN__
33#error Need to fix lppaca and SLB shadow accesses in little endian mode
34#endif
35
32/***************************************************************************** 36/*****************************************************************************
33 * * 37 * *
34 * Real Mode handlers that need to be in the linear mapping * 38 * Real Mode handlers that need to be in the linear mapping *
@@ -389,7 +393,11 @@ toc_tlbie_lock:
389 .tc native_tlbie_lock[TC],native_tlbie_lock 393 .tc native_tlbie_lock[TC],native_tlbie_lock
390 .previous 394 .previous
391 ld r3,toc_tlbie_lock@toc(2) 395 ld r3,toc_tlbie_lock@toc(2)
396#ifdef __BIG_ENDIAN__
392 lwz r8,PACA_LOCK_TOKEN(r13) 397 lwz r8,PACA_LOCK_TOKEN(r13)
398#else
399 lwz r8,PACAPACAINDEX(r13)
400#endif
39324: lwarx r0,0,r3 40124: lwarx r0,0,r3
394 cmpwi r0,0 402 cmpwi r0,0
395 bne 24b 403 bne 24b
@@ -964,7 +972,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
96432: ld r4,VCPU_KVM(r9) /* pointer to struct kvm */ 97232: ld r4,VCPU_KVM(r9) /* pointer to struct kvm */
965 973
966 /* Take the guest's tlbie_lock */ 974 /* Take the guest's tlbie_lock */
975#ifdef __BIG_ENDIAN__
967 lwz r8,PACA_LOCK_TOKEN(r13) 976 lwz r8,PACA_LOCK_TOKEN(r13)
977#else
978 lwz r8,PACAPACAINDEX(r13)
979#endif
968 addi r3,r4,KVM_TLBIE_LOCK 980 addi r3,r4,KVM_TLBIE_LOCK
96924: lwarx r0,0,r3 98124: lwarx r0,0,r3
970 cmpwi r0,0 982 cmpwi r0,0
@@ -1381,7 +1393,7 @@ hcall_try_real_mode:
1381 cmpldi r3,hcall_real_table_end - hcall_real_table 1393 cmpldi r3,hcall_real_table_end - hcall_real_table
1382 bge guest_exit_cont 1394 bge guest_exit_cont
1383 LOAD_REG_ADDR(r4, hcall_real_table) 1395 LOAD_REG_ADDR(r4, hcall_real_table)
1384 lwzx r3,r3,r4 1396 lwax r3,r3,r4
1385 cmpwi r3,0 1397 cmpwi r3,0
1386 beq guest_exit_cont 1398 beq guest_exit_cont
1387 add r3,r3,r4 1399 add r3,r3,r4
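
The lwzx -> lwax change above matters because hcall_real_table holds 32-bit
offsets relative to the table, and those offsets can be negative. lwzx
zero-extends the loaded word into the 64-bit register, while lwax sign-extends
it, so a negative entry loaded with lwzx becomes a huge positive offset. The
same effect in C (an analogy, not the kernel code):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        int32_t entry = -64;        /* offset stored in the 32-bit table */

        uint64_t zero_ext = (uint32_t)entry;    /* what lwzx produces */
        int64_t sign_ext = entry;               /* what lwax produces */

        printf("lwzx-style: 0x%llx\n", (unsigned long long)zero_ext);
        printf("lwax-style: %lld\n", (long long)sign_ext);
        /* Only the sign-extended value, added to the table base,
         * points back at the intended handler. */
        return 0;
    }
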
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index 48cbbf862958..17cfae5497a3 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -92,6 +92,11 @@ kvm_start_lightweight:
92 PPC_LL r3, VCPU_HFLAGS(r4) 92 PPC_LL r3, VCPU_HFLAGS(r4)
93 rldicl r3, r3, 0, 63 /* r3 &= 1 */ 93 rldicl r3, r3, 0, 63 /* r3 &= 1 */
94 stb r3, HSTATE_RESTORE_HID5(r13) 94 stb r3, HSTATE_RESTORE_HID5(r13)
95
96 /* Load up guest SPRG3 value, since it's user readable */
97 ld r3, VCPU_SHARED(r4)
98 ld r3, VCPU_SHARED_SPRG3(r3)
99 mtspr SPRN_SPRG3, r3
95#endif /* CONFIG_PPC_BOOK3S_64 */ 100#endif /* CONFIG_PPC_BOOK3S_64 */
96 101
97 PPC_LL r4, VCPU_SHADOW_MSR(r4) /* get shadow_msr */ 102 PPC_LL r4, VCPU_SHADOW_MSR(r4) /* get shadow_msr */
@@ -123,6 +128,15 @@ kvmppc_handler_highmem:
123 /* R7 = vcpu */ 128 /* R7 = vcpu */
124 PPC_LL r7, GPR4(r1) 129 PPC_LL r7, GPR4(r1)
125 130
131#ifdef CONFIG_PPC_BOOK3S_64
132 /*
133 * Reload kernel SPRG3 value.
134 * No need to save guest value as usermode can't modify SPRG3.
135 */
136 ld r3, PACA_SPRG3(r13)
137 mtspr SPRN_SPRG3, r3
138#endif /* CONFIG_PPC_BOOK3S_64 */
139
126 PPC_STL r14, VCPU_GPR(R14)(r7) 140 PPC_STL r14, VCPU_GPR(R14)(r7)
127 PPC_STL r15, VCPU_GPR(R15)(r7) 141 PPC_STL r15, VCPU_GPR(R15)(r7)
128 PPC_STL r16, VCPU_GPR(R16)(r7) 142 PPC_STL r16, VCPU_GPR(R16)(r7)
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 19498a567a81..27db1e665959 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -468,7 +468,8 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
468 * both the traditional FP registers and the added VSX 468 * both the traditional FP registers and the added VSX
469 * registers into thread.fpr[]. 469 * registers into thread.fpr[].
470 */ 470 */
471 giveup_fpu(current); 471 if (current->thread.regs->msr & MSR_FP)
472 giveup_fpu(current);
472 for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) 473 for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
473 vcpu_fpr[i] = thread_fpr[get_fpr_index(i)]; 474 vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];
474 475
@@ -483,7 +484,8 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
483 484
484#ifdef CONFIG_ALTIVEC 485#ifdef CONFIG_ALTIVEC
485 if (msr & MSR_VEC) { 486 if (msr & MSR_VEC) {
486 giveup_altivec(current); 487 if (current->thread.regs->msr & MSR_VEC)
488 giveup_altivec(current);
487 memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr)); 489 memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr));
488 vcpu->arch.vscr = t->vscr; 490 vcpu->arch.vscr = t->vscr;
489 } 491 }
@@ -575,8 +577,6 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
575 printk(KERN_INFO "Loading up ext 0x%lx\n", msr); 577 printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
576#endif 578#endif
577 579
578 current->thread.regs->msr |= msr;
579
580 if (msr & MSR_FP) { 580 if (msr & MSR_FP) {
581 for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) 581 for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
582 thread_fpr[get_fpr_index(i)] = vcpu_fpr[i]; 582 thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];
@@ -598,12 +598,32 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
598#endif 598#endif
599 } 599 }
600 600
601 current->thread.regs->msr |= msr;
601 vcpu->arch.guest_owned_ext |= msr; 602 vcpu->arch.guest_owned_ext |= msr;
602 kvmppc_recalc_shadow_msr(vcpu); 603 kvmppc_recalc_shadow_msr(vcpu);
603 604
604 return RESUME_GUEST; 605 return RESUME_GUEST;
605} 606}
606 607
608/*
609 * Kernel code using FP or VMX could have flushed guest state to
610 * the thread_struct; if so, get it back now.
611 */
612static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
613{
614 unsigned long lost_ext;
615
616 lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr;
617 if (!lost_ext)
618 return;
619
620 if (lost_ext & MSR_FP)
621 kvmppc_load_up_fpu();
622 if (lost_ext & MSR_VEC)
623 kvmppc_load_up_altivec();
624 current->thread.regs->msr |= lost_ext;
625}
626
607int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, 627int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
608 unsigned int exit_nr) 628 unsigned int exit_nr)
609{ 629{
@@ -772,7 +792,7 @@ program_interrupt:
772 } 792 }
773 case BOOK3S_INTERRUPT_SYSCALL: 793 case BOOK3S_INTERRUPT_SYSCALL:
774 if (vcpu->arch.papr_enabled && 794 if (vcpu->arch.papr_enabled &&
775 (kvmppc_get_last_inst(vcpu) == 0x44000022) && 795 (kvmppc_get_last_sc(vcpu) == 0x44000022) &&
776 !(vcpu->arch.shared->msr & MSR_PR)) { 796 !(vcpu->arch.shared->msr & MSR_PR)) {
777 /* SC 1 papr hypercalls */ 797 /* SC 1 papr hypercalls */
778 ulong cmd = kvmppc_get_gpr(vcpu, 3); 798 ulong cmd = kvmppc_get_gpr(vcpu, 3);
@@ -890,8 +910,9 @@ program_interrupt:
890 local_irq_enable(); 910 local_irq_enable();
891 r = s; 911 r = s;
892 } else { 912 } else {
893 kvmppc_lazy_ee_enable(); 913 kvmppc_fix_ee_before_entry();
894 } 914 }
915 kvmppc_handle_lost_ext(vcpu);
895 } 916 }
896 917
897 trace_kvm_book3s_reenter(r, vcpu); 918 trace_kvm_book3s_reenter(r, vcpu);
@@ -1047,11 +1068,12 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
1047 if (err) 1068 if (err)
1048 goto free_shadow_vcpu; 1069 goto free_shadow_vcpu;
1049 1070
1071 err = -ENOMEM;
1050 p = __get_free_page(GFP_KERNEL|__GFP_ZERO); 1072 p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
1051 /* the real shared page fills the last 4k of our page */
1052 vcpu->arch.shared = (void*)(p + PAGE_SIZE - 4096);
1053 if (!p) 1073 if (!p)
1054 goto uninit_vcpu; 1074 goto uninit_vcpu;
1075 /* the real shared page fills the last 4k of our page */
1076 vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096);
1055 1077
1056#ifdef CONFIG_PPC_BOOK3S_64 1078#ifdef CONFIG_PPC_BOOK3S_64
1057 /* default to book3s_64 (970fx) */ 1079 /* default to book3s_64 (970fx) */
@@ -1161,7 +1183,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
1161 if (vcpu->arch.shared->msr & MSR_FP) 1183 if (vcpu->arch.shared->msr & MSR_FP)
1162 kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); 1184 kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
1163 1185
1164 kvmppc_lazy_ee_enable(); 1186 kvmppc_fix_ee_before_entry();
1165 1187
1166 ret = __kvmppc_vcpu_run(kvm_run, vcpu); 1188 ret = __kvmppc_vcpu_run(kvm_run, vcpu);
1167 1189
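
kvmppc_handle_lost_ext() above recovers facilities the host kernel borrowed
while the guest owned them, by diffing guest_owned_ext against the live MSR.
A toy illustration of that mask logic (MSR bit values as on 64-bit Book3S;
the printf() calls stand in for the real reload routines):

    #include <stdio.h>

    #define MSR_FP  0x2000UL
    #define MSR_VEC 0x02000000UL

    int main(void)
    {
        unsigned long guest_owned_ext = MSR_FP | MSR_VEC;
        unsigned long regs_msr = MSR_VEC;   /* kernel FP use cleared MSR_FP */
        unsigned long lost = guest_owned_ext & ~regs_msr;

        if (lost & MSR_FP)
            printf("reload FPU state\n");       /* kvmppc_load_up_fpu() */
        if (lost & MSR_VEC)
            printf("reload Altivec state\n");
        return 0;
    }
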
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index 94c1dd46b83d..a3a5cb8ee7ea 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -19,6 +19,7 @@
19#include <asm/hvcall.h> 19#include <asm/hvcall.h>
20#include <asm/xics.h> 20#include <asm/xics.h>
21#include <asm/debug.h> 21#include <asm/debug.h>
22#include <asm/time.h>
22 23
23#include <linux/debugfs.h> 24#include <linux/debugfs.h>
24#include <linux/seq_file.h> 25#include <linux/seq_file.h>
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index dcc94f016007..17722d82f1d1 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -674,8 +674,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
674 goto out; 674 goto out;
675 } 675 }
676 676
677 kvm_guest_enter();
678
679#ifdef CONFIG_PPC_FPU 677#ifdef CONFIG_PPC_FPU
680 /* Save userspace FPU state in stack */ 678 /* Save userspace FPU state in stack */
681 enable_kernel_fp(); 679 enable_kernel_fp();
@@ -698,7 +696,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
698 kvmppc_load_guest_fp(vcpu); 696 kvmppc_load_guest_fp(vcpu);
699#endif 697#endif
700 698
701 kvmppc_lazy_ee_enable(); 699 kvmppc_fix_ee_before_entry();
702 700
703 ret = __kvmppc_vcpu_run(kvm_run, vcpu); 701 ret = __kvmppc_vcpu_run(kvm_run, vcpu);
704 702
@@ -1168,7 +1166,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
1168 local_irq_enable(); 1166 local_irq_enable();
1169 r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV); 1167 r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
1170 } else { 1168 } else {
1171 kvmppc_lazy_ee_enable(); 1169 kvmppc_fix_ee_before_entry();
1172 } 1170 }
1173 } 1171 }
1174 1172
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 2c52ada30775..751cd45f65a0 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -30,53 +30,10 @@
30#include <asm/byteorder.h> 30#include <asm/byteorder.h>
31#include <asm/kvm_ppc.h> 31#include <asm/kvm_ppc.h>
32#include <asm/disassemble.h> 32#include <asm/disassemble.h>
33#include <asm/ppc-opcode.h>
33#include "timing.h" 34#include "timing.h"
34#include "trace.h" 35#include "trace.h"
35 36
36#define OP_TRAP 3
37#define OP_TRAP_64 2
38
39#define OP_31_XOP_TRAP 4
40#define OP_31_XOP_LWZX 23
41#define OP_31_XOP_DCBST 54
42#define OP_31_XOP_TRAP_64 68
43#define OP_31_XOP_DCBF 86
44#define OP_31_XOP_LBZX 87
45#define OP_31_XOP_STWX 151
46#define OP_31_XOP_STBX 215
47#define OP_31_XOP_LBZUX 119
48#define OP_31_XOP_STBUX 247
49#define OP_31_XOP_LHZX 279
50#define OP_31_XOP_LHZUX 311
51#define OP_31_XOP_MFSPR 339
52#define OP_31_XOP_LHAX 343
53#define OP_31_XOP_STHX 407
54#define OP_31_XOP_STHUX 439
55#define OP_31_XOP_MTSPR 467
56#define OP_31_XOP_DCBI 470
57#define OP_31_XOP_LWBRX 534
58#define OP_31_XOP_TLBSYNC 566
59#define OP_31_XOP_STWBRX 662
60#define OP_31_XOP_LHBRX 790
61#define OP_31_XOP_STHBRX 918
62
63#define OP_LWZ 32
64#define OP_LD 58
65#define OP_LWZU 33
66#define OP_LBZ 34
67#define OP_LBZU 35
68#define OP_STW 36
69#define OP_STWU 37
70#define OP_STD 62
71#define OP_STB 38
72#define OP_STBU 39
73#define OP_LHZ 40
74#define OP_LHZU 41
75#define OP_LHA 42
76#define OP_LHAU 43
77#define OP_STH 44
78#define OP_STHU 45
79
80void kvmppc_emulate_dec(struct kvm_vcpu *vcpu) 37void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
81{ 38{
82 unsigned long dec_nsec; 39 unsigned long dec_nsec;
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 6316ee336e88..07c0106fab76 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -117,8 +117,6 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
117 kvm_guest_exit(); 117 kvm_guest_exit();
118 continue; 118 continue;
119 } 119 }
120
121 trace_hardirqs_on();
122#endif 120#endif
123 121
124 kvm_guest_enter(); 122 kvm_guest_enter();
@@ -420,6 +418,10 @@ int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
420 return kvmppc_core_create_memslot(slot, npages); 418 return kvmppc_core_create_memslot(slot, npages);
421} 419}
422 420
421void kvm_arch_memslots_updated(struct kvm *kvm)
422{
423}
424
423int kvm_arch_prepare_memory_region(struct kvm *kvm, 425int kvm_arch_prepare_memory_region(struct kvm *kvm,
424 struct kvm_memory_slot *memslot, 426 struct kvm_memory_slot *memslot,
425 struct kvm_userspace_memory_region *mem, 427 struct kvm_userspace_memory_region *mem,
@@ -823,39 +825,39 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
823#endif 825#endif
824#ifdef CONFIG_KVM_MPIC 826#ifdef CONFIG_KVM_MPIC
825 case KVM_CAP_IRQ_MPIC: { 827 case KVM_CAP_IRQ_MPIC: {
826 struct file *filp; 828 struct fd f;
827 struct kvm_device *dev; 829 struct kvm_device *dev;
828 830
829 r = -EBADF; 831 r = -EBADF;
830 filp = fget(cap->args[0]); 832 f = fdget(cap->args[0]);
831 if (!filp) 833 if (!f.file)
832 break; 834 break;
833 835
834 r = -EPERM; 836 r = -EPERM;
835 dev = kvm_device_from_filp(filp); 837 dev = kvm_device_from_filp(f.file);
836 if (dev) 838 if (dev)
837 r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]); 839 r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);
838 840
839 fput(filp); 841 fdput(f);
840 break; 842 break;
841 } 843 }
842#endif 844#endif
843#ifdef CONFIG_KVM_XICS 845#ifdef CONFIG_KVM_XICS
844 case KVM_CAP_IRQ_XICS: { 846 case KVM_CAP_IRQ_XICS: {
845 struct file *filp; 847 struct fd f;
846 struct kvm_device *dev; 848 struct kvm_device *dev;
847 849
848 r = -EBADF; 850 r = -EBADF;
849 filp = fget(cap->args[0]); 851 f = fdget(cap->args[0]);
850 if (!filp) 852 if (!f.file)
851 break; 853 break;
852 854
853 r = -EPERM; 855 r = -EPERM;
854 dev = kvm_device_from_filp(filp); 856 dev = kvm_device_from_filp(f.file);
855 if (dev) 857 if (dev)
856 r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]); 858 r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
857 859
858 fput(filp); 860 fdput(f);
859 break; 861 break;
860 } 862 }
861#endif /* CONFIG_KVM_XICS */ 863#endif /* CONFIG_KVM_XICS */
diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
index bb7cfecf2788..0c9c8d7d0734 100644
--- a/arch/powerpc/lib/locks.c
+++ b/arch/powerpc/lib/locks.c
@@ -32,7 +32,7 @@ void __spin_yield(arch_spinlock_t *lock)
32 return; 32 return;
33 holder_cpu = lock_value & 0xffff; 33 holder_cpu = lock_value & 0xffff;
34 BUG_ON(holder_cpu >= NR_CPUS); 34 BUG_ON(holder_cpu >= NR_CPUS);
35 yield_count = lppaca_of(holder_cpu).yield_count; 35 yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count);
36 if ((yield_count & 1) == 0) 36 if ((yield_count & 1) == 0)
37 return; /* virtual cpu is currently running */ 37 return; /* virtual cpu is currently running */
38 rmb(); 38 rmb();
@@ -57,7 +57,7 @@ void __rw_yield(arch_rwlock_t *rw)
57 return; /* no write lock at present */ 57 return; /* no write lock at present */
58 holder_cpu = lock_value & 0xffff; 58 holder_cpu = lock_value & 0xffff;
59 BUG_ON(holder_cpu >= NR_CPUS); 59 BUG_ON(holder_cpu >= NR_CPUS);
60 yield_count = lppaca_of(holder_cpu).yield_count; 60 yield_count = be32_to_cpu(lppaca_of(holder_cpu).yield_count);
61 if ((yield_count & 1) == 0) 61 if ((yield_count & 1) == 0)
62 return; /* virtual cpu is currently running */ 62 return; /* virtual cpu is currently running */
63 rmb(); 63 rmb();
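
The locks.c hunks wrap yield_count in be32_to_cpu() because lppaca fields are
stored big-endian regardless of the kernel's own endianness. A standalone
illustration of what a raw load gets wrong on a little-endian host, with
ntohl() standing in for the kernel's be32_to_cpu():

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>

    int main(void)
    {
        /* The hypervisor keeps yield_count big-endian in memory. */
        uint8_t raw[4] = { 0x00, 0x00, 0x00, 0x03 };    /* value 3 */
        uint32_t loaded;

        memcpy(&loaded, raw, sizeof(loaded));

        /* On a little-endian host the raw load reads 0x03000000, not 3,
         * so the (yield_count & 1) preemption test checks the wrong bit. */
        printf("raw load:     0x%x\n", loaded);
        printf("byte-swapped: %u\n", ntohl(loaded));
        return 0;
    }
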
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 99c7fc16dc0d..a7ee978fb860 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -100,8 +100,10 @@ static unsigned long __kprobes dform_ea(unsigned int instr, struct pt_regs *regs
100 ea = (signed short) instr; /* sign-extend */ 100 ea = (signed short) instr; /* sign-extend */
101 if (ra) { 101 if (ra) {
102 ea += regs->gpr[ra]; 102 ea += regs->gpr[ra];
103 if (instr & 0x04000000) /* update forms */ 103 if (instr & 0x04000000) { /* update forms */
104 regs->gpr[ra] = ea; 104 if ((instr>>26) != 47) /* stmw is not an update form */
105 regs->gpr[ra] = ea;
106 }
105 } 107 }
106 108
107 return truncate_if_32bit(regs->msr, ea); 109 return truncate_if_32bit(regs->msr, ea);
@@ -279,7 +281,7 @@ static int __kprobes write_mem_unaligned(unsigned long val, unsigned long ea,
279 err = write_mem_aligned(val >> (nb - c) * 8, ea, c); 281 err = write_mem_aligned(val >> (nb - c) * 8, ea, c);
280 if (err) 282 if (err)
281 return err; 283 return err;
282 ++ea; 284 ea += c;
283 } 285 }
284 return 0; 286 return 0;
285} 287}
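
The "ea += c" fix in write_mem_unaligned() deserves a worked example: the loop
stores the value in naturally aligned pieces of c bytes each, so advancing the
address by only one byte after a 2- or 4-byte piece scatters the data. A
simplified userspace model of the fixed loop; the alignment helper and the
big-endian byte order are assumptions for illustration:

    #include <stdio.h>
    #include <stdint.h>

    /* Largest power-of-two alignment of x (x > 0), capped at 8 bytes. */
    static unsigned long max_align(unsigned long x)
    {
        unsigned long a = x & -x;
        return a > 8 ? 8 : a;
    }

    static void write_unaligned(uint64_t val, unsigned char *buf,
                                unsigned long ea, int nb)
    {
        unsigned long c;

        for (; nb > 0; nb -= c) {
            c = max_align(ea);
            if (c > (unsigned long)nb)
                c = max_align(nb);
            /* store the next c most-significant remaining bytes */
            for (unsigned long i = 0; i < c; i++)
                buf[ea + i] = (val >> ((nb - 1 - i) * 8)) & 0xff;
            ea += c;    /* the fix: was ++ea */
        }
    }

    int main(void)
    {
        unsigned char buf[16] = { 0 };

        write_unaligned(0x1122334455667788ULL, buf, 1, 8);
        for (int i = 0; i < 12; i++)
            printf("%02x ", buf[i]);
        printf("\n");   /* 00 11 22 33 44 55 66 77 88 00 00 00 */
        return 0;
    }
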
diff --git a/arch/powerpc/math-emu/Makefile b/arch/powerpc/math-emu/Makefile
index 8d035d2d42a6..1b46ab4f6417 100644
--- a/arch/powerpc/math-emu/Makefile
+++ b/arch/powerpc/math-emu/Makefile
@@ -1,15 +1,15 @@
1 1math-emu-common-objs = math.o fre.o fsqrt.o fsqrts.o frsqrtes.o mtfsf.o mtfsfi.o
2obj-$(CONFIG_MATH_EMULATION) += fabs.o fadd.o fadds.o fcmpo.o fcmpu.o \ 2obj-$(CONFIG_MATH_EMULATION_HW_UNIMPLEMENTED) += $(math-emu-common-objs)
3 fctiw.o fctiwz.o fdiv.o fdivs.o \ 3obj-$(CONFIG_MATH_EMULATION_FULL) += $(math-emu-common-objs) fabs.o fadd.o \
4 fmadd.o fmadds.o fmsub.o fmsubs.o \ 4 fadds.o fcmpo.o fcmpu.o fctiw.o \
5 fmul.o fmuls.o fnabs.o fneg.o \ 5 fctiwz.o fdiv.o fdivs.o fmadd.o \
6 fnmadd.o fnmadds.o fnmsub.o fnmsubs.o \ 6 fmadds.o fmsub.o fmsubs.o fmul.o \
7 fres.o fre.o frsp.o fsel.o lfs.o \ 7 fmuls.o fnabs.o fneg.o fnmadd.o \
8 frsqrte.o frsqrtes.o \ 8 fnmadds.o fnmsub.o fnmsubs.o fres.o \
9 fsqrt.o fsqrts.o fsub.o fsubs.o \ 9 frsp.o fsel.o lfs.o frsqrte.o fsub.o \
10 mcrfs.o mffs.o mtfsb0.o mtfsb1.o \ 10 fsubs.o mcrfs.o mffs.o mtfsb0.o \
11 mtfsf.o mtfsfi.o stfiwx.o stfs.o \ 11 mtfsb1.o stfiwx.o stfs.o math.o \
12 math.o fmr.o lfd.o stfd.o 12 fmr.o lfd.o stfd.o
13 13
14obj-$(CONFIG_SPE) += math_efp.o 14obj-$(CONFIG_SPE) += math_efp.o
15 15
diff --git a/arch/powerpc/math-emu/math.c b/arch/powerpc/math-emu/math.c
index 0328e66e0799..ab151f040502 100644
--- a/arch/powerpc/math-emu/math.c
+++ b/arch/powerpc/math-emu/math.c
@@ -7,12 +7,27 @@
7 7
8#include <asm/uaccess.h> 8#include <asm/uaccess.h>
9#include <asm/reg.h> 9#include <asm/reg.h>
10#include <asm/switch_to.h>
10 11
11#include <asm/sfp-machine.h> 12#include <asm/sfp-machine.h>
12#include <math-emu/double.h> 13#include <math-emu/double.h>
13 14
14#define FLOATFUNC(x) extern int x(void *, void *, void *, void *) 15#define FLOATFUNC(x) extern int x(void *, void *, void *, void *)
15 16
 17/* Instructions that may not be implemented by a hardware FPU */
18FLOATFUNC(fre);
19FLOATFUNC(frsqrtes);
20FLOATFUNC(fsqrt);
21FLOATFUNC(fsqrts);
22FLOATFUNC(mtfsf);
23FLOATFUNC(mtfsfi);
24
25#ifdef CONFIG_MATH_EMULATION_HW_UNIMPLEMENTED
 26#undef FLOATFUNC
 27#define FLOATFUNC(x) static inline int x(void *op1, void *op2, void *op3, \
 28 void *op4) { return 0; }
29#endif
30
16FLOATFUNC(fadd); 31FLOATFUNC(fadd);
17FLOATFUNC(fadds); 32FLOATFUNC(fadds);
18FLOATFUNC(fdiv); 33FLOATFUNC(fdiv);
@@ -42,8 +57,6 @@ FLOATFUNC(mcrfs);
42FLOATFUNC(mffs); 57FLOATFUNC(mffs);
43FLOATFUNC(mtfsb0); 58FLOATFUNC(mtfsb0);
44FLOATFUNC(mtfsb1); 59FLOATFUNC(mtfsb1);
45FLOATFUNC(mtfsf);
46FLOATFUNC(mtfsfi);
47 60
48FLOATFUNC(lfd); 61FLOATFUNC(lfd);
49FLOATFUNC(lfs); 62FLOATFUNC(lfs);
@@ -58,13 +71,9 @@ FLOATFUNC(fnabs);
58FLOATFUNC(fneg); 71FLOATFUNC(fneg);
59 72
60/* Optional */ 73/* Optional */
61FLOATFUNC(fre);
62FLOATFUNC(fres); 74FLOATFUNC(fres);
63FLOATFUNC(frsqrte); 75FLOATFUNC(frsqrte);
64FLOATFUNC(frsqrtes);
65FLOATFUNC(fsel); 76FLOATFUNC(fsel);
66FLOATFUNC(fsqrt);
67FLOATFUNC(fsqrts);
68 77
69 78
70#define OP31 0x1f /* 31 */ 79#define OP31 0x1f /* 31 */
@@ -154,7 +163,6 @@ FLOATFUNC(fsqrts);
154#define XEU 15 163#define XEU 15
155#define XFLB 10 164#define XFLB 10
156 165
157#ifdef CONFIG_MATH_EMULATION
158static int 166static int
159record_exception(struct pt_regs *regs, int eflag) 167record_exception(struct pt_regs *regs, int eflag)
160{ 168{
@@ -212,7 +220,6 @@ record_exception(struct pt_regs *regs, int eflag)
212 220
213 return (fpscr & FPSCR_FEX) ? 1 : 0; 221 return (fpscr & FPSCR_FEX) ? 1 : 0;
214} 222}
215#endif /* CONFIG_MATH_EMULATION */
216 223
217int 224int
218do_mathemu(struct pt_regs *regs) 225do_mathemu(struct pt_regs *regs)
@@ -222,56 +229,13 @@ do_mathemu(struct pt_regs *regs)
222 signed short sdisp; 229 signed short sdisp;
223 u32 insn = 0; 230 u32 insn = 0;
224 int idx = 0; 231 int idx = 0;
225#ifdef CONFIG_MATH_EMULATION
226 int (*func)(void *, void *, void *, void *); 232 int (*func)(void *, void *, void *, void *);
227 int type = 0; 233 int type = 0;
228 int eflag, trap; 234 int eflag, trap;
229#endif
230 235
231 if (get_user(insn, (u32 *)pc)) 236 if (get_user(insn, (u32 *)pc))
232 return -EFAULT; 237 return -EFAULT;
233 238
234#ifndef CONFIG_MATH_EMULATION
235 switch (insn >> 26) {
236 case LFD:
237 idx = (insn >> 16) & 0x1f;
238 sdisp = (insn & 0xffff);
239 op0 = (void *)&current->thread.TS_FPR((insn >> 21) & 0x1f);
240 op1 = (void *)((idx ? regs->gpr[idx] : 0) + sdisp);
241 lfd(op0, op1, op2, op3);
242 break;
243 case LFDU:
244 idx = (insn >> 16) & 0x1f;
245 sdisp = (insn & 0xffff);
246 op0 = (void *)&current->thread.TS_FPR((insn >> 21) & 0x1f);
247 op1 = (void *)((idx ? regs->gpr[idx] : 0) + sdisp);
248 lfd(op0, op1, op2, op3);
249 regs->gpr[idx] = (unsigned long)op1;
250 break;
251 case STFD:
252 idx = (insn >> 16) & 0x1f;
253 sdisp = (insn & 0xffff);
254 op0 = (void *)&current->thread.TS_FPR((insn >> 21) & 0x1f);
255 op1 = (void *)((idx ? regs->gpr[idx] : 0) + sdisp);
256 stfd(op0, op1, op2, op3);
257 break;
258 case STFDU:
259 idx = (insn >> 16) & 0x1f;
260 sdisp = (insn & 0xffff);
261 op0 = (void *)&current->thread.TS_FPR((insn >> 21) & 0x1f);
262 op1 = (void *)((idx ? regs->gpr[idx] : 0) + sdisp);
263 stfd(op0, op1, op2, op3);
264 regs->gpr[idx] = (unsigned long)op1;
265 break;
266 case OP63:
267 op0 = (void *)&current->thread.TS_FPR((insn >> 21) & 0x1f);
268 op1 = (void *)&current->thread.TS_FPR((insn >> 11) & 0x1f);
269 fmr(op0, op1, op2, op3);
270 break;
271 default:
272 goto illegal;
273 }
274#else /* CONFIG_MATH_EMULATION */
275 switch (insn >> 26) { 239 switch (insn >> 26) {
276 case LFS: func = lfs; type = D; break; 240 case LFS: func = lfs; type = D; break;
277 case LFSU: func = lfs; type = DU; break; 241 case LFSU: func = lfs; type = DU; break;
@@ -416,21 +380,16 @@ do_mathemu(struct pt_regs *regs)
416 case XE: 380 case XE:
417 idx = (insn >> 16) & 0x1f; 381 idx = (insn >> 16) & 0x1f;
418 op0 = (void *)&current->thread.TS_FPR((insn >> 21) & 0x1f); 382 op0 = (void *)&current->thread.TS_FPR((insn >> 21) & 0x1f);
419 if (!idx) { 383 op1 = (void *)((idx ? regs->gpr[idx] : 0)
420 if (((insn >> 1) & 0x3ff) == STFIWX) 384 + regs->gpr[(insn >> 11) & 0x1f]);
421 op1 = (void *)(regs->gpr[(insn >> 11) & 0x1f]);
422 else
423 goto illegal;
424 } else {
425 op1 = (void *)(regs->gpr[idx] + regs->gpr[(insn >> 11) & 0x1f]);
426 }
427
428 break; 385 break;
429 386
430 case XEU: 387 case XEU:
431 idx = (insn >> 16) & 0x1f; 388 idx = (insn >> 16) & 0x1f;
389 if (!idx)
390 goto illegal;
432 op0 = (void *)&current->thread.TS_FPR((insn >> 21) & 0x1f); 391 op0 = (void *)&current->thread.TS_FPR((insn >> 21) & 0x1f);
433 op1 = (void *)((idx ? regs->gpr[idx] : 0) 392 op1 = (void *)(regs->gpr[idx]
434 + regs->gpr[(insn >> 11) & 0x1f]); 393 + regs->gpr[(insn >> 11) & 0x1f]);
435 break; 394 break;
436 395
@@ -465,6 +424,13 @@ do_mathemu(struct pt_regs *regs)
465 goto illegal; 424 goto illegal;
466 } 425 }
467 426
427 /*
428 * If we support a HW FPU, we need to ensure the FP state
429 * is flushed into the thread_struct before attempting
430 * emulation
431 */
432 flush_fp_to_thread(current);
433
468 eflag = func(op0, op1, op2, op3); 434 eflag = func(op0, op1, op2, op3);
469 435
470 if (insn & 1) { 436 if (insn & 1) {
@@ -485,7 +451,6 @@ do_mathemu(struct pt_regs *regs)
485 default: 451 default:
486 break; 452 break;
487 } 453 }
488#endif /* CONFIG_MATH_EMULATION */
489 454
490 regs->nip += 4; 455 regs->nip += 4;
491 return 0; 456 return 0;
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 8726779e1409..76d8e7cc7805 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -443,8 +443,12 @@ good_area:
443 regs, address); 443 regs, address);
444#ifdef CONFIG_PPC_SMLPAR 444#ifdef CONFIG_PPC_SMLPAR
445 if (firmware_has_feature(FW_FEATURE_CMO)) { 445 if (firmware_has_feature(FW_FEATURE_CMO)) {
446 u32 page_ins;
447
446 preempt_disable(); 448 preempt_disable();
447 get_lppaca()->page_ins += (1 << PAGE_FACTOR); 449 page_ins = be32_to_cpu(get_lppaca()->page_ins);
450 page_ins += 1 << PAGE_FACTOR;
451 get_lppaca()->page_ins = cpu_to_be32(page_ins);
448 preempt_enable(); 452 preempt_enable();
449 } 453 }
450#endif /* CONFIG_PPC_SMLPAR */ 454#endif /* CONFIG_PPC_SMLPAR */
diff --git a/arch/powerpc/mm/gup.c b/arch/powerpc/mm/gup.c
index 49822d90ea96..6936547018b8 100644
--- a/arch/powerpc/mm/gup.c
+++ b/arch/powerpc/mm/gup.c
@@ -117,8 +117,8 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
117 return 1; 117 return 1;
118} 118}
119 119
120int get_user_pages_fast(unsigned long start, int nr_pages, int write, 120int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
121 struct page **pages) 121 struct page **pages)
122{ 122{
123 struct mm_struct *mm = current->mm; 123 struct mm_struct *mm = current->mm;
124 unsigned long addr, len, end; 124 unsigned long addr, len, end;
@@ -135,7 +135,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
135 135
136 if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ, 136 if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
137 start, len))) 137 start, len)))
138 goto slow_irqon; 138 return 0;
139 139
140 pr_devel(" aligned: %lx .. %lx\n", start, end); 140 pr_devel(" aligned: %lx .. %lx\n", start, end);
141 141
@@ -166,30 +166,35 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
166 (void *)pgd_val(pgd)); 166 (void *)pgd_val(pgd));
167 next = pgd_addr_end(addr, end); 167 next = pgd_addr_end(addr, end);
168 if (pgd_none(pgd)) 168 if (pgd_none(pgd))
169 goto slow; 169 break;
170 if (pgd_huge(pgd)) { 170 if (pgd_huge(pgd)) {
171 if (!gup_hugepte((pte_t *)pgdp, PGDIR_SIZE, addr, next, 171 if (!gup_hugepte((pte_t *)pgdp, PGDIR_SIZE, addr, next,
172 write, pages, &nr)) 172 write, pages, &nr))
173 goto slow; 173 break;
174 } else if (is_hugepd(pgdp)) { 174 } else if (is_hugepd(pgdp)) {
175 if (!gup_hugepd((hugepd_t *)pgdp, PGDIR_SHIFT, 175 if (!gup_hugepd((hugepd_t *)pgdp, PGDIR_SHIFT,
176 addr, next, write, pages, &nr)) 176 addr, next, write, pages, &nr))
177 goto slow; 177 break;
178 } else if (!gup_pud_range(pgd, addr, next, write, pages, &nr)) 178 } else if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
179 goto slow; 179 break;
180 } while (pgdp++, addr = next, addr != end); 180 } while (pgdp++, addr = next, addr != end);
181 181
182 local_irq_enable(); 182 local_irq_enable();
183 183
184 VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
185 return nr; 184 return nr;
185}
186 186
187 { 187int get_user_pages_fast(unsigned long start, int nr_pages, int write,
188 int ret; 188 struct page **pages)
189{
190 struct mm_struct *mm = current->mm;
191 int nr, ret;
192
193 start &= PAGE_MASK;
194 nr = __get_user_pages_fast(start, nr_pages, write, pages);
195 ret = nr;
189 196
190slow: 197 if (nr < nr_pages) {
191 local_irq_enable();
192slow_irqon:
193 pr_devel(" slow path ! nr = %d\n", nr); 198 pr_devel(" slow path ! nr = %d\n", nr);
194 199
195 /* Try to get the remaining pages with get_user_pages */ 200 /* Try to get the remaining pages with get_user_pages */
@@ -198,7 +203,7 @@ slow_irqon:
198 203
199 down_read(&mm->mmap_sem); 204 down_read(&mm->mmap_sem);
200 ret = get_user_pages(current, mm, start, 205 ret = get_user_pages(current, mm, start,
201 (end - start) >> PAGE_SHIFT, write, 0, pages, NULL); 206 nr_pages - nr, write, 0, pages, NULL);
202 up_read(&mm->mmap_sem); 207 up_read(&mm->mmap_sem);
203 208
204 /* Have to be a bit careful with return values */ 209 /* Have to be a bit careful with return values */
@@ -208,9 +213,9 @@ slow_irqon:
208 else 213 else
209 ret += nr; 214 ret += nr;
210 } 215 }
211
212 return ret;
213 } 216 }
217
218 return ret;
214} 219}
215 220
216#endif /* __HAVE_ARCH_PTE_SPECIAL */ 221#endif /* __HAVE_ARCH_PTE_SPECIAL */
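
The gup.c rework splits the lockless walk into __get_user_pages_fast(), which returns however many pages it managed to pin with IRQs off, and has get_user_pages_fast() fall back to the mmap_sem-protected slow path only for the remainder, replacing the old goto-based retry of the whole range. Condensed control flow (function names as in the diff; the lines advancing start/pages fall between the hunks shown):

	int get_user_pages_fast(unsigned long start, int nr_pages, int write,
				struct page **pages)
	{
		struct mm_struct *mm = current->mm;
		int nr, ret;

		start &= PAGE_MASK;
		nr = __get_user_pages_fast(start, nr_pages, write, pages);
		ret = nr;

		if (nr < nr_pages) {		/* fast path stopped early */
			start += (unsigned long)nr << PAGE_SHIFT;
			pages += nr;

			down_read(&mm->mmap_sem);
			ret = get_user_pages(current, mm, start,
					     nr_pages - nr, write, 0, pages, NULL);
			up_read(&mm->mmap_sem);

			/* partial success from the fast path still counts */
			if (nr > 0)
				ret = (ret < 0) ? nr : ret + nr;
		}
		return ret;
	}
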
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 6ecc38bd5b24..bde8b5589755 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -907,7 +907,7 @@ static int subpage_protection(struct mm_struct *mm, unsigned long ea)
907 907
908 if (ea >= spt->maxaddr) 908 if (ea >= spt->maxaddr)
909 return 0; 909 return 0;
910 if (ea < 0x100000000) { 910 if (ea < 0x100000000UL) {
911 /* addresses below 4GB use spt->low_prot */ 911 /* addresses below 4GB use spt->low_prot */
912 sbpm = spt->low_prot; 912 sbpm = spt->low_prot;
913 } else { 913 } else {
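
0x100000000 does not fit in 32 bits, so without a suffix its type falls out of the hex-constant promotion ladder (int, unsigned int, long, unsigned long, long long, ...), which differs between 32- and 64-bit targets; the UL suffix pins it to unsigned long, 64-bit on ppc64, making the comparison's types explicit. The same fix recurs in subpage-prot.c and (for BHRB_EA) in core-book3s.c further down. In miniature:

	unsigned long ea = 0xfffff000UL;

	if (ea < 0x100000000)	/* legal, but the constant's type is implicit */
		;
	if (ea < 0x100000000UL)	/* explicit: unsigned long, same as 'ea' */
		;
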
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 01e2db97a210..d47d3dab4870 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -52,7 +52,7 @@
52#if defined(CONFIG_KERNEL_START_BOOL) || defined(CONFIG_LOWMEM_SIZE_BOOL) 52#if defined(CONFIG_KERNEL_START_BOOL) || defined(CONFIG_LOWMEM_SIZE_BOOL)
53/* The amount of lowmem must be within 0xF0000000 - KERNELBASE. */ 53/* The amount of lowmem must be within 0xF0000000 - KERNELBASE. */
54#if (CONFIG_LOWMEM_SIZE > (0xF0000000 - PAGE_OFFSET)) 54#if (CONFIG_LOWMEM_SIZE > (0xF0000000 - PAGE_OFFSET))
55#error "You must adjust CONFIG_LOWMEM_SIZE or CONFIG_START_KERNEL" 55#error "You must adjust CONFIG_LOWMEM_SIZE or CONFIG_KERNEL_START"
56#endif 56#endif
57#endif 57#endif
58#define MAX_LOW_MEM CONFIG_LOWMEM_SIZE 58#define MAX_LOW_MEM CONFIG_LOWMEM_SIZE
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 7f4bea162026..1cf9c5b67f24 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -514,7 +514,7 @@ static int add_system_ram_resources(void)
514 res->name = "System RAM"; 514 res->name = "System RAM";
515 res->start = base; 515 res->start = base;
516 res->end = base + size - 1; 516 res->end = base + size - 1;
517 res->flags = IORESOURCE_MEM; 517 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
518 WARN_ON(request_resource(&iomem_resource, res) < 0); 518 WARN_ON(request_resource(&iomem_resource, res) < 0);
519 } 519 }
520 } 520 }
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 5850798826cd..c916127f10c3 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -58,7 +58,7 @@ static int form1_affinity;
58 58
59#define MAX_DISTANCE_REF_POINTS 4 59#define MAX_DISTANCE_REF_POINTS 4
60static int distance_ref_points_depth; 60static int distance_ref_points_depth;
61static const unsigned int *distance_ref_points; 61static const __be32 *distance_ref_points;
62static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS]; 62static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];
63 63
64/* 64/*
@@ -179,7 +179,7 @@ static void unmap_cpu_from_node(unsigned long cpu)
179#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */ 179#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */
180 180
181/* must hold reference to node during call */ 181/* must hold reference to node during call */
182static const int *of_get_associativity(struct device_node *dev) 182static const __be32 *of_get_associativity(struct device_node *dev)
183{ 183{
184 return of_get_property(dev, "ibm,associativity", NULL); 184 return of_get_property(dev, "ibm,associativity", NULL);
185} 185}
@@ -189,9 +189,9 @@ static const int *of_get_associativity(struct device_node *dev)
189 * it exists (the property exists only in kexec/kdump kernels, 189 * it exists (the property exists only in kexec/kdump kernels,
190 * added by kexec-tools) 190 * added by kexec-tools)
191 */ 191 */
192static const u32 *of_get_usable_memory(struct device_node *memory) 192static const __be32 *of_get_usable_memory(struct device_node *memory)
193{ 193{
194 const u32 *prop; 194 const __be32 *prop;
195 u32 len; 195 u32 len;
196 prop = of_get_property(memory, "linux,drconf-usable-memory", &len); 196 prop = of_get_property(memory, "linux,drconf-usable-memory", &len);
197 if (!prop || len < sizeof(unsigned int)) 197 if (!prop || len < sizeof(unsigned int))
@@ -219,7 +219,7 @@ int __node_distance(int a, int b)
219} 219}
220 220
221static void initialize_distance_lookup_table(int nid, 221static void initialize_distance_lookup_table(int nid,
222 const unsigned int *associativity) 222 const __be32 *associativity)
223{ 223{
224 int i; 224 int i;
225 225
@@ -227,29 +227,32 @@ static void initialize_distance_lookup_table(int nid,
227 return; 227 return;
228 228
229 for (i = 0; i < distance_ref_points_depth; i++) { 229 for (i = 0; i < distance_ref_points_depth; i++) {
230 distance_lookup_table[nid][i] = 230 const __be32 *entry;
231 associativity[distance_ref_points[i]]; 231
232 entry = &associativity[be32_to_cpu(distance_ref_points[i])];
233 distance_lookup_table[nid][i] = of_read_number(entry, 1);
232 } 234 }
233} 235}
234 236
235/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa 237/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
236 * info is found. 238 * info is found.
237 */ 239 */
238static int associativity_to_nid(const unsigned int *associativity) 240static int associativity_to_nid(const __be32 *associativity)
239{ 241{
240 int nid = -1; 242 int nid = -1;
241 243
242 if (min_common_depth == -1) 244 if (min_common_depth == -1)
243 goto out; 245 goto out;
244 246
245 if (associativity[0] >= min_common_depth) 247 if (of_read_number(associativity, 1) >= min_common_depth)
246 nid = associativity[min_common_depth]; 248 nid = of_read_number(&associativity[min_common_depth], 1);
247 249
248 /* POWER4 LPAR uses 0xffff as invalid node */ 250 /* POWER4 LPAR uses 0xffff as invalid node */
249 if (nid == 0xffff || nid >= MAX_NUMNODES) 251 if (nid == 0xffff || nid >= MAX_NUMNODES)
250 nid = -1; 252 nid = -1;
251 253
252 if (nid > 0 && associativity[0] >= distance_ref_points_depth) 254 if (nid > 0 &&
255 of_read_number(associativity, 1) >= distance_ref_points_depth)
253 initialize_distance_lookup_table(nid, associativity); 256 initialize_distance_lookup_table(nid, associativity);
254 257
255out: 258out:
@@ -262,7 +265,7 @@ out:
262static int of_node_to_nid_single(struct device_node *device) 265static int of_node_to_nid_single(struct device_node *device)
263{ 266{
264 int nid = -1; 267 int nid = -1;
265 const unsigned int *tmp; 268 const __be32 *tmp;
266 269
267 tmp = of_get_associativity(device); 270 tmp = of_get_associativity(device);
268 if (tmp) 271 if (tmp)
@@ -334,7 +337,7 @@ static int __init find_min_common_depth(void)
334 } 337 }
335 338
336 if (form1_affinity) { 339 if (form1_affinity) {
337 depth = distance_ref_points[0]; 340 depth = of_read_number(distance_ref_points, 1);
338 } else { 341 } else {
339 if (distance_ref_points_depth < 2) { 342 if (distance_ref_points_depth < 2) {
340 printk(KERN_WARNING "NUMA: " 343 printk(KERN_WARNING "NUMA: "
@@ -342,7 +345,7 @@ static int __init find_min_common_depth(void)
342 goto err; 345 goto err;
343 } 346 }
344 347
345 depth = distance_ref_points[1]; 348 depth = of_read_number(&distance_ref_points[1], 1);
346 } 349 }
347 350
348 /* 351 /*
@@ -376,12 +379,12 @@ static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
376 of_node_put(memory); 379 of_node_put(memory);
377} 380}
378 381
379static unsigned long read_n_cells(int n, const unsigned int **buf) 382static unsigned long read_n_cells(int n, const __be32 **buf)
380{ 383{
381 unsigned long result = 0; 384 unsigned long result = 0;
382 385
383 while (n--) { 386 while (n--) {
384 result = (result << 32) | **buf; 387 result = (result << 32) | of_read_number(*buf, 1);
385 (*buf)++; 388 (*buf)++;
386 } 389 }
387 return result; 390 return result;
@@ -391,17 +394,17 @@ static unsigned long read_n_cells(int n, const unsigned int **buf)
391 * Read the next memblock list entry from the ibm,dynamic-memory property 394 * Read the next memblock list entry from the ibm,dynamic-memory property
392 * and return the information in the provided of_drconf_cell structure. 395 * and return the information in the provided of_drconf_cell structure.
393 */ 396 */
394static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp) 397static void read_drconf_cell(struct of_drconf_cell *drmem, const __be32 **cellp)
395{ 398{
396 const u32 *cp; 399 const __be32 *cp;
397 400
398 drmem->base_addr = read_n_cells(n_mem_addr_cells, cellp); 401 drmem->base_addr = read_n_cells(n_mem_addr_cells, cellp);
399 402
400 cp = *cellp; 403 cp = *cellp;
401 drmem->drc_index = cp[0]; 404 drmem->drc_index = of_read_number(cp, 1);
402 drmem->reserved = cp[1]; 405 drmem->reserved = of_read_number(&cp[1], 1);
403 drmem->aa_index = cp[2]; 406 drmem->aa_index = of_read_number(&cp[2], 1);
404 drmem->flags = cp[3]; 407 drmem->flags = of_read_number(&cp[3], 1);
405 408
406 *cellp = cp + 4; 409 *cellp = cp + 4;
407} 410}
@@ -413,16 +416,16 @@ static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp)
413 * list entries followed by N memblock list entries. Each memblock list entry 416 * list entries followed by N memblock list entries. Each memblock list entry
414 * contains information as laid out in the of_drconf_cell struct above. 417 * contains information as laid out in the of_drconf_cell struct above.
415 */ 418 */
416static int of_get_drconf_memory(struct device_node *memory, const u32 **dm) 419static int of_get_drconf_memory(struct device_node *memory, const __be32 **dm)
417{ 420{
418 const u32 *prop; 421 const __be32 *prop;
419 u32 len, entries; 422 u32 len, entries;
420 423
421 prop = of_get_property(memory, "ibm,dynamic-memory", &len); 424 prop = of_get_property(memory, "ibm,dynamic-memory", &len);
422 if (!prop || len < sizeof(unsigned int)) 425 if (!prop || len < sizeof(unsigned int))
423 return 0; 426 return 0;
424 427
425 entries = *prop++; 428 entries = of_read_number(prop++, 1);
426 429
427 /* Now that we know the number of entries, revalidate the size 430 /* Now that we know the number of entries, revalidate the size
428 * of the property read in to ensure we have everything 431 * of the property read in to ensure we have everything
@@ -440,7 +443,7 @@ static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
440 */ 443 */
441static u64 of_get_lmb_size(struct device_node *memory) 444static u64 of_get_lmb_size(struct device_node *memory)
442{ 445{
443 const u32 *prop; 446 const __be32 *prop;
444 u32 len; 447 u32 len;
445 448
446 prop = of_get_property(memory, "ibm,lmb-size", &len); 449 prop = of_get_property(memory, "ibm,lmb-size", &len);
@@ -453,7 +456,7 @@ static u64 of_get_lmb_size(struct device_node *memory)
453struct assoc_arrays { 456struct assoc_arrays {
454 u32 n_arrays; 457 u32 n_arrays;
455 u32 array_sz; 458 u32 array_sz;
456 const u32 *arrays; 459 const __be32 *arrays;
457}; 460};
458 461
459/* 462/*
@@ -469,15 +472,15 @@ struct assoc_arrays {
469static int of_get_assoc_arrays(struct device_node *memory, 472static int of_get_assoc_arrays(struct device_node *memory,
470 struct assoc_arrays *aa) 473 struct assoc_arrays *aa)
471{ 474{
472 const u32 *prop; 475 const __be32 *prop;
473 u32 len; 476 u32 len;
474 477
475 prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len); 478 prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
476 if (!prop || len < 2 * sizeof(unsigned int)) 479 if (!prop || len < 2 * sizeof(unsigned int))
477 return -1; 480 return -1;
478 481
479 aa->n_arrays = *prop++; 482 aa->n_arrays = of_read_number(prop++, 1);
480 aa->array_sz = *prop++; 483 aa->array_sz = of_read_number(prop++, 1);
481 484
482 /* Now that we know the number of arrays and size of each array, 485 /* Now that we know the number of arrays and size of each array,
483 * revalidate the size of the property read in. 486 * revalidate the size of the property read in.
@@ -504,7 +507,7 @@ static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
504 !(drmem->flags & DRCONF_MEM_AI_INVALID) && 507 !(drmem->flags & DRCONF_MEM_AI_INVALID) &&
505 drmem->aa_index < aa->n_arrays) { 508 drmem->aa_index < aa->n_arrays) {
506 index = drmem->aa_index * aa->array_sz + min_common_depth - 1; 509 index = drmem->aa_index * aa->array_sz + min_common_depth - 1;
507 nid = aa->arrays[index]; 510 nid = of_read_number(&aa->arrays[index], 1);
508 511
509 if (nid == 0xffff || nid >= MAX_NUMNODES) 512 if (nid == 0xffff || nid >= MAX_NUMNODES)
510 nid = default_nid; 513 nid = default_nid;
@@ -595,7 +598,7 @@ static unsigned long __init numa_enforce_memory_limit(unsigned long start,
595 * Reads the counter for a given entry in 598 * Reads the counter for a given entry in
596 * linux,drconf-usable-memory property 599 * linux,drconf-usable-memory property
597 */ 600 */
598static inline int __init read_usm_ranges(const u32 **usm) 601static inline int __init read_usm_ranges(const __be32 **usm)
599{ 602{
600 /* 603 /*
601 * For each lmb in ibm,dynamic-memory a corresponding 604 * For each lmb in ibm,dynamic-memory a corresponding
@@ -612,7 +615,7 @@ static inline int __init read_usm_ranges(const u32 **usm)
612 */ 615 */
613static void __init parse_drconf_memory(struct device_node *memory) 616static void __init parse_drconf_memory(struct device_node *memory)
614{ 617{
615 const u32 *uninitialized_var(dm), *usm; 618 const __be32 *uninitialized_var(dm), *usm;
616 unsigned int n, rc, ranges, is_kexec_kdump = 0; 619 unsigned int n, rc, ranges, is_kexec_kdump = 0;
617 unsigned long lmb_size, base, size, sz; 620 unsigned long lmb_size, base, size, sz;
618 int nid; 621 int nid;
@@ -721,7 +724,7 @@ static int __init parse_numa_properties(void)
721 unsigned long size; 724 unsigned long size;
722 int nid; 725 int nid;
723 int ranges; 726 int ranges;
724 const unsigned int *memcell_buf; 727 const __be32 *memcell_buf;
725 unsigned int len; 728 unsigned int len;
726 729
727 memcell_buf = of_get_property(memory, 730 memcell_buf = of_get_property(memory,
@@ -1106,7 +1109,7 @@ early_param("numa", early_numa);
1106static int hot_add_drconf_scn_to_nid(struct device_node *memory, 1109static int hot_add_drconf_scn_to_nid(struct device_node *memory,
1107 unsigned long scn_addr) 1110 unsigned long scn_addr)
1108{ 1111{
1109 const u32 *dm; 1112 const __be32 *dm;
1110 unsigned int drconf_cell_cnt, rc; 1113 unsigned int drconf_cell_cnt, rc;
1111 unsigned long lmb_size; 1114 unsigned long lmb_size;
1112 struct assoc_arrays aa; 1115 struct assoc_arrays aa;
@@ -1159,7 +1162,7 @@ int hot_add_node_scn_to_nid(unsigned long scn_addr)
1159 for_each_node_by_type(memory, "memory") { 1162 for_each_node_by_type(memory, "memory") {
1160 unsigned long start, size; 1163 unsigned long start, size;
1161 int ranges; 1164 int ranges;
1162 const unsigned int *memcell_buf; 1165 const __be32 *memcell_buf;
1163 unsigned int len; 1166 unsigned int len;
1164 1167
1165 memcell_buf = of_get_property(memory, "reg", &len); 1168 memcell_buf = of_get_property(memory, "reg", &len);
@@ -1232,7 +1235,7 @@ static u64 hot_add_drconf_memory_max(void)
1232 struct device_node *memory = NULL; 1235 struct device_node *memory = NULL;
1233 unsigned int drconf_cell_cnt = 0; 1236 unsigned int drconf_cell_cnt = 0;
1234 u64 lmb_size = 0; 1237 u64 lmb_size = 0;
1235 const u32 *dm = 0; 1238 const __be32 *dm = 0;
1236 1239
1237 memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); 1240 memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
1238 if (memory) { 1241 if (memory) {
@@ -1337,40 +1340,41 @@ static int update_cpu_associativity_changes_mask(void)
1337 * Convert the associativity domain numbers returned from the hypervisor 1340 * Convert the associativity domain numbers returned from the hypervisor
1338 * to the sequence they would appear in the ibm,associativity property. 1341 * to the sequence they would appear in the ibm,associativity property.
1339 */ 1342 */
1340static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked) 1343static int vphn_unpack_associativity(const long *packed, __be32 *unpacked)
1341{ 1344{
1342 int i, nr_assoc_doms = 0; 1345 int i, nr_assoc_doms = 0;
1343 const u16 *field = (const u16*) packed; 1346 const __be16 *field = (const __be16 *) packed;
1344 1347
1345#define VPHN_FIELD_UNUSED (0xffff) 1348#define VPHN_FIELD_UNUSED (0xffff)
1346#define VPHN_FIELD_MSB (0x8000) 1349#define VPHN_FIELD_MSB (0x8000)
1347#define VPHN_FIELD_MASK (~VPHN_FIELD_MSB) 1350#define VPHN_FIELD_MASK (~VPHN_FIELD_MSB)
1348 1351
1349 for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) { 1352 for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) {
1350 if (*field == VPHN_FIELD_UNUSED) { 1353 if (be16_to_cpup(field) == VPHN_FIELD_UNUSED) {
1351 /* All significant fields processed, and remaining 1354 /* All significant fields processed, and remaining
1352 * fields contain the reserved value of all 1's. 1355 * fields contain the reserved value of all 1's.
1353 * Just store them. 1356 * Just store them.
1354 */ 1357 */
1355 unpacked[i] = *((u32*)field); 1358 unpacked[i] = *((__be32 *)field);
1356 field += 2; 1359 field += 2;
1357 } else if (*field & VPHN_FIELD_MSB) { 1360 } else if (be16_to_cpup(field) & VPHN_FIELD_MSB) {
1358 /* Data is in the lower 15 bits of this field */ 1361 /* Data is in the lower 15 bits of this field */
1359 unpacked[i] = *field & VPHN_FIELD_MASK; 1362 unpacked[i] = cpu_to_be32(
1363 be16_to_cpup(field) & VPHN_FIELD_MASK);
1360 field++; 1364 field++;
1361 nr_assoc_doms++; 1365 nr_assoc_doms++;
1362 } else { 1366 } else {
1363 /* Data is in the lower 15 bits of this field 1367 /* Data is in the lower 15 bits of this field
1364 * concatenated with the next 16 bit field 1368 * concatenated with the next 16 bit field
1365 */ 1369 */
1366 unpacked[i] = *((u32*)field); 1370 unpacked[i] = *((__be32 *)field);
1367 field += 2; 1371 field += 2;
1368 nr_assoc_doms++; 1372 nr_assoc_doms++;
1369 } 1373 }
1370 } 1374 }
1371 1375
1372 /* The first cell contains the length of the property */ 1376 /* The first cell contains the length of the property */
1373 unpacked[0] = nr_assoc_doms; 1377 unpacked[0] = cpu_to_be32(nr_assoc_doms);
1374 1378
1375 return nr_assoc_doms; 1379 return nr_assoc_doms;
1376} 1380}
@@ -1379,7 +1383,7 @@ static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked)
1379 * Retrieve the new associativity information for a virtual processor's 1383 * Retrieve the new associativity information for a virtual processor's
1380 * home node. 1384 * home node.
1381 */ 1385 */
1382static long hcall_vphn(unsigned long cpu, unsigned int *associativity) 1386static long hcall_vphn(unsigned long cpu, __be32 *associativity)
1383{ 1387{
1384 long rc; 1388 long rc;
1385 long retbuf[PLPAR_HCALL9_BUFSIZE] = {0}; 1389 long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
@@ -1393,7 +1397,7 @@ static long hcall_vphn(unsigned long cpu, unsigned int *associativity)
1393} 1397}
1394 1398
1395static long vphn_get_associativity(unsigned long cpu, 1399static long vphn_get_associativity(unsigned long cpu,
1396 unsigned int *associativity) 1400 __be32 *associativity)
1397{ 1401{
1398 long rc; 1402 long rc;
1399 1403
@@ -1450,7 +1454,7 @@ int arch_update_cpu_topology(void)
1450{ 1454{
1451 unsigned int cpu, sibling, changed = 0; 1455 unsigned int cpu, sibling, changed = 0;
1452 struct topology_update_data *updates, *ud; 1456 struct topology_update_data *updates, *ud;
1453 unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0}; 1457 __be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
1454 cpumask_t updated_cpus; 1458 cpumask_t updated_cpus;
1455 struct device *dev; 1459 struct device *dev;
1456 int weight, new_nid, i = 0; 1460 int weight, new_nid, i = 0;
@@ -1609,7 +1613,7 @@ int start_topology_update(void)
1609#endif 1613#endif
1610 } 1614 }
1611 } else if (firmware_has_feature(FW_FEATURE_VPHN) && 1615 } else if (firmware_has_feature(FW_FEATURE_VPHN) &&
1612 get_lppaca()->shared_proc) { 1616 lppaca_shared_proc(get_lppaca())) {
1613 if (!vphn_enabled) { 1617 if (!vphn_enabled) {
1614 prrn_enabled = 0; 1618 prrn_enabled = 0;
1615 vphn_enabled = 1; 1619 vphn_enabled = 1;
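
The theme of the numa.c changes: flattened device tree data is big-endian on every platform, so the raw property pointers become const __be32 * and every access goes through of_read_number()/be32_to_cpu() instead of a plain dereference, which is what lets the same code run on a little-endian kernel. A minimal sketch of the idiom (the property name is hypothetical):

	const __be32 *prop;
	u32 len, count;
	u64 addr;

	prop = of_get_property(node, "example,ranges", &len);	/* hypothetical */
	if (!prop || len < 3 * sizeof(__be32))
		return -ENODEV;

	/* of_read_number(cells, n) folds n big-endian cells into host order */
	addr  = of_read_number(prop, 2);	/* two cells -> u64 */
	count = of_read_number(&prop[2], 1);	/* one cell  -> u32 */
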
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index a538c80db2df..9d1d33cd2be5 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -66,8 +66,10 @@ static inline void slb_shadow_update(unsigned long ea, int ssize,
66 * we only update the current CPU's SLB shadow buffer. 66 * we only update the current CPU's SLB shadow buffer.
67 */ 67 */
68 get_slb_shadow()->save_area[entry].esid = 0; 68 get_slb_shadow()->save_area[entry].esid = 0;
69 get_slb_shadow()->save_area[entry].vsid = mk_vsid_data(ea, ssize, flags); 69 get_slb_shadow()->save_area[entry].vsid =
70 get_slb_shadow()->save_area[entry].esid = mk_esid_data(ea, ssize, entry); 70 cpu_to_be64(mk_vsid_data(ea, ssize, flags));
71 get_slb_shadow()->save_area[entry].esid =
72 cpu_to_be64(mk_esid_data(ea, ssize, entry));
71} 73}
72 74
73static inline void slb_shadow_clear(unsigned long entry) 75static inline void slb_shadow_clear(unsigned long entry)
@@ -112,7 +114,8 @@ static void __slb_flush_and_rebolt(void)
112 } else { 114 } else {
113 /* Update stack entry; others don't change */ 115 /* Update stack entry; others don't change */
114 slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, 2); 116 slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, 2);
115 ksp_vsid_data = get_slb_shadow()->save_area[2].vsid; 117 ksp_vsid_data =
118 be64_to_cpu(get_slb_shadow()->save_area[2].vsid);
116 } 119 }
117 120
118 /* We need to do this all in asm, so we're sure we don't touch 121 /* We need to do this all in asm, so we're sure we don't touch
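
Two things are going on in slb.c: the vsid/esid fields gain cpu_to_be64()/be64_to_cpu() because the SLB shadow buffer is consumed big-endian by the hypervisor, and the pre-existing update order is preserved because it is load-bearing: the esid is zeroed first, the vsid written, and only then the esid rewritten, so a concurrent hypervisor read never pairs a valid esid with a stale vsid (hence the surviving comment that barriers are unneeded as long as only the current CPU's shadow buffer is touched). Condensed, with the ordering called out:

	/* 1. invalidate: a reader must not see the old esid with a new vsid */
	get_slb_shadow()->save_area[entry].esid = 0;
	/* 2. install the new vsid while the entry is invalid */
	get_slb_shadow()->save_area[entry].vsid =
		cpu_to_be64(mk_vsid_data(ea, ssize, flags));
	/* 3. revalidate with the matching esid */
	get_slb_shadow()->save_area[entry].esid =
		cpu_to_be64(mk_esid_data(ea, ssize, entry));
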
diff --git a/arch/powerpc/mm/subpage-prot.c b/arch/powerpc/mm/subpage-prot.c
index aa74acb0fdfc..a770df2dae70 100644
--- a/arch/powerpc/mm/subpage-prot.c
+++ b/arch/powerpc/mm/subpage-prot.c
@@ -105,7 +105,7 @@ static void subpage_prot_clear(unsigned long addr, unsigned long len)
105 limit = spt->maxaddr; 105 limit = spt->maxaddr;
106 for (; addr < limit; addr = next) { 106 for (; addr < limit; addr = next) {
107 next = pmd_addr_end(addr, limit); 107 next = pmd_addr_end(addr, limit);
108 if (addr < 0x100000000) { 108 if (addr < 0x100000000UL) {
109 spm = spt->low_prot; 109 spm = spt->low_prot;
110 } else { 110 } else {
111 spm = spt->protptrs[addr >> SBP_L3_SHIFT]; 111 spm = spt->protptrs[addr >> SBP_L3_SHIFT];
@@ -219,7 +219,7 @@ long sys_subpage_prot(unsigned long addr, unsigned long len, u32 __user *map)
219 for (limit = addr + len; addr < limit; addr = next) { 219 for (limit = addr + len; addr < limit; addr = next) {
220 next = pmd_addr_end(addr, limit); 220 next = pmd_addr_end(addr, limit);
221 err = -ENOMEM; 221 err = -ENOMEM;
222 if (addr < 0x100000000) { 222 if (addr < 0x100000000UL) {
223 spm = spt->low_prot; 223 spm = spt->low_prot;
224 } else { 224 } else {
225 spm = spt->protptrs[addr >> SBP_L3_SHIFT]; 225 spm = spt->protptrs[addr >> SBP_L3_SHIFT];
diff --git a/arch/powerpc/oprofile/common.c b/arch/powerpc/oprofile/common.c
index 4f51025f5b00..c77348c5d463 100644
--- a/arch/powerpc/oprofile/common.c
+++ b/arch/powerpc/oprofile/common.c
@@ -119,7 +119,7 @@ static void op_powerpc_stop(void)
119 model->global_stop(); 119 model->global_stop();
120} 120}
121 121
122static int op_powerpc_create_files(struct super_block *sb, struct dentry *root) 122static int op_powerpc_create_files(struct dentry *root)
123{ 123{
124 int i; 124 int i;
125 125
@@ -128,9 +128,9 @@ static int op_powerpc_create_files(struct super_block *sb, struct dentry *root)
128 * There is one mmcr0, mmcr1 and mmcra for setting the events for 128 * There is one mmcr0, mmcr1 and mmcra for setting the events for
129 * all of the counters. 129 * all of the counters.
130 */ 130 */
131 oprofilefs_create_ulong(sb, root, "mmcr0", &sys.mmcr0); 131 oprofilefs_create_ulong(root, "mmcr0", &sys.mmcr0);
132 oprofilefs_create_ulong(sb, root, "mmcr1", &sys.mmcr1); 132 oprofilefs_create_ulong(root, "mmcr1", &sys.mmcr1);
133 oprofilefs_create_ulong(sb, root, "mmcra", &sys.mmcra); 133 oprofilefs_create_ulong(root, "mmcra", &sys.mmcra);
134#ifdef CONFIG_OPROFILE_CELL 134#ifdef CONFIG_OPROFILE_CELL
135 /* create a file the user tool can check to see what level of profiling 135 /* create a file the user tool can check to see what level of profiling
136 * support exists with this kernel. Initialize bit mask to indicate 136 * support exists with this kernel. Initialize bit mask to indicate
@@ -142,7 +142,7 @@ static int op_powerpc_create_files(struct super_block *sb, struct dentry *root)
142 * If the file does not exist, then the kernel only supports SPU 142 * If the file does not exist, then the kernel only supports SPU
143 * cycle profiling, PPU event and cycle profiling. 143 * cycle profiling, PPU event and cycle profiling.
144 */ 144 */
145 oprofilefs_create_ulong(sb, root, "cell_support", &sys.cell_support); 145 oprofilefs_create_ulong(root, "cell_support", &sys.cell_support);
146 sys.cell_support = 0x1; /* Note, the user OProfile tool must check 146 sys.cell_support = 0x1; /* Note, the user OProfile tool must check
147 * that this bit is set before attempting to 147 * that this bit is set before attempting to
148 * use SPU event profiling. Older kernels 148 * use SPU event profiling. Older kernels
@@ -160,11 +160,11 @@ static int op_powerpc_create_files(struct super_block *sb, struct dentry *root)
160 char buf[4]; 160 char buf[4];
161 161
162 snprintf(buf, sizeof buf, "%d", i); 162 snprintf(buf, sizeof buf, "%d", i);
163 dir = oprofilefs_mkdir(sb, root, buf); 163 dir = oprofilefs_mkdir(root, buf);
164 164
165 oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled); 165 oprofilefs_create_ulong(dir, "enabled", &ctr[i].enabled);
166 oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event); 166 oprofilefs_create_ulong(dir, "event", &ctr[i].event);
167 oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count); 167 oprofilefs_create_ulong(dir, "count", &ctr[i].count);
168 168
169 /* 169 /*
170 * Classic PowerPC doesn't support per-counter 170 * Classic PowerPC doesn't support per-counter
@@ -173,14 +173,14 @@ static int op_powerpc_create_files(struct super_block *sb, struct dentry *root)
173 * Book-E style performance monitors, we do 173 * Book-E style performance monitors, we do
174 * support them. 174 * support them.
175 */ 175 */
176 oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel); 176 oprofilefs_create_ulong(dir, "kernel", &ctr[i].kernel);
177 oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user); 177 oprofilefs_create_ulong(dir, "user", &ctr[i].user);
178 178
179 oprofilefs_create_ulong(sb, dir, "unit_mask", &ctr[i].unit_mask); 179 oprofilefs_create_ulong(dir, "unit_mask", &ctr[i].unit_mask);
180 } 180 }
181 181
182 oprofilefs_create_ulong(sb, root, "enable_kernel", &sys.enable_kernel); 182 oprofilefs_create_ulong(root, "enable_kernel", &sys.enable_kernel);
183 oprofilefs_create_ulong(sb, root, "enable_user", &sys.enable_user); 183 oprofilefs_create_ulong(root, "enable_user", &sys.enable_user);
184 184
185 /* Default to tracing both kernel and user */ 185 /* Default to tracing both kernel and user */
186 sys.enable_kernel = 1; 186 sys.enable_kernel = 1;
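
These oprofile hunks are mechanical fallout from an oprofilefs interface change: the struct super_block argument was dropped, so oprofilefs_create_ulong() and oprofilefs_mkdir() are keyed off the dentry alone and every call site loses its first parameter. After the change a caller looks like (taken from the hunks above):

	struct dentry *dir;

	oprofilefs_create_ulong(root, "mmcr0", &sys.mmcr0);
	dir = oprofilefs_mkdir(root, "0");
	oprofilefs_create_ulong(dir, "enabled", &ctr[0].enabled);
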
diff --git a/arch/powerpc/oprofile/op_model_fsl_emb.c b/arch/powerpc/oprofile/op_model_fsl_emb.c
index ccc1daa33aed..2a82d3ed464d 100644
--- a/arch/powerpc/oprofile/op_model_fsl_emb.c
+++ b/arch/powerpc/oprofile/op_model_fsl_emb.c
@@ -46,6 +46,12 @@ static inline u32 get_pmlca(int ctr)
46 case 3: 46 case 3:
47 pmlca = mfpmr(PMRN_PMLCA3); 47 pmlca = mfpmr(PMRN_PMLCA3);
48 break; 48 break;
49 case 4:
50 pmlca = mfpmr(PMRN_PMLCA4);
51 break;
52 case 5:
53 pmlca = mfpmr(PMRN_PMLCA5);
54 break;
49 default: 55 default:
50 panic("Bad ctr number\n"); 56 panic("Bad ctr number\n");
51 } 57 }
@@ -68,6 +74,12 @@ static inline void set_pmlca(int ctr, u32 pmlca)
68 case 3: 74 case 3:
69 mtpmr(PMRN_PMLCA3, pmlca); 75 mtpmr(PMRN_PMLCA3, pmlca);
70 break; 76 break;
77 case 4:
78 mtpmr(PMRN_PMLCA4, pmlca);
79 break;
80 case 5:
81 mtpmr(PMRN_PMLCA5, pmlca);
82 break;
71 default: 83 default:
72 panic("Bad ctr number\n"); 84 panic("Bad ctr number\n");
73 } 85 }
@@ -84,6 +96,10 @@ static inline unsigned int ctr_read(unsigned int i)
84 return mfpmr(PMRN_PMC2); 96 return mfpmr(PMRN_PMC2);
85 case 3: 97 case 3:
86 return mfpmr(PMRN_PMC3); 98 return mfpmr(PMRN_PMC3);
99 case 4:
100 return mfpmr(PMRN_PMC4);
101 case 5:
102 return mfpmr(PMRN_PMC5);
87 default: 103 default:
88 return 0; 104 return 0;
89 } 105 }
@@ -104,6 +120,12 @@ static inline void ctr_write(unsigned int i, unsigned int val)
104 case 3: 120 case 3:
105 mtpmr(PMRN_PMC3, val); 121 mtpmr(PMRN_PMC3, val);
106 break; 122 break;
123 case 4:
124 mtpmr(PMRN_PMC4, val);
125 break;
126 case 5:
127 mtpmr(PMRN_PMC5, val);
128 break;
107 default: 129 default:
108 break; 130 break;
109 } 131 }
@@ -133,6 +155,14 @@ static void init_pmc_stop(int ctr)
133 mtpmr(PMRN_PMLCA3, pmlca); 155 mtpmr(PMRN_PMLCA3, pmlca);
134 mtpmr(PMRN_PMLCB3, pmlcb); 156 mtpmr(PMRN_PMLCB3, pmlcb);
135 break; 157 break;
158 case 4:
159 mtpmr(PMRN_PMLCA4, pmlca);
160 mtpmr(PMRN_PMLCB4, pmlcb);
161 break;
162 case 5:
163 mtpmr(PMRN_PMLCA5, pmlca);
164 mtpmr(PMRN_PMLCB5, pmlcb);
165 break;
136 default: 166 default:
137 panic("Bad ctr number!\n"); 167 panic("Bad ctr number!\n");
138 } 168 }
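
The new case 4/case 5 arms look like candidates for a lookup table, but mfpmr()/mtpmr() on Freescale Book-E expand to inline assembly that encodes the PMR number as an immediate in the instruction, so each register needs its own compile-time-constant arm and a switch is the only workable shape. The same pattern in isolation (read_pmlca is a hypothetical stand-in for get_pmlca above):

	static inline u32 read_pmlca(int ctr)
	{
		switch (ctr) {	/* PMR number must be a literal in the insn */
		case 4:
			return mfpmr(PMRN_PMLCA4);
		case 5:
			return mfpmr(PMRN_PMLCA5);
		default:
			panic("Bad ctr number\n");
		}
	}
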
diff --git a/arch/powerpc/perf/Makefile b/arch/powerpc/perf/Makefile
index 510fae10513d..60d71eea919c 100644
--- a/arch/powerpc/perf/Makefile
+++ b/arch/powerpc/perf/Makefile
@@ -9,7 +9,7 @@ obj64-$(CONFIG_PPC_PERF_CTRS) += power4-pmu.o ppc970-pmu.o power5-pmu.o \
9obj32-$(CONFIG_PPC_PERF_CTRS) += mpc7450-pmu.o 9obj32-$(CONFIG_PPC_PERF_CTRS) += mpc7450-pmu.o
10 10
11obj-$(CONFIG_FSL_EMB_PERF_EVENT) += core-fsl-emb.o 11obj-$(CONFIG_FSL_EMB_PERF_EVENT) += core-fsl-emb.o
12obj-$(CONFIG_FSL_EMB_PERF_EVENT_E500) += e500-pmu.o 12obj-$(CONFIG_FSL_EMB_PERF_EVENT_E500) += e500-pmu.o e6500-pmu.o
13 13
14obj-$(CONFIG_PPC64) += $(obj64-y) 14obj-$(CONFIG_PPC64) += $(obj64-y)
15obj-$(CONFIG_PPC32) += $(obj32-y) 15obj-$(CONFIG_PPC32) += $(obj32-y)
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index eeae308cf982..29b89e863d7c 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -24,7 +24,7 @@
24#define BHRB_MAX_ENTRIES 32 24#define BHRB_MAX_ENTRIES 32
25#define BHRB_TARGET 0x0000000000000002 25#define BHRB_TARGET 0x0000000000000002
26#define BHRB_PREDICTION 0x0000000000000001 26#define BHRB_PREDICTION 0x0000000000000001
27#define BHRB_EA 0xFFFFFFFFFFFFFFFC 27#define BHRB_EA 0xFFFFFFFFFFFFFFFCUL
28 28
29struct cpu_hw_events { 29struct cpu_hw_events {
30 int n_events; 30 int n_events;
diff --git a/arch/powerpc/perf/core-fsl-emb.c b/arch/powerpc/perf/core-fsl-emb.c
index 106c53354675..d35ae52c69dc 100644
--- a/arch/powerpc/perf/core-fsl-emb.c
+++ b/arch/powerpc/perf/core-fsl-emb.c
@@ -70,6 +70,12 @@ static unsigned long read_pmc(int idx)
70 case 3: 70 case 3:
71 val = mfpmr(PMRN_PMC3); 71 val = mfpmr(PMRN_PMC3);
72 break; 72 break;
73 case 4:
74 val = mfpmr(PMRN_PMC4);
75 break;
76 case 5:
77 val = mfpmr(PMRN_PMC5);
78 break;
73 default: 79 default:
74 printk(KERN_ERR "oops trying to read PMC%d\n", idx); 80 printk(KERN_ERR "oops trying to read PMC%d\n", idx);
75 val = 0; 81 val = 0;
@@ -95,6 +101,12 @@ static void write_pmc(int idx, unsigned long val)
95 case 3: 101 case 3:
96 mtpmr(PMRN_PMC3, val); 102 mtpmr(PMRN_PMC3, val);
97 break; 103 break;
104 case 4:
105 mtpmr(PMRN_PMC4, val);
106 break;
107 case 5:
108 mtpmr(PMRN_PMC5, val);
109 break;
98 default: 110 default:
99 printk(KERN_ERR "oops trying to write PMC%d\n", idx); 111 printk(KERN_ERR "oops trying to write PMC%d\n", idx);
100 } 112 }
@@ -120,6 +132,12 @@ static void write_pmlca(int idx, unsigned long val)
120 case 3: 132 case 3:
121 mtpmr(PMRN_PMLCA3, val); 133 mtpmr(PMRN_PMLCA3, val);
122 break; 134 break;
135 case 4:
136 mtpmr(PMRN_PMLCA4, val);
137 break;
138 case 5:
139 mtpmr(PMRN_PMLCA5, val);
140 break;
123 default: 141 default:
124 printk(KERN_ERR "oops trying to write PMLCA%d\n", idx); 142 printk(KERN_ERR "oops trying to write PMLCA%d\n", idx);
125 } 143 }
@@ -145,6 +163,12 @@ static void write_pmlcb(int idx, unsigned long val)
145 case 3: 163 case 3:
146 mtpmr(PMRN_PMLCB3, val); 164 mtpmr(PMRN_PMLCB3, val);
147 break; 165 break;
166 case 4:
167 mtpmr(PMRN_PMLCB4, val);
168 break;
169 case 5:
170 mtpmr(PMRN_PMLCB5, val);
171 break;
148 default: 172 default:
149 printk(KERN_ERR "oops trying to write PMLCB%d\n", idx); 173 printk(KERN_ERR "oops trying to write PMLCB%d\n", idx);
150 } 174 }
@@ -462,6 +486,12 @@ static int fsl_emb_pmu_event_init(struct perf_event *event)
462 int num_restricted; 486 int num_restricted;
463 int i; 487 int i;
464 488
489 if (ppmu->n_counter > MAX_HWEVENTS) {
490 WARN(1, "No. of perf counters (%d) is higher than max array size(%d)\n",
491 ppmu->n_counter, MAX_HWEVENTS);
492 ppmu->n_counter = MAX_HWEVENTS;
493 }
494
465 switch (event->attr.type) { 495 switch (event->attr.type) {
466 case PERF_TYPE_HARDWARE: 496 case PERF_TYPE_HARDWARE:
467 ev = event->attr.config; 497 ev = event->attr.config;
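
The guard added to fsl_emb_pmu_event_init() caps ppmu->n_counter at MAX_HWEVENTS before the per-counter arrays are indexed, turning what would be a silent out-of-bounds access (e.g. a six-counter e6500 against a smaller array) into a one-time WARN plus a safe clamp. As a reusable shape (hypothetical helper, not from the patch):

	static int clamp_counters(int requested, int max)
	{
		if (requested > max) {
			WARN(1, "%d counters requested, clamping to %d\n",
			     requested, max);
			requested = max;	/* keep array indexing in bounds */
		}
		return requested;
	}
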
diff --git a/arch/powerpc/perf/e6500-pmu.c b/arch/powerpc/perf/e6500-pmu.c
new file mode 100644
index 000000000000..3d877aa777b5
--- /dev/null
+++ b/arch/powerpc/perf/e6500-pmu.c
@@ -0,0 +1,121 @@
1/*
2 * Performance counter support for e6500 family processors.
3 *
4 * Author: Priyanka Jain, Priyanka.Jain@freescale.com
5 * Based on e500-pmu.c
6 * Copyright 2013 Freescale Semiconductor, Inc.
7 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#include <linux/string.h>
16#include <linux/perf_event.h>
17#include <asm/reg.h>
18#include <asm/cputable.h>
19
20/*
21 * Map of generic hardware event types to hardware events
22 * Zero if unsupported
23 */
24static int e6500_generic_events[] = {
25 [PERF_COUNT_HW_CPU_CYCLES] = 1,
26 [PERF_COUNT_HW_INSTRUCTIONS] = 2,
27 [PERF_COUNT_HW_CACHE_MISSES] = 221,
28 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 12,
29 [PERF_COUNT_HW_BRANCH_MISSES] = 15,
30};
31
32#define C(x) PERF_COUNT_HW_CACHE_##x
33
34/*
35 * Table of generalized cache-related events.
36 * 0 means not supported, -1 means nonsensical, other values
37 * are event codes.
38 */
39static int e6500_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
40 [C(L1D)] = {
41 /*RESULT_ACCESS RESULT_MISS */
42 [C(OP_READ)] = { 27, 222 },
43 [C(OP_WRITE)] = { 28, 223 },
44 [C(OP_PREFETCH)] = { 29, 0 },
45 },
46 [C(L1I)] = {
47 /*RESULT_ACCESS RESULT_MISS */
48 [C(OP_READ)] = { 2, 254 },
49 [C(OP_WRITE)] = { -1, -1 },
50 [C(OP_PREFETCH)] = { 37, 0 },
51 },
52 /*
53 * Assuming LL means L2, it's not a good match for this model.
54 * It does not have separate read/write events (but it does have
55 * separate instruction/data events).
56 */
57 [C(LL)] = {
58 /*RESULT_ACCESS RESULT_MISS */
59 [C(OP_READ)] = { 0, 0 },
60 [C(OP_WRITE)] = { 0, 0 },
61 [C(OP_PREFETCH)] = { 0, 0 },
62 },
63 /*
64 * There are data/instruction MMU misses, but that's a miss on
65 * the chip's internal level-one TLB which is probably not
66 * what the user wants. Instead, unified level-two TLB misses
67 * are reported here.
68 */
69 [C(DTLB)] = {
70 /*RESULT_ACCESS RESULT_MISS */
71 [C(OP_READ)] = { 26, 66 },
72 [C(OP_WRITE)] = { -1, -1 },
73 [C(OP_PREFETCH)] = { -1, -1 },
74 },
75 [C(BPU)] = {
76 /*RESULT_ACCESS RESULT_MISS */
77 [C(OP_READ)] = { 12, 15 },
78 [C(OP_WRITE)] = { -1, -1 },
79 [C(OP_PREFETCH)] = { -1, -1 },
80 },
81 [C(NODE)] = {
82 /* RESULT_ACCESS RESULT_MISS */
83 [C(OP_READ)] = { -1, -1 },
84 [C(OP_WRITE)] = { -1, -1 },
85 [C(OP_PREFETCH)] = { -1, -1 },
86 },
87};
88
89static int num_events = 512;
90
91/* Upper half of event id is PMLCb, for threshold events */
92static u64 e6500_xlate_event(u64 event_id)
93{
94 u32 event_low = (u32)event_id;
95 if (event_low >= num_events ||
96 (event_id & (FSL_EMB_EVENT_THRESHMUL | FSL_EMB_EVENT_THRESH)))
97 return 0;
98
99 return FSL_EMB_EVENT_VALID;
100}
101
102static struct fsl_emb_pmu e6500_pmu = {
103 .name = "e6500 family",
104 .n_counter = 6,
105 .n_restricted = 0,
106 .xlate_event = e6500_xlate_event,
107 .n_generic = ARRAY_SIZE(e6500_generic_events),
108 .generic_events = e6500_generic_events,
109 .cache_events = &e6500_cache_events,
110};
111
112static int init_e6500_pmu(void)
113{
114 if (!cur_cpu_spec->oprofile_cpu_type ||
115 strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc/e6500"))
116 return -ENODEV;
117
118 return register_fsl_emb_pmu(&e6500_pmu);
119}
120
121early_initcall(init_e6500_pmu);
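
e6500-pmu.c supplies only tables and a validity hook; the shared core-fsl-emb.c code does the actual counting. For a generic request such as PERF_COUNT_HW_CPU_CYCLES the core indexes generic_events[] (yielding raw code 1 here, with 0 meaning unsupported) and then lets e6500_xlate_event() veto codes out of range or using the threshold bits this model lacks. Roughly how the tables are consumed (a simplified sketch, not the literal core-fsl-emb.c code):

	static int map_generic_event(struct fsl_emb_pmu *pmu, u64 config, u64 *ev)
	{
		if (config >= pmu->n_generic || pmu->generic_events[config] == 0)
			return -EOPNOTSUPP;	/* 0 in the table: unsupported */
		*ev = pmu->generic_events[config];
		if (!(pmu->xlate_event(*ev) & FSL_EMB_EVENT_VALID))
			return -EINVAL;		/* rejected by the model hook */
		return 0;
	}
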
diff --git a/arch/powerpc/perf/power7-events-list.h b/arch/powerpc/perf/power7-events-list.h
new file mode 100644
index 000000000000..687790a2c0b8
--- /dev/null
+++ b/arch/powerpc/perf/power7-events-list.h
@@ -0,0 +1,548 @@
1/*
2 * Performance counter support for POWER7 processors.
3 *
4 * Copyright 2013 Runzhen Wang, IBM Corporation.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12EVENT(PM_IC_DEMAND_L2_BR_ALL, 0x04898)
13EVENT(PM_GCT_UTIL_7_TO_10_SLOTS, 0x020a0)
14EVENT(PM_PMC2_SAVED, 0x10022)
15EVENT(PM_CMPLU_STALL_DFU, 0x2003c)
16EVENT(PM_VSU0_16FLOP, 0x0a0a4)
17EVENT(PM_MRK_LSU_DERAT_MISS, 0x3d05a)
18EVENT(PM_MRK_ST_CMPL, 0x10034)
19EVENT(PM_NEST_PAIR3_ADD, 0x40881)
20EVENT(PM_L2_ST_DISP, 0x46180)
21EVENT(PM_L2_CASTOUT_MOD, 0x16180)
22EVENT(PM_ISEG, 0x020a4)
23EVENT(PM_MRK_INST_TIMEO, 0x40034)
24EVENT(PM_L2_RCST_DISP_FAIL_ADDR, 0x36282)
25EVENT(PM_LSU1_DC_PREF_STREAM_CONFIRM, 0x0d0b6)
26EVENT(PM_IERAT_WR_64K, 0x040be)
27EVENT(PM_MRK_DTLB_MISS_16M, 0x4d05e)
28EVENT(PM_IERAT_MISS, 0x100f6)
29EVENT(PM_MRK_PTEG_FROM_LMEM, 0x4d052)
30EVENT(PM_FLOP, 0x100f4)
31EVENT(PM_THRD_PRIO_4_5_CYC, 0x040b4)
32EVENT(PM_BR_PRED_TA, 0x040aa)
33EVENT(PM_CMPLU_STALL_FXU, 0x20014)
34EVENT(PM_EXT_INT, 0x200f8)
35EVENT(PM_VSU_FSQRT_FDIV, 0x0a888)
36EVENT(PM_MRK_LD_MISS_EXPOSED_CYC, 0x1003e)
37EVENT(PM_LSU1_LDF, 0x0c086)
38EVENT(PM_IC_WRITE_ALL, 0x0488c)
39EVENT(PM_LSU0_SRQ_STFWD, 0x0c0a0)
40EVENT(PM_PTEG_FROM_RL2L3_MOD, 0x1c052)
41EVENT(PM_MRK_DATA_FROM_L31_SHR, 0x1d04e)
42EVENT(PM_DATA_FROM_L21_MOD, 0x3c046)
43EVENT(PM_VSU1_SCAL_DOUBLE_ISSUED, 0x0b08a)
44EVENT(PM_VSU0_8FLOP, 0x0a0a0)
45EVENT(PM_POWER_EVENT1, 0x1006e)
46EVENT(PM_DISP_CLB_HELD_BAL, 0x02092)
47EVENT(PM_VSU1_2FLOP, 0x0a09a)
48EVENT(PM_LWSYNC_HELD, 0x0209a)
49EVENT(PM_PTEG_FROM_DL2L3_SHR, 0x3c054)
50EVENT(PM_INST_FROM_L21_MOD, 0x34046)
51EVENT(PM_IERAT_XLATE_WR_16MPLUS, 0x040bc)
52EVENT(PM_IC_REQ_ALL, 0x04888)
53EVENT(PM_DSLB_MISS, 0x0d090)
54EVENT(PM_L3_MISS, 0x1f082)
55EVENT(PM_LSU0_L1_PREF, 0x0d0b8)
56EVENT(PM_VSU_SCALAR_SINGLE_ISSUED, 0x0b884)
57EVENT(PM_LSU1_DC_PREF_STREAM_CONFIRM_STRIDE, 0x0d0be)
58EVENT(PM_L2_INST, 0x36080)
59EVENT(PM_VSU0_FRSP, 0x0a0b4)
60EVENT(PM_FLUSH_DISP, 0x02082)
61EVENT(PM_PTEG_FROM_L2MISS, 0x4c058)
62EVENT(PM_VSU1_DQ_ISSUED, 0x0b09a)
63EVENT(PM_CMPLU_STALL_LSU, 0x20012)
64EVENT(PM_MRK_DATA_FROM_DMEM, 0x1d04a)
65EVENT(PM_LSU_FLUSH_ULD, 0x0c8b0)
66EVENT(PM_PTEG_FROM_LMEM, 0x4c052)
67EVENT(PM_MRK_DERAT_MISS_16M, 0x3d05c)
68EVENT(PM_THRD_ALL_RUN_CYC, 0x2000c)
69EVENT(PM_MEM0_PREFETCH_DISP, 0x20083)
70EVENT(PM_MRK_STALL_CMPLU_CYC_COUNT, 0x3003f)
71EVENT(PM_DATA_FROM_DL2L3_MOD, 0x3c04c)
72EVENT(PM_VSU_FRSP, 0x0a8b4)
73EVENT(PM_MRK_DATA_FROM_L21_MOD, 0x3d046)
74EVENT(PM_PMC1_OVERFLOW, 0x20010)
75EVENT(PM_VSU0_SINGLE, 0x0a0a8)
76EVENT(PM_MRK_PTEG_FROM_L3MISS, 0x2d058)
77EVENT(PM_MRK_PTEG_FROM_L31_SHR, 0x2d056)
78EVENT(PM_VSU0_VECTOR_SP_ISSUED, 0x0b090)
79EVENT(PM_VSU1_FEST, 0x0a0ba)
80EVENT(PM_MRK_INST_DISP, 0x20030)
81EVENT(PM_VSU0_COMPLEX_ISSUED, 0x0b096)
82EVENT(PM_LSU1_FLUSH_UST, 0x0c0b6)
83EVENT(PM_INST_CMPL, 0x00002)
84EVENT(PM_FXU_IDLE, 0x1000e)
85EVENT(PM_LSU0_FLUSH_ULD, 0x0c0b0)
86EVENT(PM_MRK_DATA_FROM_DL2L3_MOD, 0x3d04c)
87EVENT(PM_LSU_LMQ_SRQ_EMPTY_ALL_CYC, 0x3001c)
88EVENT(PM_LSU1_REJECT_LMQ_FULL, 0x0c0a6)
89EVENT(PM_INST_PTEG_FROM_L21_MOD, 0x3e056)
90EVENT(PM_INST_FROM_RL2L3_MOD, 0x14042)
91EVENT(PM_SHL_CREATED, 0x05082)
92EVENT(PM_L2_ST_HIT, 0x46182)
93EVENT(PM_DATA_FROM_DMEM, 0x1c04a)
94EVENT(PM_L3_LD_MISS, 0x2f082)
95EVENT(PM_FXU1_BUSY_FXU0_IDLE, 0x4000e)
96EVENT(PM_DISP_CLB_HELD_RES, 0x02094)
97EVENT(PM_L2_SN_SX_I_DONE, 0x36382)
98EVENT(PM_GRP_CMPL, 0x30004)
99EVENT(PM_STCX_CMPL, 0x0c098)
100EVENT(PM_VSU0_2FLOP, 0x0a098)
101EVENT(PM_L3_PREF_MISS, 0x3f082)
102EVENT(PM_LSU_SRQ_SYNC_CYC, 0x0d096)
103EVENT(PM_LSU_REJECT_ERAT_MISS, 0x20064)
104EVENT(PM_L1_ICACHE_MISS, 0x200fc)
105EVENT(PM_LSU1_FLUSH_SRQ, 0x0c0be)
106EVENT(PM_LD_REF_L1_LSU0, 0x0c080)
107EVENT(PM_VSU0_FEST, 0x0a0b8)
108EVENT(PM_VSU_VECTOR_SINGLE_ISSUED, 0x0b890)
109EVENT(PM_FREQ_UP, 0x4000c)
110EVENT(PM_DATA_FROM_LMEM, 0x3c04a)
111EVENT(PM_LSU1_LDX, 0x0c08a)
112EVENT(PM_PMC3_OVERFLOW, 0x40010)
113EVENT(PM_MRK_BR_MPRED, 0x30036)
114EVENT(PM_SHL_MATCH, 0x05086)
115EVENT(PM_MRK_BR_TAKEN, 0x10036)
116EVENT(PM_CMPLU_STALL_BRU, 0x4004e)
117EVENT(PM_ISLB_MISS, 0x0d092)
118EVENT(PM_CYC, 0x0001e)
119EVENT(PM_DISP_HELD_THERMAL, 0x30006)
120EVENT(PM_INST_PTEG_FROM_RL2L3_SHR, 0x2e054)
121EVENT(PM_LSU1_SRQ_STFWD, 0x0c0a2)
122EVENT(PM_GCT_NOSLOT_BR_MPRED, 0x4001a)
123EVENT(PM_1PLUS_PPC_CMPL, 0x100f2)
124EVENT(PM_PTEG_FROM_DMEM, 0x2c052)
125EVENT(PM_VSU_2FLOP, 0x0a898)
126EVENT(PM_GCT_FULL_CYC, 0x04086)
127EVENT(PM_MRK_DATA_FROM_L3_CYC, 0x40020)
128EVENT(PM_LSU_SRQ_S0_ALLOC, 0x0d09d)
129EVENT(PM_MRK_DERAT_MISS_4K, 0x1d05c)
130EVENT(PM_BR_MPRED_TA, 0x040ae)
131EVENT(PM_INST_PTEG_FROM_L2MISS, 0x4e058)
132EVENT(PM_DPU_HELD_POWER, 0x20006)
133EVENT(PM_RUN_INST_CMPL, 0x400fa)
134EVENT(PM_MRK_VSU_FIN, 0x30032)
135EVENT(PM_LSU_SRQ_S0_VALID, 0x0d09c)
136EVENT(PM_GCT_EMPTY_CYC, 0x20008)
137EVENT(PM_IOPS_DISP, 0x30014)
138EVENT(PM_RUN_SPURR, 0x10008)
139EVENT(PM_PTEG_FROM_L21_MOD, 0x3c056)
140EVENT(PM_VSU0_1FLOP, 0x0a080)
141EVENT(PM_SNOOP_TLBIE, 0x0d0b2)
142EVENT(PM_DATA_FROM_L3MISS, 0x2c048)
143EVENT(PM_VSU_SINGLE, 0x0a8a8)
144EVENT(PM_DTLB_MISS_16G, 0x1c05e)
145EVENT(PM_CMPLU_STALL_VECTOR, 0x2001c)
146EVENT(PM_FLUSH, 0x400f8)
147EVENT(PM_L2_LD_HIT, 0x36182)
148EVENT(PM_NEST_PAIR2_AND, 0x30883)
149EVENT(PM_VSU1_1FLOP, 0x0a082)
150EVENT(PM_IC_PREF_REQ, 0x0408a)
151EVENT(PM_L3_LD_HIT, 0x2f080)
152EVENT(PM_GCT_NOSLOT_IC_MISS, 0x2001a)
153EVENT(PM_DISP_HELD, 0x10006)
154EVENT(PM_L2_LD, 0x16080)
155EVENT(PM_LSU_FLUSH_SRQ, 0x0c8bc)
156EVENT(PM_BC_PLUS_8_CONV, 0x040b8)
157EVENT(PM_MRK_DATA_FROM_L31_MOD_CYC, 0x40026)
158EVENT(PM_CMPLU_STALL_VECTOR_LONG, 0x4004a)
159EVENT(PM_L2_RCST_BUSY_RC_FULL, 0x26282)
160EVENT(PM_TB_BIT_TRANS, 0x300f8)
161EVENT(PM_THERMAL_MAX, 0x40006)
162EVENT(PM_LSU1_FLUSH_ULD, 0x0c0b2)
163EVENT(PM_LSU1_REJECT_LHS, 0x0c0ae)
164EVENT(PM_LSU_LRQ_S0_ALLOC, 0x0d09f)
165EVENT(PM_L3_CO_L31, 0x4f080)
166EVENT(PM_POWER_EVENT4, 0x4006e)
167EVENT(PM_DATA_FROM_L31_SHR, 0x1c04e)
168EVENT(PM_BR_UNCOND, 0x0409e)
169EVENT(PM_LSU1_DC_PREF_STREAM_ALLOC, 0x0d0aa)
170EVENT(PM_PMC4_REWIND, 0x10020)
171EVENT(PM_L2_RCLD_DISP, 0x16280)
172EVENT(PM_THRD_PRIO_2_3_CYC, 0x040b2)
173EVENT(PM_MRK_PTEG_FROM_L2MISS, 0x4d058)
174EVENT(PM_IC_DEMAND_L2_BHT_REDIRECT, 0x04098)
175EVENT(PM_LSU_DERAT_MISS, 0x200f6)
176EVENT(PM_IC_PREF_CANCEL_L2, 0x04094)
177EVENT(PM_MRK_FIN_STALL_CYC_COUNT, 0x1003d)
178EVENT(PM_BR_PRED_CCACHE, 0x040a0)
179EVENT(PM_GCT_UTIL_1_TO_2_SLOTS, 0x0209c)
180EVENT(PM_MRK_ST_CMPL_INT, 0x30034)
181EVENT(PM_LSU_TWO_TABLEWALK_CYC, 0x0d0a6)
182EVENT(PM_MRK_DATA_FROM_L3MISS, 0x2d048)
183EVENT(PM_GCT_NOSLOT_CYC, 0x100f8)
184EVENT(PM_LSU_SET_MPRED, 0x0c0a8)
185EVENT(PM_FLUSH_DISP_TLBIE, 0x0208a)
186EVENT(PM_VSU1_FCONV, 0x0a0b2)
187EVENT(PM_DERAT_MISS_16G, 0x4c05c)
188EVENT(PM_INST_FROM_LMEM, 0x3404a)
189EVENT(PM_IC_DEMAND_L2_BR_REDIRECT, 0x0409a)
190EVENT(PM_CMPLU_STALL_SCALAR_LONG, 0x20018)
191EVENT(PM_INST_PTEG_FROM_L2, 0x1e050)
192EVENT(PM_PTEG_FROM_L2, 0x1c050)
193EVENT(PM_MRK_DATA_FROM_L21_SHR_CYC, 0x20024)
194EVENT(PM_MRK_DTLB_MISS_4K, 0x2d05a)
195EVENT(PM_VSU0_FPSCR, 0x0b09c)
196EVENT(PM_VSU1_VECT_DOUBLE_ISSUED, 0x0b082)
197EVENT(PM_MRK_PTEG_FROM_RL2L3_MOD, 0x1d052)
198EVENT(PM_MEM0_RQ_DISP, 0x10083)
199EVENT(PM_L2_LD_MISS, 0x26080)
200EVENT(PM_VMX_RESULT_SAT_1, 0x0b0a0)
201EVENT(PM_L1_PREF, 0x0d8b8)
202EVENT(PM_MRK_DATA_FROM_LMEM_CYC, 0x2002c)
203EVENT(PM_GRP_IC_MISS_NONSPEC, 0x1000c)
204EVENT(PM_PB_NODE_PUMP, 0x10081)
205EVENT(PM_SHL_MERGED, 0x05084)
206EVENT(PM_NEST_PAIR1_ADD, 0x20881)
207EVENT(PM_DATA_FROM_L3, 0x1c048)
208EVENT(PM_LSU_FLUSH, 0x0208e)
209EVENT(PM_LSU_SRQ_SYNC_COUNT, 0x0d097)
210EVENT(PM_PMC2_OVERFLOW, 0x30010)
211EVENT(PM_LSU_LDF, 0x0c884)
212EVENT(PM_POWER_EVENT3, 0x3006e)
213EVENT(PM_DISP_WT, 0x30008)
214EVENT(PM_CMPLU_STALL_REJECT, 0x40016)
215EVENT(PM_IC_BANK_CONFLICT, 0x04082)
216EVENT(PM_BR_MPRED_CR_TA, 0x048ae)
217EVENT(PM_L2_INST_MISS, 0x36082)
218EVENT(PM_CMPLU_STALL_ERAT_MISS, 0x40018)
219EVENT(PM_NEST_PAIR2_ADD, 0x30881)
220EVENT(PM_MRK_LSU_FLUSH, 0x0d08c)
221EVENT(PM_L2_LDST, 0x16880)
222EVENT(PM_INST_FROM_L31_SHR, 0x1404e)
223EVENT(PM_VSU0_FIN, 0x0a0bc)
224EVENT(PM_LARX_LSU, 0x0c894)
225EVENT(PM_INST_FROM_RMEM, 0x34042)
226EVENT(PM_DISP_CLB_HELD_TLBIE, 0x02096)
227EVENT(PM_MRK_DATA_FROM_DMEM_CYC, 0x2002e)
228EVENT(PM_BR_PRED_CR, 0x040a8)
229EVENT(PM_LSU_REJECT, 0x10064)
230EVENT(PM_GCT_UTIL_3_TO_6_SLOTS, 0x0209e)
231EVENT(PM_CMPLU_STALL_END_GCT_NOSLOT, 0x10028)
232EVENT(PM_LSU0_REJECT_LMQ_FULL, 0x0c0a4)
233EVENT(PM_VSU_FEST, 0x0a8b8)
234EVENT(PM_NEST_PAIR0_AND, 0x10883)
235EVENT(PM_PTEG_FROM_L3, 0x2c050)
236EVENT(PM_POWER_EVENT2, 0x2006e)
237EVENT(PM_IC_PREF_CANCEL_PAGE, 0x04090)
238EVENT(PM_VSU0_FSQRT_FDIV, 0x0a088)
239EVENT(PM_MRK_GRP_CMPL, 0x40030)
240EVENT(PM_VSU0_SCAL_DOUBLE_ISSUED, 0x0b088)
241EVENT(PM_GRP_DISP, 0x3000a)
242EVENT(PM_LSU0_LDX, 0x0c088)
243EVENT(PM_DATA_FROM_L2, 0x1c040)
244EVENT(PM_MRK_DATA_FROM_RL2L3_MOD, 0x1d042)
245EVENT(PM_LD_REF_L1, 0x0c880)
246EVENT(PM_VSU0_VECT_DOUBLE_ISSUED, 0x0b080)
247EVENT(PM_VSU1_2FLOP_DOUBLE, 0x0a08e)
248EVENT(PM_THRD_PRIO_6_7_CYC, 0x040b6)
249EVENT(PM_BC_PLUS_8_RSLV_TAKEN, 0x040ba)
250EVENT(PM_BR_MPRED_CR, 0x040ac)
251EVENT(PM_L3_CO_MEM, 0x4f082)
252EVENT(PM_LD_MISS_L1, 0x400f0)
253EVENT(PM_DATA_FROM_RL2L3_MOD, 0x1c042)
254EVENT(PM_LSU_SRQ_FULL_CYC, 0x1001a)
255EVENT(PM_TABLEWALK_CYC, 0x10026)
256EVENT(PM_MRK_PTEG_FROM_RMEM, 0x3d052)
257EVENT(PM_LSU_SRQ_STFWD, 0x0c8a0)
258EVENT(PM_INST_PTEG_FROM_RMEM, 0x3e052)
259EVENT(PM_FXU0_FIN, 0x10004)
260EVENT(PM_LSU1_L1_SW_PREF, 0x0c09e)
261EVENT(PM_PTEG_FROM_L31_MOD, 0x1c054)
262EVENT(PM_PMC5_OVERFLOW, 0x10024)
263EVENT(PM_LD_REF_L1_LSU1, 0x0c082)
264EVENT(PM_INST_PTEG_FROM_L21_SHR, 0x4e056)
265EVENT(PM_CMPLU_STALL_THRD, 0x1001c)
266EVENT(PM_DATA_FROM_RMEM, 0x3c042)
267EVENT(PM_VSU0_SCAL_SINGLE_ISSUED, 0x0b084)
268EVENT(PM_BR_MPRED_LSTACK, 0x040a6)
269EVENT(PM_MRK_DATA_FROM_RL2L3_MOD_CYC, 0x40028)
270EVENT(PM_LSU0_FLUSH_UST, 0x0c0b4)
271EVENT(PM_LSU_NCST, 0x0c090)
272EVENT(PM_BR_TAKEN, 0x20004)
273EVENT(PM_INST_PTEG_FROM_LMEM, 0x4e052)
274EVENT(PM_GCT_NOSLOT_BR_MPRED_IC_MISS, 0x4001c)
275EVENT(PM_DTLB_MISS_4K, 0x2c05a)
276EVENT(PM_PMC4_SAVED, 0x30022)
277EVENT(PM_VSU1_PERMUTE_ISSUED, 0x0b092)
278EVENT(PM_SLB_MISS, 0x0d890)
279EVENT(PM_LSU1_FLUSH_LRQ, 0x0c0ba)
280EVENT(PM_DTLB_MISS, 0x300fc)
281EVENT(PM_VSU1_FRSP, 0x0a0b6)
282EVENT(PM_VSU_VECTOR_DOUBLE_ISSUED, 0x0b880)
283EVENT(PM_L2_CASTOUT_SHR, 0x16182)
284EVENT(PM_DATA_FROM_DL2L3_SHR, 0x3c044)
285EVENT(PM_VSU1_STF, 0x0b08e)
286EVENT(PM_ST_FIN, 0x200f0)
287EVENT(PM_PTEG_FROM_L21_SHR, 0x4c056)
288EVENT(PM_L2_LOC_GUESS_WRONG, 0x26480)
289EVENT(PM_MRK_STCX_FAIL, 0x0d08e)
290EVENT(PM_LSU0_REJECT_LHS, 0x0c0ac)
291EVENT(PM_IC_PREF_CANCEL_HIT, 0x04092)
292EVENT(PM_L3_PREF_BUSY, 0x4f080)
293EVENT(PM_MRK_BRU_FIN, 0x2003a)
294EVENT(PM_LSU1_NCLD, 0x0c08e)
295EVENT(PM_INST_PTEG_FROM_L31_MOD, 0x1e054)
296EVENT(PM_LSU_NCLD, 0x0c88c)
297EVENT(PM_LSU_LDX, 0x0c888)
298EVENT(PM_L2_LOC_GUESS_CORRECT, 0x16480)
299EVENT(PM_THRESH_TIMEO, 0x10038)
300EVENT(PM_L3_PREF_ST, 0x0d0ae)
301EVENT(PM_DISP_CLB_HELD_SYNC, 0x02098)
302EVENT(PM_VSU_SIMPLE_ISSUED, 0x0b894)
303EVENT(PM_VSU1_SINGLE, 0x0a0aa)
304EVENT(PM_DATA_TABLEWALK_CYC, 0x3001a)
305EVENT(PM_L2_RC_ST_DONE, 0x36380)
306EVENT(PM_MRK_PTEG_FROM_L21_MOD, 0x3d056)
307EVENT(PM_LARX_LSU1, 0x0c096)
308EVENT(PM_MRK_DATA_FROM_RMEM, 0x3d042)
309EVENT(PM_DISP_CLB_HELD, 0x02090)
310EVENT(PM_DERAT_MISS_4K, 0x1c05c)
311EVENT(PM_L2_RCLD_DISP_FAIL_ADDR, 0x16282)
312EVENT(PM_SEG_EXCEPTION, 0x028a4)
313EVENT(PM_FLUSH_DISP_SB, 0x0208c)
314EVENT(PM_L2_DC_INV, 0x26182)
315EVENT(PM_PTEG_FROM_DL2L3_MOD, 0x4c054)
316EVENT(PM_DSEG, 0x020a6)
317EVENT(PM_BR_PRED_LSTACK, 0x040a2)
318EVENT(PM_VSU0_STF, 0x0b08c)
319EVENT(PM_LSU_FX_FIN, 0x10066)
320EVENT(PM_DERAT_MISS_16M, 0x3c05c)
321EVENT(PM_MRK_PTEG_FROM_DL2L3_MOD, 0x4d054)
322EVENT(PM_GCT_UTIL_11_PLUS_SLOTS, 0x020a2)
323EVENT(PM_INST_FROM_L3, 0x14048)
324EVENT(PM_MRK_IFU_FIN, 0x3003a)
325EVENT(PM_ITLB_MISS, 0x400fc)
326EVENT(PM_VSU_STF, 0x0b88c)
327EVENT(PM_LSU_FLUSH_UST, 0x0c8b4)
328EVENT(PM_L2_LDST_MISS, 0x26880)
329EVENT(PM_FXU1_FIN, 0x40004)
330EVENT(PM_SHL_DEALLOCATED, 0x05080)
331EVENT(PM_L2_SN_M_WR_DONE, 0x46382)
332EVENT(PM_LSU_REJECT_SET_MPRED, 0x0c8a8)
333EVENT(PM_L3_PREF_LD, 0x0d0ac)
334EVENT(PM_L2_SN_M_RD_DONE, 0x46380)
335EVENT(PM_MRK_DERAT_MISS_16G, 0x4d05c)
336EVENT(PM_VSU_FCONV, 0x0a8b0)
337EVENT(PM_ANY_THRD_RUN_CYC, 0x100fa)
338EVENT(PM_LSU_LMQ_FULL_CYC, 0x0d0a4)
339EVENT(PM_MRK_LSU_REJECT_LHS, 0x0d082)
340EVENT(PM_MRK_LD_MISS_L1_CYC, 0x4003e)
341EVENT(PM_MRK_DATA_FROM_L2_CYC, 0x20020)
342EVENT(PM_INST_IMC_MATCH_DISP, 0x30016)
343EVENT(PM_MRK_DATA_FROM_RMEM_CYC, 0x4002c)
344EVENT(PM_VSU0_SIMPLE_ISSUED, 0x0b094)
345EVENT(PM_CMPLU_STALL_DIV, 0x40014)
346EVENT(PM_MRK_PTEG_FROM_RL2L3_SHR, 0x2d054)
347EVENT(PM_VSU_FMA_DOUBLE, 0x0a890)
348EVENT(PM_VSU_4FLOP, 0x0a89c)
349EVENT(PM_VSU1_FIN, 0x0a0be)
350EVENT(PM_NEST_PAIR1_AND, 0x20883)
351EVENT(PM_INST_PTEG_FROM_RL2L3_MOD, 0x1e052)
352EVENT(PM_RUN_CYC, 0x200f4)
353EVENT(PM_PTEG_FROM_RMEM, 0x3c052)
354EVENT(PM_LSU_LRQ_S0_VALID, 0x0d09e)
355EVENT(PM_LSU0_LDF, 0x0c084)
356EVENT(PM_FLUSH_COMPLETION, 0x30012)
357EVENT(PM_ST_MISS_L1, 0x300f0)
358EVENT(PM_L2_NODE_PUMP, 0x36480)
359EVENT(PM_INST_FROM_DL2L3_SHR, 0x34044)
360EVENT(PM_MRK_STALL_CMPLU_CYC, 0x3003e)
361EVENT(PM_VSU1_DENORM, 0x0a0ae)
362EVENT(PM_MRK_DATA_FROM_L31_SHR_CYC, 0x20026)
363EVENT(PM_NEST_PAIR0_ADD, 0x10881)
364EVENT(PM_INST_FROM_L3MISS, 0x24048)
365EVENT(PM_EE_OFF_EXT_INT, 0x02080)
366EVENT(PM_INST_PTEG_FROM_DMEM, 0x2e052)
367EVENT(PM_INST_FROM_DL2L3_MOD, 0x3404c)
368EVENT(PM_PMC6_OVERFLOW, 0x30024)
369EVENT(PM_VSU_2FLOP_DOUBLE, 0x0a88c)
370EVENT(PM_TLB_MISS, 0x20066)
371EVENT(PM_FXU_BUSY, 0x2000e)
372EVENT(PM_L2_RCLD_DISP_FAIL_OTHER, 0x26280)
373EVENT(PM_LSU_REJECT_LMQ_FULL, 0x0c8a4)
374EVENT(PM_IC_RELOAD_SHR, 0x04096)
375EVENT(PM_GRP_MRK, 0x10031)
376EVENT(PM_MRK_ST_NEST, 0x20034)
377EVENT(PM_VSU1_FSQRT_FDIV, 0x0a08a)
378EVENT(PM_LSU0_FLUSH_LRQ, 0x0c0b8)
379EVENT(PM_LARX_LSU0, 0x0c094)
380EVENT(PM_IBUF_FULL_CYC, 0x04084)
+EVENT(PM_MRK_DATA_FROM_DL2L3_SHR_CYC, 0x2002a)
+EVENT(PM_LSU_DC_PREF_STREAM_ALLOC, 0x0d8a8)
+EVENT(PM_GRP_MRK_CYC, 0x10030)
+EVENT(PM_MRK_DATA_FROM_RL2L3_SHR_CYC, 0x20028)
+EVENT(PM_L2_GLOB_GUESS_CORRECT, 0x16482)
+EVENT(PM_LSU_REJECT_LHS, 0x0c8ac)
+EVENT(PM_MRK_DATA_FROM_LMEM, 0x3d04a)
+EVENT(PM_INST_PTEG_FROM_L3, 0x2e050)
+EVENT(PM_FREQ_DOWN, 0x3000c)
+EVENT(PM_PB_RETRY_NODE_PUMP, 0x30081)
+EVENT(PM_INST_FROM_RL2L3_SHR, 0x1404c)
+EVENT(PM_MRK_INST_ISSUED, 0x10032)
+EVENT(PM_PTEG_FROM_L3MISS, 0x2c058)
+EVENT(PM_RUN_PURR, 0x400f4)
+EVENT(PM_MRK_GRP_IC_MISS, 0x40038)
+EVENT(PM_MRK_DATA_FROM_L3, 0x1d048)
+EVENT(PM_CMPLU_STALL_DCACHE_MISS, 0x20016)
+EVENT(PM_PTEG_FROM_RL2L3_SHR, 0x2c054)
+EVENT(PM_LSU_FLUSH_LRQ, 0x0c8b8)
+EVENT(PM_MRK_DERAT_MISS_64K, 0x2d05c)
+EVENT(PM_INST_PTEG_FROM_DL2L3_MOD, 0x4e054)
+EVENT(PM_L2_ST_MISS, 0x26082)
+EVENT(PM_MRK_PTEG_FROM_L21_SHR, 0x4d056)
+EVENT(PM_LWSYNC, 0x0d094)
+EVENT(PM_LSU0_DC_PREF_STREAM_CONFIRM_STRIDE, 0x0d0bc)
+EVENT(PM_MRK_LSU_FLUSH_LRQ, 0x0d088)
+EVENT(PM_INST_IMC_MATCH_CMPL, 0x100f0)
+EVENT(PM_NEST_PAIR3_AND, 0x40883)
+EVENT(PM_PB_RETRY_SYS_PUMP, 0x40081)
+EVENT(PM_MRK_INST_FIN, 0x30030)
+EVENT(PM_MRK_PTEG_FROM_DL2L3_SHR, 0x3d054)
+EVENT(PM_INST_FROM_L31_MOD, 0x14044)
+EVENT(PM_MRK_DTLB_MISS_64K, 0x3d05e)
+EVENT(PM_LSU_FIN, 0x30066)
+EVENT(PM_MRK_LSU_REJECT, 0x40064)
+EVENT(PM_L2_CO_FAIL_BUSY, 0x16382)
+EVENT(PM_MEM0_WQ_DISP, 0x40083)
+EVENT(PM_DATA_FROM_L31_MOD, 0x1c044)
+EVENT(PM_THERMAL_WARN, 0x10016)
+EVENT(PM_VSU0_4FLOP, 0x0a09c)
+EVENT(PM_BR_MPRED_CCACHE, 0x040a4)
+EVENT(PM_CMPLU_STALL_IFU, 0x4004c)
+EVENT(PM_L1_DEMAND_WRITE, 0x0408c)
+EVENT(PM_FLUSH_BR_MPRED, 0x02084)
+EVENT(PM_MRK_DTLB_MISS_16G, 0x1d05e)
+EVENT(PM_MRK_PTEG_FROM_DMEM, 0x2d052)
+EVENT(PM_L2_RCST_DISP, 0x36280)
+EVENT(PM_CMPLU_STALL, 0x4000a)
+EVENT(PM_LSU_PARTIAL_CDF, 0x0c0aa)
+EVENT(PM_DISP_CLB_HELD_SB, 0x020a8)
+EVENT(PM_VSU0_FMA_DOUBLE, 0x0a090)
+EVENT(PM_FXU0_BUSY_FXU1_IDLE, 0x3000e)
+EVENT(PM_IC_DEMAND_CYC, 0x10018)
+EVENT(PM_MRK_DATA_FROM_L21_SHR, 0x3d04e)
+EVENT(PM_MRK_LSU_FLUSH_UST, 0x0d086)
+EVENT(PM_INST_PTEG_FROM_L3MISS, 0x2e058)
+EVENT(PM_VSU_DENORM, 0x0a8ac)
+EVENT(PM_MRK_LSU_PARTIAL_CDF, 0x0d080)
+EVENT(PM_INST_FROM_L21_SHR, 0x3404e)
+EVENT(PM_IC_PREF_WRITE, 0x0408e)
+EVENT(PM_BR_PRED, 0x0409c)
+EVENT(PM_INST_FROM_DMEM, 0x1404a)
+EVENT(PM_IC_PREF_CANCEL_ALL, 0x04890)
+EVENT(PM_LSU_DC_PREF_STREAM_CONFIRM, 0x0d8b4)
+EVENT(PM_MRK_LSU_FLUSH_SRQ, 0x0d08a)
+EVENT(PM_MRK_FIN_STALL_CYC, 0x1003c)
+EVENT(PM_L2_RCST_DISP_FAIL_OTHER, 0x46280)
+EVENT(PM_VSU1_DD_ISSUED, 0x0b098)
+EVENT(PM_PTEG_FROM_L31_SHR, 0x2c056)
+EVENT(PM_DATA_FROM_L21_SHR, 0x3c04e)
+EVENT(PM_LSU0_NCLD, 0x0c08c)
+EVENT(PM_VSU1_4FLOP, 0x0a09e)
+EVENT(PM_VSU1_8FLOP, 0x0a0a2)
+EVENT(PM_VSU_8FLOP, 0x0a8a0)
+EVENT(PM_LSU_LMQ_SRQ_EMPTY_CYC, 0x2003e)
+EVENT(PM_DTLB_MISS_64K, 0x3c05e)
+EVENT(PM_THRD_CONC_RUN_INST, 0x300f4)
+EVENT(PM_MRK_PTEG_FROM_L2, 0x1d050)
+EVENT(PM_PB_SYS_PUMP, 0x20081)
+EVENT(PM_VSU_FIN, 0x0a8bc)
+EVENT(PM_MRK_DATA_FROM_L31_MOD, 0x1d044)
+EVENT(PM_THRD_PRIO_0_1_CYC, 0x040b0)
+EVENT(PM_DERAT_MISS_64K, 0x2c05c)
+EVENT(PM_PMC2_REWIND, 0x30020)
+EVENT(PM_INST_FROM_L2, 0x14040)
+EVENT(PM_GRP_BR_MPRED_NONSPEC, 0x1000a)
+EVENT(PM_INST_DISP, 0x200f2)
+EVENT(PM_MEM0_RD_CANCEL_TOTAL, 0x30083)
+EVENT(PM_LSU0_DC_PREF_STREAM_CONFIRM, 0x0d0b4)
+EVENT(PM_L1_DCACHE_RELOAD_VALID, 0x300f6)
+EVENT(PM_VSU_SCALAR_DOUBLE_ISSUED, 0x0b888)
+EVENT(PM_L3_PREF_HIT, 0x3f080)
+EVENT(PM_MRK_PTEG_FROM_L31_MOD, 0x1d054)
+EVENT(PM_CMPLU_STALL_STORE, 0x2004a)
+EVENT(PM_MRK_FXU_FIN, 0x20038)
+EVENT(PM_PMC4_OVERFLOW, 0x10010)
+EVENT(PM_MRK_PTEG_FROM_L3, 0x2d050)
+EVENT(PM_LSU0_LMQ_LHR_MERGE, 0x0d098)
+EVENT(PM_BTAC_HIT, 0x0508a)
+EVENT(PM_L3_RD_BUSY, 0x4f082)
+EVENT(PM_LSU0_L1_SW_PREF, 0x0c09c)
+EVENT(PM_INST_FROM_L2MISS, 0x44048)
+EVENT(PM_LSU0_DC_PREF_STREAM_ALLOC, 0x0d0a8)
+EVENT(PM_L2_ST, 0x16082)
+EVENT(PM_VSU0_DENORM, 0x0a0ac)
+EVENT(PM_MRK_DATA_FROM_DL2L3_SHR, 0x3d044)
+EVENT(PM_BR_PRED_CR_TA, 0x048aa)
+EVENT(PM_VSU0_FCONV, 0x0a0b0)
+EVENT(PM_MRK_LSU_FLUSH_ULD, 0x0d084)
+EVENT(PM_BTAC_MISS, 0x05088)
+EVENT(PM_MRK_LD_MISS_EXPOSED_CYC_COUNT, 0x1003f)
+EVENT(PM_MRK_DATA_FROM_L2, 0x1d040)
+EVENT(PM_LSU_DCACHE_RELOAD_VALID, 0x0d0a2)
+EVENT(PM_VSU_FMA, 0x0a884)
+EVENT(PM_LSU0_FLUSH_SRQ, 0x0c0bc)
+EVENT(PM_LSU1_L1_PREF, 0x0d0ba)
+EVENT(PM_IOPS_CMPL, 0x10014)
+EVENT(PM_L2_SYS_PUMP, 0x36482)
+EVENT(PM_L2_RCLD_BUSY_RC_FULL, 0x46282)
+EVENT(PM_LSU_LMQ_S0_ALLOC, 0x0d0a1)
+EVENT(PM_FLUSH_DISP_SYNC, 0x02088)
+EVENT(PM_MRK_DATA_FROM_DL2L3_MOD_CYC, 0x4002a)
+EVENT(PM_L2_IC_INV, 0x26180)
+EVENT(PM_MRK_DATA_FROM_L21_MOD_CYC, 0x40024)
+EVENT(PM_L3_PREF_LDST, 0x0d8ac)
+EVENT(PM_LSU_SRQ_EMPTY_CYC, 0x40008)
+EVENT(PM_LSU_LMQ_S0_VALID, 0x0d0a0)
+EVENT(PM_FLUSH_PARTIAL, 0x02086)
+EVENT(PM_VSU1_FMA_DOUBLE, 0x0a092)
+EVENT(PM_1PLUS_PPC_DISP, 0x400f2)
+EVENT(PM_DATA_FROM_L2MISS, 0x200fe)
+EVENT(PM_SUSPENDED, 0x00000)
+EVENT(PM_VSU0_FMA, 0x0a084)
+EVENT(PM_CMPLU_STALL_SCALAR, 0x40012)
+EVENT(PM_STCX_FAIL, 0x0c09a)
+EVENT(PM_VSU0_FSQRT_FDIV_DOUBLE, 0x0a094)
+EVENT(PM_DC_PREF_DST, 0x0d0b0)
+EVENT(PM_VSU1_SCAL_SINGLE_ISSUED, 0x0b086)
+EVENT(PM_L3_HIT, 0x1f080)
+EVENT(PM_L2_GLOB_GUESS_WRONG, 0x26482)
+EVENT(PM_MRK_DFU_FIN, 0x20032)
+EVENT(PM_INST_FROM_L1, 0x04080)
+EVENT(PM_BRU_FIN, 0x10068)
+EVENT(PM_IC_DEMAND_REQ, 0x04088)
+EVENT(PM_VSU1_FSQRT_FDIV_DOUBLE, 0x0a096)
+EVENT(PM_VSU1_FMA, 0x0a086)
+EVENT(PM_MRK_LD_MISS_L1, 0x20036)
+EVENT(PM_VSU0_2FLOP_DOUBLE, 0x0a08c)
+EVENT(PM_LSU_DC_PREF_STRIDED_STREAM_CONFIRM, 0x0d8bc)
+EVENT(PM_INST_PTEG_FROM_L31_SHR, 0x2e056)
+EVENT(PM_MRK_LSU_REJECT_ERAT_MISS, 0x30064)
+EVENT(PM_MRK_DATA_FROM_L2MISS, 0x4d048)
+EVENT(PM_DATA_FROM_RL2L3_SHR, 0x1c04c)
+EVENT(PM_INST_FROM_PREF, 0x14046)
+EVENT(PM_VSU1_SQ, 0x0b09e)
+EVENT(PM_L2_LD_DISP, 0x36180)
+EVENT(PM_L2_DISP_ALL, 0x46080)
+EVENT(PM_THRD_GRP_CMPL_BOTH_CYC, 0x10012)
+EVENT(PM_VSU_FSQRT_FDIV_DOUBLE, 0x0a894)
+EVENT(PM_BR_MPRED, 0x400f6)
+EVENT(PM_INST_PTEG_FROM_DL2L3_SHR, 0x3e054)
+EVENT(PM_VSU_1FLOP, 0x0a880)
+EVENT(PM_HV_CYC, 0x2000a)
+EVENT(PM_MRK_LSU_FIN, 0x40032)
+EVENT(PM_MRK_DATA_FROM_RL2L3_SHR, 0x1d04c)
+EVENT(PM_DTLB_MISS_16M, 0x4c05e)
+EVENT(PM_LSU1_LMQ_LHR_MERGE, 0x0d09a)
+EVENT(PM_IFU_FIN, 0x40066)
diff --git a/arch/powerpc/perf/power7-pmu.c b/arch/powerpc/perf/power7-pmu.c
index d1821b8bbc4c..56c67bca2f75 100644
--- a/arch/powerpc/perf/power7-pmu.c
+++ b/arch/powerpc/perf/power7-pmu.c
@@ -53,37 +53,13 @@
 /*
  * Power7 event codes.
  */
-#define PME_PM_CYC 0x1e
-#define PME_PM_GCT_NOSLOT_CYC 0x100f8
-#define PME_PM_CMPLU_STALL 0x4000a
-#define PME_PM_INST_CMPL 0x2
-#define PME_PM_LD_REF_L1 0xc880
-#define PME_PM_LD_MISS_L1 0x400f0
-#define PME_PM_BRU_FIN 0x10068
-#define PME_PM_BR_MPRED 0x400f6
-
-#define PME_PM_CMPLU_STALL_FXU 0x20014
-#define PME_PM_CMPLU_STALL_DIV 0x40014
-#define PME_PM_CMPLU_STALL_SCALAR 0x40012
-#define PME_PM_CMPLU_STALL_SCALAR_LONG 0x20018
-#define PME_PM_CMPLU_STALL_VECTOR 0x2001c
-#define PME_PM_CMPLU_STALL_VECTOR_LONG 0x4004a
-#define PME_PM_CMPLU_STALL_LSU 0x20012
-#define PME_PM_CMPLU_STALL_REJECT 0x40016
-#define PME_PM_CMPLU_STALL_ERAT_MISS 0x40018
-#define PME_PM_CMPLU_STALL_DCACHE_MISS 0x20016
-#define PME_PM_CMPLU_STALL_STORE 0x2004a
-#define PME_PM_CMPLU_STALL_THRD 0x1001c
-#define PME_PM_CMPLU_STALL_IFU 0x4004c
-#define PME_PM_CMPLU_STALL_BRU 0x4004e
-#define PME_PM_GCT_NOSLOT_IC_MISS 0x2001a
-#define PME_PM_GCT_NOSLOT_BR_MPRED 0x4001a
-#define PME_PM_GCT_NOSLOT_BR_MPRED_IC_MISS 0x4001c
-#define PME_PM_GRP_CMPL 0x30004
-#define PME_PM_1PLUS_PPC_CMPL 0x100f2
-#define PME_PM_CMPLU_STALL_DFU 0x2003c
-#define PME_PM_RUN_CYC 0x200f4
-#define PME_PM_RUN_INST_CMPL 0x400fa
+#define EVENT(_name, _code) \
+	PME_##_name = _code,
+
+enum {
+#include "power7-events-list.h"
+};
+#undef EVENT
 
 /*
  * Layout of constraint bits:
@@ -398,96 +374,36 @@ static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
 };
 
 
-GENERIC_EVENT_ATTR(cpu-cycles, CYC);
-GENERIC_EVENT_ATTR(stalled-cycles-frontend, GCT_NOSLOT_CYC);
-GENERIC_EVENT_ATTR(stalled-cycles-backend, CMPLU_STALL);
-GENERIC_EVENT_ATTR(instructions, INST_CMPL);
-GENERIC_EVENT_ATTR(cache-references, LD_REF_L1);
-GENERIC_EVENT_ATTR(cache-misses, LD_MISS_L1);
-GENERIC_EVENT_ATTR(branch-instructions, BRU_FIN);
-GENERIC_EVENT_ATTR(branch-misses, BR_MPRED);
+GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC);
+GENERIC_EVENT_ATTR(stalled-cycles-frontend, PM_GCT_NOSLOT_CYC);
+GENERIC_EVENT_ATTR(stalled-cycles-backend, PM_CMPLU_STALL);
+GENERIC_EVENT_ATTR(instructions, PM_INST_CMPL);
+GENERIC_EVENT_ATTR(cache-references, PM_LD_REF_L1);
+GENERIC_EVENT_ATTR(cache-misses, PM_LD_MISS_L1);
+GENERIC_EVENT_ATTR(branch-instructions, PM_BRU_FIN);
+GENERIC_EVENT_ATTR(branch-misses, PM_BR_MPRED);
 
-POWER_EVENT_ATTR(CYC, CYC);
-POWER_EVENT_ATTR(GCT_NOSLOT_CYC, GCT_NOSLOT_CYC);
-POWER_EVENT_ATTR(CMPLU_STALL, CMPLU_STALL);
-POWER_EVENT_ATTR(INST_CMPL, INST_CMPL);
-POWER_EVENT_ATTR(LD_REF_L1, LD_REF_L1);
-POWER_EVENT_ATTR(LD_MISS_L1, LD_MISS_L1);
-POWER_EVENT_ATTR(BRU_FIN, BRU_FIN)
-POWER_EVENT_ATTR(BR_MPRED, BR_MPRED);
-
-POWER_EVENT_ATTR(CMPLU_STALL_FXU, CMPLU_STALL_FXU);
-POWER_EVENT_ATTR(CMPLU_STALL_DIV, CMPLU_STALL_DIV);
-POWER_EVENT_ATTR(CMPLU_STALL_SCALAR, CMPLU_STALL_SCALAR);
-POWER_EVENT_ATTR(CMPLU_STALL_SCALAR_LONG, CMPLU_STALL_SCALAR_LONG);
-POWER_EVENT_ATTR(CMPLU_STALL_VECTOR, CMPLU_STALL_VECTOR);
-POWER_EVENT_ATTR(CMPLU_STALL_VECTOR_LONG, CMPLU_STALL_VECTOR_LONG);
-POWER_EVENT_ATTR(CMPLU_STALL_LSU, CMPLU_STALL_LSU);
-POWER_EVENT_ATTR(CMPLU_STALL_REJECT, CMPLU_STALL_REJECT);
-
-POWER_EVENT_ATTR(CMPLU_STALL_ERAT_MISS, CMPLU_STALL_ERAT_MISS);
-POWER_EVENT_ATTR(CMPLU_STALL_DCACHE_MISS, CMPLU_STALL_DCACHE_MISS);
-POWER_EVENT_ATTR(CMPLU_STALL_STORE, CMPLU_STALL_STORE);
-POWER_EVENT_ATTR(CMPLU_STALL_THRD, CMPLU_STALL_THRD);
-POWER_EVENT_ATTR(CMPLU_STALL_IFU, CMPLU_STALL_IFU);
-POWER_EVENT_ATTR(CMPLU_STALL_BRU, CMPLU_STALL_BRU);
-POWER_EVENT_ATTR(GCT_NOSLOT_IC_MISS, GCT_NOSLOT_IC_MISS);
-
-POWER_EVENT_ATTR(GCT_NOSLOT_BR_MPRED, GCT_NOSLOT_BR_MPRED);
-POWER_EVENT_ATTR(GCT_NOSLOT_BR_MPRED_IC_MISS, GCT_NOSLOT_BR_MPRED_IC_MISS);
-POWER_EVENT_ATTR(GRP_CMPL, GRP_CMPL);
-POWER_EVENT_ATTR(1PLUS_PPC_CMPL, 1PLUS_PPC_CMPL);
-POWER_EVENT_ATTR(CMPLU_STALL_DFU, CMPLU_STALL_DFU);
-POWER_EVENT_ATTR(RUN_CYC, RUN_CYC);
-POWER_EVENT_ATTR(RUN_INST_CMPL, RUN_INST_CMPL);
-
+#define EVENT(_name, _code)	POWER_EVENT_ATTR(_name, _name);
+#include "power7-events-list.h"
+#undef EVENT
+
+#define EVENT(_name, _code)	POWER_EVENT_PTR(_name),
 
 static struct attribute *power7_events_attr[] = {
-	GENERIC_EVENT_PTR(CYC),
-	GENERIC_EVENT_PTR(GCT_NOSLOT_CYC),
-	GENERIC_EVENT_PTR(CMPLU_STALL),
-	GENERIC_EVENT_PTR(INST_CMPL),
-	GENERIC_EVENT_PTR(LD_REF_L1),
-	GENERIC_EVENT_PTR(LD_MISS_L1),
-	GENERIC_EVENT_PTR(BRU_FIN),
-	GENERIC_EVENT_PTR(BR_MPRED),
+	GENERIC_EVENT_PTR(PM_CYC),
+	GENERIC_EVENT_PTR(PM_GCT_NOSLOT_CYC),
+	GENERIC_EVENT_PTR(PM_CMPLU_STALL),
+	GENERIC_EVENT_PTR(PM_INST_CMPL),
+	GENERIC_EVENT_PTR(PM_LD_REF_L1),
+	GENERIC_EVENT_PTR(PM_LD_MISS_L1),
+	GENERIC_EVENT_PTR(PM_BRU_FIN),
+	GENERIC_EVENT_PTR(PM_BR_MPRED),
 
-	POWER_EVENT_PTR(CYC),
-	POWER_EVENT_PTR(GCT_NOSLOT_CYC),
-	POWER_EVENT_PTR(CMPLU_STALL),
-	POWER_EVENT_PTR(INST_CMPL),
-	POWER_EVENT_PTR(LD_REF_L1),
-	POWER_EVENT_PTR(LD_MISS_L1),
-	POWER_EVENT_PTR(BRU_FIN),
-	POWER_EVENT_PTR(BR_MPRED),
-
-	POWER_EVENT_PTR(CMPLU_STALL_FXU),
-	POWER_EVENT_PTR(CMPLU_STALL_DIV),
-	POWER_EVENT_PTR(CMPLU_STALL_SCALAR),
-	POWER_EVENT_PTR(CMPLU_STALL_SCALAR_LONG),
-	POWER_EVENT_PTR(CMPLU_STALL_VECTOR),
-	POWER_EVENT_PTR(CMPLU_STALL_VECTOR_LONG),
-	POWER_EVENT_PTR(CMPLU_STALL_LSU),
-	POWER_EVENT_PTR(CMPLU_STALL_REJECT),
-
-	POWER_EVENT_PTR(CMPLU_STALL_ERAT_MISS),
-	POWER_EVENT_PTR(CMPLU_STALL_DCACHE_MISS),
-	POWER_EVENT_PTR(CMPLU_STALL_STORE),
-	POWER_EVENT_PTR(CMPLU_STALL_THRD),
-	POWER_EVENT_PTR(CMPLU_STALL_IFU),
-	POWER_EVENT_PTR(CMPLU_STALL_BRU),
-	POWER_EVENT_PTR(GCT_NOSLOT_IC_MISS),
-	POWER_EVENT_PTR(GCT_NOSLOT_BR_MPRED),
-
-	POWER_EVENT_PTR(GCT_NOSLOT_BR_MPRED_IC_MISS),
-	POWER_EVENT_PTR(GRP_CMPL),
-	POWER_EVENT_PTR(1PLUS_PPC_CMPL),
-	POWER_EVENT_PTR(CMPLU_STALL_DFU),
-	POWER_EVENT_PTR(RUN_CYC),
-	POWER_EVENT_PTR(RUN_INST_CMPL),
+	#include "power7-events-list.h"
+	#undef EVENT
 	NULL
 };
 
-
 static struct attribute_group power7_pmu_events_group = {
 	.name = "events",
 	.attrs = power7_events_attr,
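
The rewrite above is the classic X-macro pattern: the event table lives in a single list header, and each consumer redefines EVENT() before including it, so one list expands into the enum, the attribute definitions, and the attribute array. A minimal self-contained illustration of the pattern (plain C, with a made-up two-event list inlined as a macro instead of a separate header; not the kernel's actual macros):

#include <stdio.h>

/* Stands in for power7-events-list.h: nothing but EVENT() lines. */
#define EVENT_LIST \
	EVENT(PM_CYC, 0x1e) \
	EVENT(PM_INST_CMPL, 0x2)

/* Expansion 1: an enum of event codes. */
#define EVENT(_name, _code) PME_##_name = _code,
enum { EVENT_LIST };
#undef EVENT

/* Expansion 2: a name/code table built from the same list. */
#define EVENT(_name, _code) { #_name, _code },
static const struct { const char *name; unsigned code; } events[] = {
	EVENT_LIST
};
#undef EVENT

int main(void)
{
	unsigned i;

	for (i = 0; i < sizeof(events) / sizeof(events[0]); i++)
		printf("%s = %#x\n", events[i].name, events[i].code);
	return 0;
}

Keeping the list in one place means an event can never appear in the enum without also appearing in sysfs, which is exactly the drift this kernel change removes.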
diff --git a/arch/powerpc/platforms/44x/warp.c b/arch/powerpc/platforms/44x/warp.c
index 4cfa49901c02..534574a97ec9 100644
--- a/arch/powerpc/platforms/44x/warp.c
+++ b/arch/powerpc/platforms/44x/warp.c
@@ -16,7 +16,6 @@
 #include <linux/interrupt.h>
 #include <linux/delay.h>
 #include <linux/of_gpio.h>
-#include <linux/of_i2c.h>
 #include <linux/slab.h>
 #include <linux/export.h>
 
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
index b89ef65392dc..b69221ba07fd 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
@@ -373,8 +373,9 @@ static int mpc52xx_irqhost_map(struct irq_domain *h, unsigned int virq,
 	case MPC52xx_IRQ_L1_PERP: irqchip = &mpc52xx_periph_irqchip; break;
 	case MPC52xx_IRQ_L1_SDMA: irqchip = &mpc52xx_sdma_irqchip; break;
 	case MPC52xx_IRQ_L1_CRIT:
+	default:
 		pr_warn("%s: Critical IRQ #%d is unsupported! Nopping it.\n",
-			__func__, l2irq);
+			__func__, l1irq);
 		irq_set_chip(virq, &no_irq_chip);
 		return 0;
 	}
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index efdd37c775ad..de2eb9320993 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -32,6 +32,12 @@ config BSC9131_RDB
 	  StarCore SC3850 DSP
 	  Manufacturer : Freescale Semiconductor, Inc
 
+config C293_PCIE
+	bool "Freescale C293PCIE"
+	select DEFAULT_UIMAGE
+	help
+	  This option enables support for the C293PCIE board
+
 config MPC8540_ADS
 	bool "Freescale MPC8540 ADS"
 	select DEFAULT_UIMAGE
@@ -112,10 +118,10 @@ config P1022_RDK
 	  reference board.
 
 config P1023_RDS
-	bool "Freescale P1023 RDS"
+	bool "Freescale P1023 RDS/RDB"
 	select DEFAULT_UIMAGE
 	help
-	  This option enables support for the P1023 RDS board
+	  This option enables support for the P1023 RDS and RDB boards
 
 config SOCRATES
 	bool "Socrates"
diff --git a/arch/powerpc/platforms/85xx/Makefile b/arch/powerpc/platforms/85xx/Makefile
index 2eab37ea4a9d..53c9f75a6907 100644
--- a/arch/powerpc/platforms/85xx/Makefile
+++ b/arch/powerpc/platforms/85xx/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_SMP) += smp.o
 obj-y += common.o
 
 obj-$(CONFIG_BSC9131_RDB) += bsc913x_rdb.o
+obj-$(CONFIG_C293_PCIE) += c293pcie.o
 obj-$(CONFIG_MPC8540_ADS) += mpc85xx_ads.o
 obj-$(CONFIG_MPC8560_ADS) += mpc85xx_ads.o
 obj-$(CONFIG_MPC85xx_CDS) += mpc85xx_cds.o
diff --git a/arch/powerpc/platforms/85xx/c293pcie.c b/arch/powerpc/platforms/85xx/c293pcie.c
new file mode 100644
index 000000000000..6208e49142bf
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/c293pcie.c
@@ -0,0 +1,75 @@
+/*
+ * C293PCIE Board Setup
+ *
+ * Copyright 2013 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/of_platform.h>
+
+#include <asm/machdep.h>
+#include <asm/udbg.h>
+#include <asm/mpic.h>
+
+#include <sysdev/fsl_soc.h>
+#include <sysdev/fsl_pci.h>
+
+#include "mpc85xx.h"
+
+void __init c293_pcie_pic_init(void)
+{
+	struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN |
+			    MPIC_SINGLE_DEST_CPU, 0, 256, " OpenPIC ");
+
+	BUG_ON(mpic == NULL);
+
+	mpic_init(mpic);
+}
+
+
+/*
+ * Setup the architecture
+ */
+static void __init c293_pcie_setup_arch(void)
+{
+	if (ppc_md.progress)
+		ppc_md.progress("c293_pcie_setup_arch()", 0);
+
+	fsl_pci_assign_primary();
+
+	printk(KERN_INFO "C293 PCIE board from Freescale Semiconductor\n");
+}
+
+machine_arch_initcall(c293_pcie, mpc85xx_common_publish_devices);
+
+/*
+ * Called very early, device-tree isn't unflattened
+ */
+static int __init c293_pcie_probe(void)
+{
+	unsigned long root = of_get_flat_dt_root();
+
+	if (of_flat_dt_is_compatible(root, "fsl,C293PCIE"))
+		return 1;
+	return 0;
+}
+
+define_machine(c293_pcie) {
+	.name = "C293 PCIE",
+	.probe = c293_pcie_probe,
+	.setup_arch = c293_pcie_setup_arch,
+	.init_IRQ = c293_pcie_pic_init,
+#ifdef CONFIG_PCI
+	.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+#endif
+	.get_irq = mpic_get_irq,
+	.restart = fsl_rstcr_restart,
+	.calibrate_decr = generic_calibrate_decr,
+	.progress = udbg_progress,
+};
diff --git a/arch/powerpc/platforms/85xx/corenet_ds.c b/arch/powerpc/platforms/85xx/corenet_ds.c
index c59c617eee93..aa3690bae415 100644
--- a/arch/powerpc/platforms/85xx/corenet_ds.c
+++ b/arch/powerpc/platforms/85xx/corenet_ds.c
@@ -53,12 +53,6 @@ void __init corenet_ds_setup_arch(void)
 {
 	mpc85xx_smp_init();
 
-#if defined(CONFIG_PCI) && defined(CONFIG_PPC64)
-	pci_devs_phb_init();
-#endif
-
-	fsl_pci_assign_primary();
-
 	swiotlb_detect_4g();
 
 	pr_info("%s board from Freescale Semiconductor\n", ppc_md.name);
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
index ede8771d6f02..53b6fb0a3d56 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
@@ -160,6 +160,7 @@ machine_arch_initcall(p2020_rdb_pc, mpc85xx_common_publish_devices);
 machine_arch_initcall(p1020_mbg_pc, mpc85xx_common_publish_devices);
 machine_arch_initcall(p1020_rdb, mpc85xx_common_publish_devices);
 machine_arch_initcall(p1020_rdb_pc, mpc85xx_common_publish_devices);
+machine_arch_initcall(p1020_rdb_pd, mpc85xx_common_publish_devices);
 machine_arch_initcall(p1020_utm_pc, mpc85xx_common_publish_devices);
 machine_arch_initcall(p1021_rdb_pc, mpc85xx_common_publish_devices);
 machine_arch_initcall(p1025_rdb, mpc85xx_common_publish_devices);
@@ -193,6 +194,13 @@ static int __init p1020_rdb_pc_probe(void)
 	return of_flat_dt_is_compatible(root, "fsl,P1020RDB-PC");
 }
 
+static int __init p1020_rdb_pd_probe(void)
+{
+	unsigned long root = of_get_flat_dt_root();
+
+	return of_flat_dt_is_compatible(root, "fsl,P1020RDB-PD");
+}
+
 static int __init p1021_rdb_pc_probe(void)
 {
 	unsigned long root = of_get_flat_dt_root();
@@ -351,6 +359,20 @@ define_machine(p1020_rdb_pc) {
 	.progress = udbg_progress,
 };
 
+define_machine(p1020_rdb_pd) {
+	.name = "P1020RDB-PD",
+	.probe = p1020_rdb_pd_probe,
+	.setup_arch = mpc85xx_rdb_setup_arch,
+	.init_IRQ = mpc85xx_rdb_pic_init,
+#ifdef CONFIG_PCI
+	.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+#endif
+	.get_irq = mpic_get_irq,
+	.restart = fsl_rstcr_restart,
+	.calibrate_decr = generic_calibrate_decr,
+	.progress = udbg_progress,
+};
+
 define_machine(p1024_rdb) {
 	.name = "P1024 RDB",
 	.probe = p1024_rdb_probe,
diff --git a/arch/powerpc/platforms/85xx/p1023_rds.c b/arch/powerpc/platforms/85xx/p1023_rds.c
index 9cc60a738834..2ae9d490c3d9 100644
--- a/arch/powerpc/platforms/85xx/p1023_rds.c
+++ b/arch/powerpc/platforms/85xx/p1023_rds.c
@@ -1,5 +1,5 @@
 /*
- * Copyright 2010-2011 Freescale Semiconductor, Inc.
+ * Copyright 2010-2011, 2013 Freescale Semiconductor, Inc.
  *
  * Author: Roy Zang <tie-fei.zang@freescale.com>
  *
@@ -86,6 +86,7 @@ static void __init mpc85xx_rds_setup_arch(void)
 }
 
 machine_arch_initcall(p1023_rds, mpc85xx_common_publish_devices);
+machine_arch_initcall(p1023_rdb, mpc85xx_common_publish_devices);
 
 static void __init mpc85xx_rds_pic_init(void)
 {
@@ -106,6 +107,14 @@ static int __init p1023_rds_probe(void)
 
 }
 
+static int __init p1023_rdb_probe(void)
+{
+	unsigned long root = of_get_flat_dt_root();
+
+	return of_flat_dt_is_compatible(root, "fsl,P1023RDB");
+
+}
+
 define_machine(p1023_rds) {
 	.name = "P1023 RDS",
 	.probe = p1023_rds_probe,
@@ -120,3 +129,16 @@ define_machine(p1023_rds) {
 #endif
 };
 
+define_machine(p1023_rdb) {
+	.name = "P1023 RDB",
+	.probe = p1023_rdb_probe,
+	.setup_arch = mpc85xx_rds_setup_arch,
+	.init_IRQ = mpc85xx_rds_pic_init,
+	.get_irq = mpic_get_irq,
+	.restart = fsl_rstcr_restart,
+	.calibrate_decr = generic_calibrate_decr,
+	.progress = udbg_progress,
+#ifdef CONFIG_PCI
+	.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+#endif
+};
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index 5ced4f5bb2b2..281b7f01df63 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -69,7 +69,32 @@ static void mpc85xx_give_timebase(void)
 	tb_req = 0;
 
 	mpc85xx_timebase_freeze(1);
+#ifdef CONFIG_PPC64
+	/*
+	 * e5500/e6500 have a workaround for erratum A-006958 in place
+	 * that will reread the timebase until TBL is non-zero.
+	 * That would be a bad thing when the timebase is frozen.
+	 *
+	 * Thus, we read it manually, and instead of checking that
+	 * TBL is non-zero, we ensure that TB does not change. We don't
+	 * do that for the main mftb implementation, because it requires
+	 * a scratch register
+	 */
+	{
+		u64 prev;
+
+		asm volatile("mfspr %0, %1" : "=r" (timebase) :
+			     "i" (SPRN_TBRL));
+
+		do {
+			prev = timebase;
+			asm volatile("mfspr %0, %1" : "=r" (timebase) :
+				     "i" (SPRN_TBRL));
+		} while (prev != timebase);
+	}
+#else
 	timebase = get_tb();
+#endif
 	mb();
 	tb_valid = 1;
 
@@ -255,6 +280,7 @@ out:
 
 struct smp_ops_t smp_85xx_ops = {
 	.kick_cpu = smp_85xx_kick_cpu,
+	.cpu_bootable = smp_generic_cpu_bootable,
 #ifdef CONFIG_HOTPLUG_CPU
 	.cpu_disable = generic_cpu_disable,
 	.cpu_die = generic_cpu_die,
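
The CONFIG_PPC64 branch added above reads the timebase until two consecutive samples match, because the usual "spin until TBL is non-zero" erratum workaround would hang on a frozen timebase. The read-until-stable idiom on its own, as a standalone sketch (a hypothetical volatile counter stands in for the inline-asm mfspr of SPRN_TBRL):

#include <stdint.h>

/* Hypothetical hardware counter; the kernel code above reads
 * SPRN_TBRL with inline asm instead. */
extern volatile uint64_t tb_reg;

static uint64_t read_stable_tb(void)
{
	uint64_t prev, cur;

	cur = tb_reg;
	/* Two equal back-to-back reads: exits immediately when the
	 * timebase is frozen, and filters an in-flight update when
	 * it is running. */
	do {
		prev = cur;
		cur = tb_reg;
	} while (cur != prev);

	return cur;
}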
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index d703775bda30..bf9c6d4cd26c 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -202,17 +202,12 @@ config PPC_P7_NAP
 	bool
 	default n
 
-config PPC_INDIRECT_IO
-	bool
-	select GENERIC_IOMAP
-
 config PPC_INDIRECT_PIO
 	bool
-	select PPC_INDIRECT_IO
+	select GENERIC_IOMAP
 
 config PPC_INDIRECT_MMIO
 	bool
-	select PPC_INDIRECT_IO
 
 config PPC_IO_WORKAROUNDS
 	bool
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 47d9a03dd415..6704e2e20e6b 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -96,18 +96,31 @@ config GENERIC_CPU
 
 config CELL_CPU
 	bool "Cell Broadband Engine"
+	depends on PPC_BOOK3S_64
 
 config POWER4_CPU
 	bool "POWER4"
+	depends on PPC_BOOK3S_64
 
 config POWER5_CPU
 	bool "POWER5"
+	depends on PPC_BOOK3S_64
 
 config POWER6_CPU
 	bool "POWER6"
+	depends on PPC_BOOK3S_64
 
 config POWER7_CPU
 	bool "POWER7"
+	depends on PPC_BOOK3S_64
+
+config E5500_CPU
+	bool "Freescale e5500"
+	depends on E500
+
+config E6500_CPU
+	bool "Freescale e6500"
+	depends on E500
 
 endchoice
 
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index 946306b1bb4e..b53560660b72 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -697,7 +697,7 @@ static int __init cell_iommu_get_window(struct device_node *np,
 				 unsigned long *base,
 				 unsigned long *size)
 {
-	const void *dma_window;
+	const __be32 *dma_window;
 	unsigned long index;
 
 	/* Use ibm,dma-window if available, else, hard code ! */
diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c
index f75f6fcac729..90745eaa45fe 100644
--- a/arch/powerpc/platforms/cell/smp.c
+++ b/arch/powerpc/platforms/cell/smp.c
@@ -136,25 +136,12 @@ static int smp_cell_kick_cpu(int nr)
 	return 0;
 }
 
-static int smp_cell_cpu_bootable(unsigned int nr)
-{
-	/* Special case - we inhibit secondary thread startup
-	 * during boot if the user requests it. Odd-numbered
-	 * cpus are assumed to be secondary threads.
-	 */
-	if (system_state == SYSTEM_BOOTING &&
-	    cpu_has_feature(CPU_FTR_SMT) &&
-	    !smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
-		return 0;
-
-	return 1;
-}
 static struct smp_ops_t bpa_iic_smp_ops = {
 	.message_pass = iic_message_pass,
 	.probe = smp_iic_probe,
 	.kick_cpu = smp_cell_kick_cpu,
 	.setup_cpu = smp_cell_setup_cpu,
-	.cpu_bootable = smp_cell_cpu_bootable,
+	.cpu_bootable = smp_generic_cpu_bootable,
 };
 
 /* This is called very early */
diff --git a/arch/powerpc/platforms/powernv/Kconfig b/arch/powerpc/platforms/powernv/Kconfig
index c24684c818ab..6fae5eb99ea6 100644
--- a/arch/powerpc/platforms/powernv/Kconfig
+++ b/arch/powerpc/platforms/powernv/Kconfig
@@ -7,6 +7,8 @@ config PPC_POWERNV
 	select PPC_P7_NAP
 	select PPC_PCI_CHOICE if EMBEDDED
 	select EPAPR_BOOT
+	select PPC_INDIRECT_PIO
+	select PPC_UDBG_16550
 	default y
 
 config POWERNV_MSI
diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile
index 7fe595152478..300c437d713c 100644
--- a/arch/powerpc/platforms/powernv/Makefile
+++ b/arch/powerpc/platforms/powernv/Makefile
@@ -1,5 +1,5 @@
 obj-y += setup.o opal-takeover.o opal-wrappers.o opal.o
-obj-y += opal-rtc.o opal-nvram.o
+obj-y += opal-rtc.o opal-nvram.o opal-lpc.o
 
 obj-$(CONFIG_SMP) += smp.o
 obj-$(CONFIG_PCI) += pci.o pci-p5ioc2.o pci-ioda.o
diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c
index 0cd1c4a71755..cf42e74514fa 100644
--- a/arch/powerpc/platforms/powernv/eeh-ioda.c
+++ b/arch/powerpc/platforms/powernv/eeh-ioda.c
@@ -36,13 +36,6 @@
 #include "powernv.h"
 #include "pci.h"
 
-/* Debugging option */
-#ifdef IODA_EEH_DBG_ON
-#define IODA_EEH_DBG(args...) pr_info(args)
-#else
-#define IODA_EEH_DBG(args...)
-#endif
-
 static char *hub_diag = NULL;
 static int ioda_eeh_nb_init = 0;
 
@@ -823,17 +816,17 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
 
 		/* If OPAL API returns error, we needn't proceed */
 		if (rc != OPAL_SUCCESS) {
-			IODA_EEH_DBG("%s: Invalid return value on "
-				     "PHB#%x (0x%lx) from opal_pci_next_error",
-				     __func__, hose->global_number, rc);
+			pr_devel("%s: Invalid return value on "
+				 "PHB#%x (0x%lx) from opal_pci_next_error",
+				 __func__, hose->global_number, rc);
 			continue;
 		}
 
 		/* If the PHB doesn't have error, stop processing */
 		if (err_type == OPAL_EEH_NO_ERROR ||
 		    severity == OPAL_EEH_SEV_NO_ERROR) {
-			IODA_EEH_DBG("%s: No error found on PHB#%x\n",
-				     __func__, hose->global_number);
+			pr_devel("%s: No error found on PHB#%x\n",
+				 __func__, hose->global_number);
 			continue;
 		}
 
@@ -842,8 +835,9 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
 		 * highest priority reported upon multiple errors on the
 		 * specific PHB.
 		 */
-		IODA_EEH_DBG("%s: Error (%d, %d, %d) on PHB#%x\n",
-			     err_type, severity, pe_no, hose->global_number);
+		pr_devel("%s: Error (%d, %d, %llu) on PHB#%x\n",
+			 __func__, err_type, severity,
+			 frozen_pe_no, hose->global_number);
 		switch (err_type) {
 		case OPAL_EEH_IOC_ERROR:
 			if (severity == OPAL_EEH_SEV_IOC_DEAD) {
diff --git a/arch/powerpc/platforms/powernv/opal-lpc.c b/arch/powerpc/platforms/powernv/opal-lpc.c
new file mode 100644
index 000000000000..a7614bb14e17
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/opal-lpc.c
@@ -0,0 +1,203 @@
+/*
+ * PowerNV LPC bus handling.
+ *
+ * Copyright 2013 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/bug.h>
+
+#include <asm/machdep.h>
+#include <asm/firmware.h>
+#include <asm/xics.h>
+#include <asm/opal.h>
+
+static int opal_lpc_chip_id = -1;
+
+static u8 opal_lpc_inb(unsigned long port)
+{
+	int64_t rc;
+	uint32_t data;
+
+	if (opal_lpc_chip_id < 0 || port > 0xffff)
+		return 0xff;
+	rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 1);
+	return rc ? 0xff : data;
+}
+
+static __le16 __opal_lpc_inw(unsigned long port)
+{
+	int64_t rc;
+	uint32_t data;
+
+	if (opal_lpc_chip_id < 0 || port > 0xfffe)
+		return 0xffff;
+	if (port & 1)
+		return (__le16)opal_lpc_inb(port) << 8 | opal_lpc_inb(port + 1);
+	rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 2);
+	return rc ? 0xffff : data;
+}
+static u16 opal_lpc_inw(unsigned long port)
+{
+	return le16_to_cpu(__opal_lpc_inw(port));
+}
+
+static __le32 __opal_lpc_inl(unsigned long port)
+{
+	int64_t rc;
+	uint32_t data;
+
+	if (opal_lpc_chip_id < 0 || port > 0xfffc)
+		return 0xffffffff;
+	if (port & 3)
+		return (__le32)opal_lpc_inb(port) << 24 |
+		       (__le32)opal_lpc_inb(port + 1) << 16 |
+		       (__le32)opal_lpc_inb(port + 2) << 8 |
+		       opal_lpc_inb(port + 3);
+	rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 4);
+	return rc ? 0xffffffff : data;
+}
+
+static u32 opal_lpc_inl(unsigned long port)
+{
+	return le32_to_cpu(__opal_lpc_inl(port));
+}
+
+static void opal_lpc_outb(u8 val, unsigned long port)
+{
+	if (opal_lpc_chip_id < 0 || port > 0xffff)
+		return;
+	opal_lpc_write(opal_lpc_chip_id, OPAL_LPC_IO, port, val, 1);
+}
+
+static void __opal_lpc_outw(__le16 val, unsigned long port)
+{
+	if (opal_lpc_chip_id < 0 || port > 0xfffe)
+		return;
+	if (port & 1) {
+		opal_lpc_outb(val >> 8, port);
+		opal_lpc_outb(val, port + 1);
+		return;
+	}
+	opal_lpc_write(opal_lpc_chip_id, OPAL_LPC_IO, port, val, 2);
+}
+
+static void opal_lpc_outw(u16 val, unsigned long port)
+{
+	__opal_lpc_outw(cpu_to_le16(val), port);
+}
+
+static void __opal_lpc_outl(__le32 val, unsigned long port)
+{
+	if (opal_lpc_chip_id < 0 || port > 0xfffc)
+		return;
+	if (port & 3) {
+		opal_lpc_outb(val >> 24, port);
+		opal_lpc_outb(val >> 16, port + 1);
+		opal_lpc_outb(val >> 8, port + 2);
+		opal_lpc_outb(val, port + 3);
+		return;
+	}
+	opal_lpc_write(opal_lpc_chip_id, OPAL_LPC_IO, port, val, 4);
+}
+
+static void opal_lpc_outl(u32 val, unsigned long port)
+{
+	__opal_lpc_outl(cpu_to_le32(val), port);
+}
+
+static void opal_lpc_insb(unsigned long p, void *b, unsigned long c)
+{
+	u8 *ptr = b;
+
+	while(c--)
+		*(ptr++) = opal_lpc_inb(p);
+}
+
+static void opal_lpc_insw(unsigned long p, void *b, unsigned long c)
+{
+	__le16 *ptr = b;
+
+	while(c--)
+		*(ptr++) = __opal_lpc_inw(p);
+}
+
+static void opal_lpc_insl(unsigned long p, void *b, unsigned long c)
+{
+	__le32 *ptr = b;
+
+	while(c--)
+		*(ptr++) = __opal_lpc_inl(p);
+}
+
+static void opal_lpc_outsb(unsigned long p, const void *b, unsigned long c)
+{
+	const u8 *ptr = b;
+
+	while(c--)
+		opal_lpc_outb(*(ptr++), p);
+}
+
+static void opal_lpc_outsw(unsigned long p, const void *b, unsigned long c)
+{
+	const __le16 *ptr = b;
+
+	while(c--)
+		__opal_lpc_outw(*(ptr++), p);
+}
+
+static void opal_lpc_outsl(unsigned long p, const void *b, unsigned long c)
+{
+	const __le32 *ptr = b;
+
+	while(c--)
+		__opal_lpc_outl(*(ptr++), p);
+}
+
+static const struct ppc_pci_io opal_lpc_io = {
+	.inb = opal_lpc_inb,
+	.inw = opal_lpc_inw,
+	.inl = opal_lpc_inl,
+	.outb = opal_lpc_outb,
+	.outw = opal_lpc_outw,
+	.outl = opal_lpc_outl,
+	.insb = opal_lpc_insb,
+	.insw = opal_lpc_insw,
+	.insl = opal_lpc_insl,
+	.outsb = opal_lpc_outsb,
+	.outsw = opal_lpc_outsw,
+	.outsl = opal_lpc_outsl,
+};
+
+void opal_lpc_init(void)
+{
+	struct device_node *np;
+
+	/*
+	 * Look for a Power8 LPC bus tagged as "primary",
+	 * we currently support only one though the OPAL APIs
+	 * support any number.
+	 */
+	for_each_compatible_node(np, NULL, "ibm,power8-lpc") {
+		if (!of_device_is_available(np))
+			continue;
+		if (!of_get_property(np, "primary", NULL))
+			continue;
+		opal_lpc_chip_id = of_get_ibm_chip_id(np);
+		break;
+	}
+	if (opal_lpc_chip_id < 0)
+		return;
+
+	/* Setup special IO ops */
+	ppc_pci_io = opal_lpc_io;
+	isa_io_special = true;
+
+	pr_info("OPAL: Power8 LPC bus found, chip ID %d\n", opal_lpc_chip_id);
+}
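
Note how __opal_lpc_inw()/__opal_lpc_inl() fall back to byte reads when the port is misaligned and reassemble the value with the first byte in the high half, mirroring the shifts above. That decomposition in isolation, as a sketch (hypothetical lpc_read8()/lpc_read16_aligned() backends standing in for the opal_lpc_read() firmware call; the __le16 bookkeeping is omitted):

#include <stdint.h>

/* Hypothetical backends; the real driver calls opal_lpc_read()
 * with a 1- or 2-byte length instead. */
extern uint8_t lpc_read8(unsigned long port);
extern uint16_t lpc_read16_aligned(unsigned long port);

static uint16_t lpc_read16(unsigned long port)
{
	/* An odd port cannot be read as one 16-bit transaction, so
	 * issue two byte reads and splice them together, first byte
	 * in the high bits, exactly as __opal_lpc_inw() does. */
	if (port & 1)
		return (uint16_t)((lpc_read8(port) << 8) |
				  lpc_read8(port + 1));
	return lpc_read16_aligned(port);
}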
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index e88863ffb135..8f3844535fbb 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -111,3 +111,8 @@ OPAL_CALL(opal_pci_next_error, OPAL_PCI_NEXT_ERROR);
 OPAL_CALL(opal_pci_poll, OPAL_PCI_POLL);
 OPAL_CALL(opal_pci_msi_eoi, OPAL_PCI_MSI_EOI);
 OPAL_CALL(opal_pci_get_phb_diag_data2, OPAL_PCI_GET_PHB_DIAG_DATA2);
+OPAL_CALL(opal_xscom_read, OPAL_XSCOM_READ);
+OPAL_CALL(opal_xscom_write, OPAL_XSCOM_WRITE);
+OPAL_CALL(opal_lpc_read, OPAL_LPC_READ);
+OPAL_CALL(opal_lpc_write, OPAL_LPC_WRITE);
+OPAL_CALL(opal_return_cpu, OPAL_RETURN_CPU);
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 106301fd2fa5..2911abe550f1 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -380,18 +380,20 @@ static int __init opal_init(void)
 		pr_warn("opal: Node not found\n");
 		return -ENODEV;
 	}
+
+	/* Register OPAL consoles if any ports */
 	if (firmware_has_feature(FW_FEATURE_OPALv2))
 		consoles = of_find_node_by_path("/ibm,opal/consoles");
 	else
 		consoles = of_node_get(opal_node);
-
-	/* Register serial ports */
-	for_each_child_of_node(consoles, np) {
-		if (strcmp(np->name, "serial"))
-			continue;
-		of_platform_device_create(np, NULL, NULL);
+	if (consoles) {
+		for_each_child_of_node(consoles, np) {
+			if (strcmp(np->name, "serial"))
+				continue;
+			of_platform_device_create(np, NULL, NULL);
+		}
+		of_node_put(consoles);
 	}
-	of_node_put(consoles);
 
 	/* Find all OPAL interrupts and request them */
 	irqs = of_get_property(opal_node, "opal-interrupts", &irqlen);
@@ -422,7 +424,7 @@ void opal_shutdown(void)
 
 	for (i = 0; i < opal_irq_count; i++) {
 		if (opal_irqs[i])
-			free_irq(opal_irqs[i], 0);
+			free_irq(opal_irqs[i], NULL);
 		opal_irqs[i] = 0;
 	}
 }
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index d8140b125e62..74a5a5773b1f 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -1104,16 +1104,16 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np,
 			  u64 hub_id, int ioda_type)
 {
 	struct pci_controller *hose;
-	static int primary = 1;
 	struct pnv_phb *phb;
 	unsigned long size, m32map_off, iomap_off, pemap_off;
 	const u64 *prop64;
 	const u32 *prop32;
+	int len;
 	u64 phb_id;
 	void *aux;
 	long rc;
 
-	pr_info(" Initializing IODA%d OPAL PHB %s\n", ioda_type, np->full_name);
+	pr_info("Initializing IODA%d OPAL PHB %s\n", ioda_type, np->full_name);
 
 	prop64 = of_get_property(np, "ibm,opal-phbid", NULL);
 	if (!prop64) {
@@ -1124,20 +1124,31 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np,
 	pr_debug(" PHB-ID : 0x%016llx\n", phb_id);
 
 	phb = alloc_bootmem(sizeof(struct pnv_phb));
-	if (phb) {
-		memset(phb, 0, sizeof(struct pnv_phb));
-		phb->hose = hose = pcibios_alloc_controller(np);
+	if (!phb) {
+		pr_err(" Out of memory !\n");
+		return;
 	}
-	if (!phb || !phb->hose) {
-		pr_err("PCI: Failed to allocate PCI controller for %s\n",
+
+	/* Allocate PCI controller */
+	memset(phb, 0, sizeof(struct pnv_phb));
+	phb->hose = hose = pcibios_alloc_controller(np);
+	if (!phb->hose) {
+		pr_err(" Can't allocate PCI controller for %s\n",
 		       np->full_name);
+		free_bootmem((unsigned long)phb, sizeof(struct pnv_phb));
 		return;
 	}
 
 	spin_lock_init(&phb->lock);
-	/* XXX Use device-tree */
-	hose->first_busno = 0;
-	hose->last_busno = 0xff;
+	prop32 = of_get_property(np, "bus-range", &len);
+	if (prop32 && len == 8) {
+		hose->first_busno = prop32[0];
+		hose->last_busno = prop32[1];
+	} else {
+		pr_warn(" Broken <bus-range> on %s\n", np->full_name);
+		hose->first_busno = 0;
+		hose->last_busno = 0xff;
+	}
 	hose->private_data = phb;
 	phb->hub_id = hub_id;
 	phb->opal_id = phb_id;
@@ -1152,8 +1163,7 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np,
 	phb->model = PNV_PHB_MODEL_UNKNOWN;
 
 	/* Parse 32-bit and IO ranges (if any) */
-	pci_process_bridge_OF_ranges(phb->hose, np, primary);
-	primary = 0;
+	pci_process_bridge_OF_ranges(hose, np, !hose->global_number);
 
 	/* Get registers */
 	phb->regs = of_iomap(np, 0);
@@ -1177,22 +1187,23 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np,
 	phb->ioda.io_segsize = phb->ioda.io_size / phb->ioda.total_pe;
 	phb->ioda.io_pci_base = 0; /* XXX calculate this ? */
 
-	/* Allocate aux data & arrays
-	 *
-	 * XXX TODO: Don't allocate io segmap on PHB3
-	 */
+	/* Allocate aux data & arrays. We don't have IO ports on PHB3 */
 	size = _ALIGN_UP(phb->ioda.total_pe / 8, sizeof(unsigned long));
 	m32map_off = size;
 	size += phb->ioda.total_pe * sizeof(phb->ioda.m32_segmap[0]);
 	iomap_off = size;
-	size += phb->ioda.total_pe * sizeof(phb->ioda.io_segmap[0]);
+	if (phb->type == PNV_PHB_IODA1) {
+		iomap_off = size;
+		size += phb->ioda.total_pe * sizeof(phb->ioda.io_segmap[0]);
+	}
 	pemap_off = size;
 	size += phb->ioda.total_pe * sizeof(struct pnv_ioda_pe);
 	aux = alloc_bootmem(size);
 	memset(aux, 0, size);
 	phb->ioda.pe_alloc = aux;
 	phb->ioda.m32_segmap = aux + m32map_off;
-	phb->ioda.io_segmap = aux + iomap_off;
+	if (phb->type == PNV_PHB_IODA1)
+		phb->ioda.io_segmap = aux + iomap_off;
 	phb->ioda.pe_array = aux + pemap_off;
 	set_bit(0, phb->ioda.pe_alloc);
 
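
The <bus-range> read above replaces hard-coded 0..0xff bus numbers: the property is two 32-bit cells, first and last bus, so only a length of exactly 8 bytes is trusted. The same validate-or-fall-back shape as a standalone sketch (a hypothetical get_property() in place of of_get_property()):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical device-tree lookup; returns the property value and
 * stores its length in bytes, or returns NULL when absent. */
extern const uint32_t *get_property(const char *name, int *len);

static void parse_bus_range(int *first, int *last)
{
	int len;
	const uint32_t *prop = get_property("bus-range", &len);

	if (prop && len == 8) {		/* exactly two u32 cells */
		*first = prop[0];
		*last = prop[1];
	} else {			/* malformed: keep the old default */
		fprintf(stderr, "Broken <bus-range>, using 0..0xff\n");
		*first = 0;
		*last = 0xff;
	}
}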
diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h
index a1c6f83fc391..de6819be1f95 100644
--- a/arch/powerpc/platforms/powernv/powernv.h
+++ b/arch/powerpc/platforms/powernv/powernv.h
@@ -15,4 +15,6 @@ static inline void pnv_pci_init(void) { }
 static inline void pnv_pci_shutdown(void) { }
 #endif
 
+extern void pnv_lpc_init(void);
+
 #endif /* _POWERNV_H */
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index 84438af96c05..e239dcfa224c 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -31,6 +31,7 @@
 #include <asm/xics.h>
 #include <asm/rtas.h>
 #include <asm/opal.h>
+#include <asm/kexec.h>
 
 #include "powernv.h"
 
@@ -54,6 +55,12 @@ static void __init pnv_setup_arch(void)
 
 static void __init pnv_init_early(void)
 {
+	/*
+	 * Initialize the LPC bus now so that legacy serial
+	 * ports can be found on it
+	 */
+	opal_lpc_init();
+
 #ifdef CONFIG_HVC_OPAL
 	if (firmware_has_feature(FW_FEATURE_OPAL))
 		hvc_opal_init_early();
@@ -147,6 +154,16 @@ static void pnv_shutdown(void)
 static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
 {
 	xics_kexec_teardown_cpu(secondary);
+
+	/* Return secondary CPUs to firmware on OPAL v3 */
+	if (firmware_has_feature(FW_FEATURE_OPALv3) && secondary) {
+		mb();
+		get_paca()->kexec_state = KEXEC_STATE_REAL_MODE;
+		mb();
+
+		/* Return the CPU to OPAL */
+		opal_return_cpu();
+	}
 }
 #endif /* CONFIG_KEXEC */
 
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index 89e3857af4e0..908672bdcea6 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -46,22 +46,6 @@ static void pnv_smp_setup_cpu(int cpu)
 	xics_setup_cpu();
 }
 
-static int pnv_smp_cpu_bootable(unsigned int nr)
-{
-	/* Special case - we inhibit secondary thread startup
-	 * during boot if the user requests it.
-	 */
-	if (system_state == SYSTEM_BOOTING && cpu_has_feature(CPU_FTR_SMT)) {
-		if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
-			return 0;
-		if (smt_enabled_at_boot
-		    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
-			return 0;
-	}
-
-	return 1;
-}
-
 int pnv_smp_kick_cpu(int nr)
 {
 	unsigned int pcpu = get_hard_smp_processor_id(nr);
@@ -195,7 +179,7 @@ static struct smp_ops_t pnv_smp_ops = {
 	.probe = xics_smp_probe,
 	.kick_cpu = pnv_smp_kick_cpu,
 	.setup_cpu = pnv_smp_setup_cpu,
-	.cpu_bootable = pnv_smp_cpu_bootable,
+	.cpu_bootable = smp_generic_cpu_bootable,
 #ifdef CONFIG_HOTPLUG_CPU
 	.cpu_disable = pnv_smp_cpu_disable,
 	.cpu_die = generic_cpu_die,
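
This is the third cpu_bootable copy removed in this series (after 85xx and cell), all replaced by smp_generic_cpu_bootable(). The generic helper's body is not shown in these hunks; presumably it carries the same logic as the pnv version deleted above, roughly:

/* Sketch only - assumes the consolidated helper in
 * arch/powerpc/kernel/smp.c mirrors the removed pnv copy. */
int smp_generic_cpu_bootable(unsigned int nr)
{
	/* Special case - we inhibit secondary thread startup
	 * during boot if the user requests it.
	 */
	if (system_state == SYSTEM_BOOTING && cpu_has_feature(CPU_FTR_SMT)) {
		if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
			return 0;
		if (smt_enabled_at_boot
		    && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
			return 0;
	}

	return 1;
}

The cell copy also special-cased odd-numbered cpus as secondary threads; cpu_thread_in_core() subsumes that check.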
diff --git a/arch/powerpc/platforms/ps3/time.c b/arch/powerpc/platforms/ps3/time.c
index cba1e6be68e5..ce73ce865613 100644
--- a/arch/powerpc/platforms/ps3/time.c
+++ b/arch/powerpc/platforms/ps3/time.c
@@ -90,7 +90,7 @@ static int __init ps3_rtc_init(void)
 
 	pdev = platform_device_register_simple("rtc-ps3", -1, NULL, 0);
 
-	return PTR_RET(pdev);
+	return PTR_ERR_OR_ZERO(pdev);
 }
 
 module_init(ps3_rtc_init);
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index 8ae010381316..6c61ec5ee914 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_CMM) += cmm.o
 obj-$(CONFIG_DTL) += dtl.o
 obj-$(CONFIG_IO_EVENT_IRQ) += io_event_irq.o
 obj-$(CONFIG_PSERIES_IDLE) += processor_idle.o
+obj-$(CONFIG_LPARCFG) += lparcfg.o
 
 ifeq ($(CONFIG_PPC_PSERIES),y)
 obj-$(CONFIG_SUSPEND) += suspend.o
diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c
index c638535753df..1e561bef459b 100644
--- a/arch/powerpc/platforms/pseries/cmm.c
+++ b/arch/powerpc/platforms/pseries/cmm.c
@@ -40,8 +40,7 @@
 #include <asm/pgalloc.h>
 #include <asm/uaccess.h>
 #include <linux/memory.h>
-
-#include "plpar_wrappers.h"
+#include <asm/plpar_wrappers.h>
 
 #define CMM_DRIVER_VERSION "1.0.0"
 #define CMM_DEFAULT_DELAY 1
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index a1a7b9a67ffd..7cfdaae1721a 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -63,26 +63,32 @@ static struct property *dlpar_parse_cc_property(struct cc_workarea *ccwa)
 	return prop;
 }
 
-static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa)
+static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa,
+					       const char *path)
 {
 	struct device_node *dn;
 	char *name;
 
+	/* If parent node path is "/" advance path to NULL terminator to
+	 * prevent double leading slashs in full_name.
+	 */
+	if (!path[1])
+		path++;
+
 	dn = kzalloc(sizeof(*dn), GFP_KERNEL);
 	if (!dn)
 		return NULL;
 
-	/* The configure connector reported name does not contain a
-	 * preceding '/', so we allocate a buffer large enough to
-	 * prepend this to the full_name.
-	 */
 	name = (char *)ccwa + ccwa->name_offset;
-	dn->full_name = kasprintf(GFP_KERNEL, "/%s", name);
+	dn->full_name = kasprintf(GFP_KERNEL, "%s/%s", path, name);
 	if (!dn->full_name) {
 		kfree(dn);
 		return NULL;
 	}
 
+	of_node_set_flag(dn, OF_DYNAMIC);
+	kref_init(&dn->kref);
+
 	return dn;
 }
 
@@ -120,7 +126,8 @@ void dlpar_free_cc_nodes(struct device_node *dn)
 #define CALL_AGAIN -2
 #define ERR_CFG_USE -9003
 
-struct device_node *dlpar_configure_connector(u32 drc_index)
+struct device_node *dlpar_configure_connector(u32 drc_index,
+					      struct device_node *parent)
 {
 	struct device_node *dn;
 	struct device_node *first_dn = NULL;
@@ -129,6 +136,7 @@ struct device_node *dlpar_configure_connector(u32 drc_index)
 	struct property *last_property = NULL;
 	struct cc_workarea *ccwa;
 	char *data_buf;
+	const char *parent_path = parent->full_name;
 	int cc_token;
 	int rc = -1;
 
@@ -162,7 +170,7 @@ struct device_node *dlpar_configure_connector(u32 drc_index)
 			break;
 
 		case NEXT_SIBLING:
-			dn = dlpar_parse_cc_node(ccwa);
+			dn = dlpar_parse_cc_node(ccwa, parent_path);
 			if (!dn)
 				goto cc_error;
 
@@ -172,13 +180,17 @@ struct device_node *dlpar_configure_connector(u32 drc_index)
 			break;
 
 		case NEXT_CHILD:
-			dn = dlpar_parse_cc_node(ccwa);
+			if (first_dn)
+				parent_path = last_dn->full_name;
+
+			dn = dlpar_parse_cc_node(ccwa, parent_path);
 			if (!dn)
 				goto cc_error;
 
-			if (!first_dn)
+			if (!first_dn) {
+				dn->parent = parent;
 				first_dn = dn;
-			else {
+			} else {
 				dn->parent = last_dn;
 				if (last_dn)
 					last_dn->child = dn;
@@ -202,6 +214,7 @@ struct device_node *dlpar_configure_connector(u32 drc_index)
 
 		case PREV_PARENT:
 			last_dn = last_dn->parent;
+			parent_path = last_dn->parent->full_name;
 			break;
 
 		case CALL_AGAIN:
@@ -256,8 +269,6 @@ int dlpar_attach_node(struct device_node *dn)
 {
 	int rc;
 
-	of_node_set_flag(dn, OF_DYNAMIC);
-	kref_init(&dn->kref);
 	dn->parent = derive_parent(dn->full_name);
 	if (!dn->parent)
 		return -ENOMEM;
@@ -275,8 +286,15 @@ int dlpar_attach_node(struct device_node *dn)
 
 int dlpar_detach_node(struct device_node *dn)
 {
+	struct device_node *child;
 	int rc;
 
+	child = of_get_next_child(dn, NULL);
+	while (child) {
+		dlpar_detach_node(child);
+		child = of_get_next_child(dn, child);
+	}
+
 	rc = of_detach_node(dn);
 	if (rc)
 		return rc;
@@ -382,9 +400,8 @@ out:
 
 static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
 {
-	struct device_node *dn;
+	struct device_node *dn, *parent;
 	unsigned long drc_index;
-	char *cpu_name;
 	int rc;
 
 	cpu_hotplug_driver_lock();
@@ -394,25 +411,19 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
 		goto out;
 	}
 
-	dn = dlpar_configure_connector(drc_index);
-	if (!dn) {
-		rc = -EINVAL;
+	parent = of_find_node_by_path("/cpus");
+	if (!parent) {
+		rc = -ENODEV;
 		goto out;
 	}
 
-	/* configure-connector reports cpus as living in the base
-	 * directory of the device tree. CPUs actually live in the
-	 * cpus directory so we need to fixup the full_name.
-	 */
-	cpu_name = kasprintf(GFP_KERNEL, "/cpus%s", dn->full_name);
-	if (!cpu_name) {
-		dlpar_free_cc_nodes(dn);
-		rc = -ENOMEM;
+	dn = dlpar_configure_connector(drc_index, parent);
+	if (!dn) {
+		rc = -EINVAL;
 		goto out;
 	}
 
-	kfree(dn->full_name);
-	dn->full_name = cpu_name;
+	of_node_put(parent);
 
 	rc = dlpar_acquire_drc(drc_index);
 	if (rc) {
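
The dlpar change threads the parent's path into dlpar_parse_cc_node() so full_name is built as "<parent>/<name>", and the !path[1] test keeps a root parent ("/") from yielding "//node". The path-joining trick in isolation, as a runnable sketch (hypothetical node names):

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

/* Join a parent path and node name; when the parent is exactly "/",
 * step past the slash to its NUL so we don't emit "//name" - the
 * same !path[1] trick as dlpar_parse_cc_node(). */
static char *build_full_name(const char *path, const char *name)
{
	char *full;

	if (!path[1])
		path++;

	if (asprintf(&full, "%s/%s", path, name) < 0)
		return NULL;
	return full;
}

int main(void)
{
	char *a = build_full_name("/", "cpus");
	char *b = build_full_name("/cpus", "PowerPC,POWER7@0");

	printf("%s\n%s\n", a, b);	/* "/cpus" then "/cpus/PowerPC,POWER7@0" */
	free(a);
	free(b);
	return 0;
}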
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c
index 0cc0ac07a55d..5db66f1fbc26 100644
--- a/arch/powerpc/platforms/pseries/dtl.c
+++ b/arch/powerpc/platforms/pseries/dtl.c
@@ -29,8 +29,7 @@
 #include <asm/firmware.h>
 #include <asm/lppaca.h>
 #include <asm/debug.h>
-
-#include "plpar_wrappers.h"
+#include <asm/plpar_wrappers.h>
 
 struct dtl {
 	struct dtl_entry *buf;
@@ -87,7 +86,7 @@ static void consume_dtle(struct dtl_entry *dtle, u64 index)
 	barrier();
 
 	/* check for hypervisor ring buffer overflow, ignore this entry if so */
-	if (index + N_DISPATCH_LOG < vpa->dtl_idx)
+	if (index + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx))
 		return;
 
 	++wp;
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index 217ca5c75b20..82789e79e539 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -30,7 +30,8 @@
 #include <asm/machdep.h>
 #include <asm/vdso_datapage.h>
 #include <asm/xics.h>
-#include "plpar_wrappers.h"
+#include <asm/plpar_wrappers.h>
+
 #include "offline_states.h"
 
 /* This version can't take the spinlock, because it never returns */
@@ -123,7 +124,7 @@ static void pseries_mach_cpu_die(void)
 		cede_latency_hint = 2;
 
 	get_lppaca()->idle = 1;
-	if (!get_lppaca()->shared_proc)
+	if (!lppaca_shared_proc(get_lppaca()))
 		get_lppaca()->donate_dedicated_cpu = 1;
 
 	while (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
@@ -137,7 +138,7 @@ static void pseries_mach_cpu_die(void)
 
 	local_irq_disable();
 
-	if (!get_lppaca()->shared_proc)
+	if (!lppaca_shared_proc(get_lppaca()))
 		get_lppaca()->donate_dedicated_cpu = 0;
 	get_lppaca()->idle = 0;
 
diff --git a/arch/powerpc/platforms/pseries/hvconsole.c b/arch/powerpc/platforms/pseries/hvconsole.c
index b344f94b0400..849b29b3e9ae 100644
--- a/arch/powerpc/platforms/pseries/hvconsole.c
+++ b/arch/powerpc/platforms/pseries/hvconsole.c
@@ -28,7 +28,7 @@
 #include <linux/errno.h>
 #include <asm/hvcall.h>
 #include <asm/hvconsole.h>
-#include "plpar_wrappers.h"
+#include <asm/plpar_wrappers.h>
 
 /**
  * hvc_get_chars - retrieve characters from firmware for denoted vterm adatper
@@ -40,10 +40,16 @@
  */
 int hvc_get_chars(uint32_t vtermno, char *buf, int count)
 {
-	unsigned long got;
+	long ret;
+	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+	unsigned long *lbuf = (unsigned long *)buf;
+
+	ret = plpar_hcall(H_GET_TERM_CHAR, retbuf, vtermno);
+	lbuf[0] = be64_to_cpu(retbuf[1]);
+	lbuf[1] = be64_to_cpu(retbuf[2]);
 
-	if (plpar_get_term_char(vtermno, &got, buf) == H_SUCCESS)
-		return got;
+	if (ret == H_SUCCESS)
+		return retbuf[0];
 
 	return 0;
 }
@@ -69,8 +75,9 @@ int hvc_put_chars(uint32_t vtermno, const char *buf, int count)
69 if (count > MAX_VIO_PUT_CHARS) 75 if (count > MAX_VIO_PUT_CHARS)
70 count = MAX_VIO_PUT_CHARS; 76 count = MAX_VIO_PUT_CHARS;
71 77
72 ret = plpar_hcall_norets(H_PUT_TERM_CHAR, vtermno, count, lbuf[0], 78 ret = plpar_hcall_norets(H_PUT_TERM_CHAR, vtermno, count,
73 lbuf[1]); 79 cpu_to_be64(lbuf[0]),
80 cpu_to_be64(lbuf[1]));
74 if (ret == H_SUCCESS) 81 if (ret == H_SUCCESS)
75 return count; 82 return count;
76 if (ret == H_BUSY) 83 if (ret == H_BUSY)
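
Both hunks above move the byte-order handling into hvc_get_chars()/hvc_put_chars(): the hcall registers carry the characters big-endian, so each 8-byte chunk is swapped on a little-endian host. A sketch of the two directions, using __builtin_bswap64 as a stand-in for be64_to_cpu()/cpu_to_be64() (both would be no-ops on a big-endian host):

#include <stdint.h>
#include <string.h>

static void unpack_term_chars(const uint64_t reg[2], char buf[16])
{
	uint64_t lbuf[2];

	lbuf[0] = __builtin_bswap64(reg[0]);
	lbuf[1] = __builtin_bswap64(reg[1]);
	memcpy(buf, lbuf, sizeof(lbuf));	/* bytes now in wire order */
}

static void pack_term_chars(const char buf[16], uint64_t reg[2])
{
	uint64_t lbuf[2];

	memcpy(lbuf, buf, sizeof(lbuf));
	reg[0] = __builtin_bswap64(lbuf[0]);
	reg[1] = __builtin_bswap64(lbuf[1]);
}
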
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 23fc1dcf4434..0307901e4132 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -48,8 +48,7 @@
48#include <asm/ppc-pci.h> 48#include <asm/ppc-pci.h>
49#include <asm/udbg.h> 49#include <asm/udbg.h>
50#include <asm/mmzone.h> 50#include <asm/mmzone.h>
51 51#include <asm/plpar_wrappers.h>
52#include "plpar_wrappers.h"
53 52
54 53
55static void tce_invalidate_pSeries_sw(struct iommu_table *tbl, 54static void tce_invalidate_pSeries_sw(struct iommu_table *tbl,
@@ -530,7 +529,7 @@ static void iommu_table_setparms(struct pci_controller *phb,
530static void iommu_table_setparms_lpar(struct pci_controller *phb, 529static void iommu_table_setparms_lpar(struct pci_controller *phb,
531 struct device_node *dn, 530 struct device_node *dn,
532 struct iommu_table *tbl, 531 struct iommu_table *tbl,
533 const void *dma_window) 532 const __be32 *dma_window)
534{ 533{
535 unsigned long offset, size; 534 unsigned long offset, size;
536 535
@@ -630,7 +629,7 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
630 struct iommu_table *tbl; 629 struct iommu_table *tbl;
631 struct device_node *dn, *pdn; 630 struct device_node *dn, *pdn;
632 struct pci_dn *ppci; 631 struct pci_dn *ppci;
633 const void *dma_window = NULL; 632 const __be32 *dma_window = NULL;
634 633
635 dn = pci_bus_to_OF_node(bus); 634 dn = pci_bus_to_OF_node(bus);
636 635
@@ -1152,7 +1151,7 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
1152{ 1151{
1153 struct device_node *pdn, *dn; 1152 struct device_node *pdn, *dn;
1154 struct iommu_table *tbl; 1153 struct iommu_table *tbl;
1155 const void *dma_window = NULL; 1154 const __be32 *dma_window = NULL;
1156 struct pci_dn *pci; 1155 struct pci_dn *pci;
1157 1156
1158 pr_debug("pci_dma_dev_setup_pSeriesLP: %s\n", pci_name(dev)); 1157 pr_debug("pci_dma_dev_setup_pSeriesLP: %s\n", pci_name(dev));
@@ -1201,7 +1200,7 @@ static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask)
1201 bool ddw_enabled = false; 1200 bool ddw_enabled = false;
1202 struct device_node *pdn, *dn; 1201 struct device_node *pdn, *dn;
1203 struct pci_dev *pdev; 1202 struct pci_dev *pdev;
1204 const void *dma_window = NULL; 1203 const __be32 *dma_window = NULL;
1205 u64 dma_offset; 1204 u64 dma_offset;
1206 1205
1207 if (!dev->dma_mask) 1206 if (!dev->dma_mask)
diff --git a/arch/powerpc/platforms/pseries/kexec.c b/arch/powerpc/platforms/pseries/kexec.c
index 7d94bdc63d50..13fa95b3aa8b 100644
--- a/arch/powerpc/platforms/pseries/kexec.c
+++ b/arch/powerpc/platforms/pseries/kexec.c
@@ -17,9 +17,9 @@
17#include <asm/mpic.h> 17#include <asm/mpic.h>
18#include <asm/xics.h> 18#include <asm/xics.h>
19#include <asm/smp.h> 19#include <asm/smp.h>
20#include <asm/plpar_wrappers.h>
20 21
21#include "pseries.h" 22#include "pseries.h"
22#include "plpar_wrappers.h"
23 23
24static void pseries_kexec_cpu_down(int crash_shutdown, int secondary) 24static void pseries_kexec_cpu_down(int crash_shutdown, int secondary)
25{ 25{
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 8bad880bd177..356bc75ca74f 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -41,8 +41,8 @@
41#include <asm/smp.h> 41#include <asm/smp.h>
42#include <asm/trace.h> 42#include <asm/trace.h>
43#include <asm/firmware.h> 43#include <asm/firmware.h>
44#include <asm/plpar_wrappers.h>
44 45
45#include "plpar_wrappers.h"
46#include "pseries.h" 46#include "pseries.h"
47 47
48/* Flag bits for H_BULK_REMOVE */ 48/* Flag bits for H_BULK_REMOVE */
@@ -68,6 +68,12 @@ void vpa_init(int cpu)
68 struct paca_struct *pp; 68 struct paca_struct *pp;
69 struct dtl_entry *dtl; 69 struct dtl_entry *dtl;
70 70
71 /*
72 * The spec says it "may be problematic" if CPU x registers the VPA of
73 * CPU y. We should never do that, but wail if we ever do.
74 */
75 WARN_ON(cpu != smp_processor_id());
76
71 if (cpu_has_feature(CPU_FTR_ALTIVEC)) 77 if (cpu_has_feature(CPU_FTR_ALTIVEC))
72 lppaca_of(cpu).vmxregs_in_use = 1; 78 lppaca_of(cpu).vmxregs_in_use = 1;
73 79
@@ -106,7 +112,7 @@ void vpa_init(int cpu)
106 lppaca_of(cpu).dtl_idx = 0; 112 lppaca_of(cpu).dtl_idx = 0;
107 113
108 /* hypervisor reads buffer length from this field */ 114 /* hypervisor reads buffer length from this field */
109 dtl->enqueue_to_dispatch_time = DISPATCH_LOG_BYTES; 115 dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
110 ret = register_dtl(hwcpu, __pa(dtl)); 116 ret = register_dtl(hwcpu, __pa(dtl));
111 if (ret) 117 if (ret)
112 pr_err("WARNING: DTL registration of cpu %d (hw %d) " 118 pr_err("WARNING: DTL registration of cpu %d (hw %d) "
@@ -724,7 +730,7 @@ int h_get_mpp(struct hvcall_mpp_data *mpp_data)
724 730
725 mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff; 731 mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff;
726 mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff; 732 mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff;
727 mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffff; 733 mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffffUL;
728 734
729 mpp_data->pool_size = retbuf[4]; 735 mpp_data->pool_size = retbuf[4];
730 mpp_data->loan_request = retbuf[5]; 736 mpp_data->loan_request = retbuf[5];
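
The h_get_mpp() hunk shows the unpacking convention: one 64-bit hcall return register carries two one-byte weights in its top bytes and a 48-bit entitlement in the low bits, and the added UL suffix makes the 48-bit mask's type explicit. The same arithmetic as a runnable sketch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t reg = 0x1122334455667788ULL;	/* pretend hcall return */

	unsigned mem_weight     = (reg >> 7 * 8) & 0xff;  /* byte 7 */
	unsigned unalloc_weight = (reg >> 6 * 8) & 0xff;  /* byte 6 */
	uint64_t entitlement    = reg & 0xffffffffffffUL; /* low 48 bits */

	printf("%02x %02x %012llx\n", mem_weight, unalloc_weight,
	       (unsigned long long)entitlement);
	return 0;
}
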
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c
index d92f3871e9cf..e738007eae64 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/platforms/pseries/lparcfg.c
@@ -35,7 +35,13 @@
35#include <asm/vdso_datapage.h> 35#include <asm/vdso_datapage.h>
36#include <asm/vio.h> 36#include <asm/vio.h>
37#include <asm/mmu.h> 37#include <asm/mmu.h>
38#include <asm/machdep.h>
38 39
40
41/*
42 * This isn't a module but we expose that to userspace
43 * via /proc so leave the definitions here
44 */
39#define MODULE_VERS "1.9" 45#define MODULE_VERS "1.9"
40#define MODULE_NAME "lparcfg" 46#define MODULE_NAME "lparcfg"
41 47
@@ -165,7 +171,7 @@ static void parse_ppp_data(struct seq_file *m)
165 ppp_data.active_system_procs); 171 ppp_data.active_system_procs);
166 172
167 /* pool related entries are appropriate for shared configs */ 173 /* pool related entries are appropriate for shared configs */
168 if (lppaca_of(0).shared_proc) { 174 if (lppaca_shared_proc(get_lppaca())) {
169 unsigned long pool_idle_time, pool_procs; 175 unsigned long pool_idle_time, pool_procs;
170 176
171 seq_printf(m, "pool=%d\n", ppp_data.pool_num); 177 seq_printf(m, "pool=%d\n", ppp_data.pool_num);
@@ -387,8 +393,8 @@ static void pseries_cmo_data(struct seq_file *m)
387 return; 393 return;
388 394
389 for_each_possible_cpu(cpu) { 395 for_each_possible_cpu(cpu) {
390 cmo_faults += lppaca_of(cpu).cmo_faults; 396 cmo_faults += be64_to_cpu(lppaca_of(cpu).cmo_faults);
391 cmo_fault_time += lppaca_of(cpu).cmo_fault_time; 397 cmo_fault_time += be64_to_cpu(lppaca_of(cpu).cmo_fault_time);
392 } 398 }
393 399
394 seq_printf(m, "cmo_faults=%lu\n", cmo_faults); 400 seq_printf(m, "cmo_faults=%lu\n", cmo_faults);
@@ -406,8 +412,9 @@ static void splpar_dispatch_data(struct seq_file *m)
406 unsigned long dispatch_dispersions = 0; 412 unsigned long dispatch_dispersions = 0;
407 413
408 for_each_possible_cpu(cpu) { 414 for_each_possible_cpu(cpu) {
409 dispatches += lppaca_of(cpu).yield_count; 415 dispatches += be32_to_cpu(lppaca_of(cpu).yield_count);
410 dispatch_dispersions += lppaca_of(cpu).dispersion_count; 416 dispatch_dispersions +=
417 be32_to_cpu(lppaca_of(cpu).dispersion_count);
411 } 418 }
412 419
413 seq_printf(m, "dispatches=%lu\n", dispatches); 420 seq_printf(m, "dispatches=%lu\n", dispatches);
@@ -418,7 +425,8 @@ static void parse_em_data(struct seq_file *m)
418{ 425{
419 unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; 426 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
420 427
421 if (plpar_hcall(H_GET_EM_PARMS, retbuf) == H_SUCCESS) 428 if (firmware_has_feature(FW_FEATURE_LPAR) &&
429 plpar_hcall(H_GET_EM_PARMS, retbuf) == H_SUCCESS)
422 seq_printf(m, "power_mode_data=%016lx\n", retbuf[0]); 430 seq_printf(m, "power_mode_data=%016lx\n", retbuf[0]);
423} 431}
424 432
@@ -473,7 +481,8 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
473 seq_printf(m, "partition_potential_processors=%d\n", 481 seq_printf(m, "partition_potential_processors=%d\n",
474 partition_potential_processors); 482 partition_potential_processors);
475 483
476 seq_printf(m, "shared_processor_mode=%d\n", lppaca_of(0).shared_proc); 484 seq_printf(m, "shared_processor_mode=%d\n",
485 lppaca_shared_proc(get_lppaca()));
477 486
478 seq_printf(m, "slb_size=%d\n", mmu_slb_size); 487 seq_printf(m, "slb_size=%d\n", mmu_slb_size);
479 488
@@ -677,7 +686,6 @@ static int lparcfg_open(struct inode *inode, struct file *file)
677} 686}
678 687
679static const struct file_operations lparcfg_fops = { 688static const struct file_operations lparcfg_fops = {
680 .owner = THIS_MODULE,
681 .read = seq_read, 689 .read = seq_read,
682 .write = lparcfg_write, 690 .write = lparcfg_write,
683 .open = lparcfg_open, 691 .open = lparcfg_open,
@@ -699,14 +707,4 @@ static int __init lparcfg_init(void)
699 } 707 }
700 return 0; 708 return 0;
701} 709}
702 710machine_device_initcall(pseries, lparcfg_init);
703static void __exit lparcfg_cleanup(void)
704{
705 remove_proc_subtree("powerpc/lparcfg", NULL);
706}
707
708module_init(lparcfg_init);
709module_exit(lparcfg_cleanup);
710MODULE_DESCRIPTION("Interface for LPAR configuration data");
711MODULE_AUTHOR("Dave Engebretsen");
712MODULE_LICENSE("GPL");
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
index 3d01eee9ffb1..cde4e0a095ae 100644
--- a/arch/powerpc/platforms/pseries/mobility.c
+++ b/arch/powerpc/platforms/pseries/mobility.c
@@ -28,7 +28,7 @@ struct update_props_workarea {
28 u32 state; 28 u32 state;
29 u64 reserved; 29 u64 reserved;
30 u32 nprops; 30 u32 nprops;
31}; 31} __packed;
32 32
33#define NODE_ACTION_MASK 0xff000000 33#define NODE_ACTION_MASK 0xff000000
34#define NODE_COUNT_MASK 0x00ffffff 34#define NODE_COUNT_MASK 0x00ffffff
@@ -62,6 +62,7 @@ static int delete_dt_node(u32 phandle)
62 return -ENOENT; 62 return -ENOENT;
63 63
64 dlpar_detach_node(dn); 64 dlpar_detach_node(dn);
65 of_node_put(dn);
65 return 0; 66 return 0;
66} 67}
67 68
@@ -119,7 +120,7 @@ static int update_dt_property(struct device_node *dn, struct property **prop,
119 120
120 if (!more) { 121 if (!more) {
121 of_update_property(dn, new_prop); 122 of_update_property(dn, new_prop);
122 new_prop = NULL; 123 *prop = NULL;
123 } 124 }
124 125
125 return 0; 126 return 0;
@@ -130,7 +131,7 @@ static int update_dt_node(u32 phandle, s32 scope)
130 struct update_props_workarea *upwa; 131 struct update_props_workarea *upwa;
131 struct device_node *dn; 132 struct device_node *dn;
132 struct property *prop = NULL; 133 struct property *prop = NULL;
133 int i, rc; 134 int i, rc, rtas_rc;
134 char *prop_data; 135 char *prop_data;
135 char *rtas_buf; 136 char *rtas_buf;
136 int update_properties_token; 137 int update_properties_token;
@@ -154,25 +155,26 @@ static int update_dt_node(u32 phandle, s32 scope)
154 upwa->phandle = phandle; 155 upwa->phandle = phandle;
155 156
156 do { 157 do {
157 rc = mobility_rtas_call(update_properties_token, rtas_buf, 158 rtas_rc = mobility_rtas_call(update_properties_token, rtas_buf,
158 scope); 159 scope);
159 if (rc < 0) 160 if (rtas_rc < 0)
160 break; 161 break;
161 162
162 prop_data = rtas_buf + sizeof(*upwa); 163 prop_data = rtas_buf + sizeof(*upwa);
163 164
 164 /* The first element of the buffer is the path of the node 165 /* On the first call to ibm,update-properties for a node
 165 * being updated in the form of an 8-byte string length 166 * the first property value descriptor contains an empty
166 * followed by the string. Skip past this to get to the 167 * property name, the property value length encoded as u32,
167 * properties being updated. 168 * and the property value is the node path being updated.
168 */ 169 */
169 vd = *prop_data++; 170 if (*prop_data == 0) {
170 prop_data += vd; 171 prop_data++;
172 vd = *(u32 *)prop_data;
173 prop_data += vd + sizeof(vd);
174 upwa->nprops--;
175 }
171 176
172 /* The path we skipped over is counted as one of the elements 177 for (i = 0; i < upwa->nprops; i++) {
173 * returned so start counting at one.
174 */
175 for (i = 1; i < upwa->nprops; i++) {
176 char *prop_name; 178 char *prop_name;
177 179
178 prop_name = prop_data; 180 prop_name = prop_data;
@@ -202,7 +204,7 @@ static int update_dt_node(u32 phandle, s32 scope)
202 prop_data += vd; 204 prop_data += vd;
203 } 205 }
204 } 206 }
205 } while (rc == 1); 207 } while (rtas_rc == 1);
206 208
207 of_node_put(dn); 209 of_node_put(dn);
208 kfree(rtas_buf); 210 kfree(rtas_buf);
@@ -215,17 +217,14 @@ static int add_dt_node(u32 parent_phandle, u32 drc_index)
215 struct device_node *parent_dn; 217 struct device_node *parent_dn;
216 int rc; 218 int rc;
217 219
218 dn = dlpar_configure_connector(drc_index); 220 parent_dn = of_find_node_by_phandle(parent_phandle);
219 if (!dn) 221 if (!parent_dn)
220 return -ENOENT; 222 return -ENOENT;
221 223
222 parent_dn = of_find_node_by_phandle(parent_phandle); 224 dn = dlpar_configure_connector(drc_index, parent_dn);
223 if (!parent_dn) { 225 if (!dn)
224 dlpar_free_cc_nodes(dn);
225 return -ENOENT; 226 return -ENOENT;
226 }
227 227
228 dn->parent = parent_dn;
229 rc = dlpar_attach_node(dn); 228 rc = dlpar_attach_node(dn);
230 if (rc) 229 if (rc)
231 dlpar_free_cc_nodes(dn); 230 dlpar_free_cc_nodes(dn);
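
The update_dt_node() rework above hinges on the buffer layout described in the new comment: each returned entry is a NUL-terminated property name, a u32 value length, then the value bytes, and on the first call the leading entry has an empty name and carries the node path. A sketch of that walk over a plain byte buffer (the layout is taken from the patch comment, so treat it as illustrative rather than a firmware spec):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void walk_props(const char *p, uint32_t nprops)
{
	uint32_t vd;

	if (*p == '\0') {		/* leading node-path descriptor */
		p++;
		memcpy(&vd, p, sizeof(vd));
		p += sizeof(vd) + vd;	/* skip length word and path */
		nprops--;
	}

	for (uint32_t i = 0; i < nprops; i++) {
		const char *name = p;

		p += strlen(name) + 1;
		memcpy(&vd, p, sizeof(vd));
		p += sizeof(vd);
		printf("property %s, %u value bytes\n", name, vd);
		p += vd;
	}
}
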
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index 9f8671a44551..d276cd3edd8f 100644
--- a/arch/powerpc/platforms/pseries/nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
@@ -539,65 +539,6 @@ static int zip_oops(size_t text_len)
539} 539}
540 540
541#ifdef CONFIG_PSTORE 541#ifdef CONFIG_PSTORE
542/* Derived from logfs_uncompress */
543int nvram_decompress(void *in, void *out, size_t inlen, size_t outlen)
544{
545 int err, ret;
546
547 ret = -EIO;
548 err = zlib_inflateInit(&stream);
549 if (err != Z_OK)
550 goto error;
551
552 stream.next_in = in;
553 stream.avail_in = inlen;
554 stream.total_in = 0;
555 stream.next_out = out;
556 stream.avail_out = outlen;
557 stream.total_out = 0;
558
559 err = zlib_inflate(&stream, Z_FINISH);
560 if (err != Z_STREAM_END)
561 goto error;
562
563 err = zlib_inflateEnd(&stream);
564 if (err != Z_OK)
565 goto error;
566
567 ret = stream.total_out;
568error:
569 return ret;
570}
571
572static int unzip_oops(char *oops_buf, char *big_buf)
573{
574 struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf;
575 u64 timestamp = oops_hdr->timestamp;
576 char *big_oops_data = NULL;
577 char *oops_data_buf = NULL;
578 size_t big_oops_data_sz;
579 int unzipped_len;
580
581 big_oops_data = big_buf + sizeof(struct oops_log_info);
582 big_oops_data_sz = big_oops_buf_sz - sizeof(struct oops_log_info);
583 oops_data_buf = oops_buf + sizeof(struct oops_log_info);
584
585 unzipped_len = nvram_decompress(oops_data_buf, big_oops_data,
586 oops_hdr->report_length,
587 big_oops_data_sz);
588
589 if (unzipped_len < 0) {
590 pr_err("nvram: decompression failed; returned %d\n",
591 unzipped_len);
592 return -1;
593 }
594 oops_hdr = (struct oops_log_info *)big_buf;
595 oops_hdr->version = OOPS_HDR_VERSION;
596 oops_hdr->report_length = (u16) unzipped_len;
597 oops_hdr->timestamp = timestamp;
598 return 0;
599}
600
601static int nvram_pstore_open(struct pstore_info *psi) 542static int nvram_pstore_open(struct pstore_info *psi)
602{ 543{
603 /* Reset the iterator to start reading partitions again */ 544 /* Reset the iterator to start reading partitions again */
@@ -613,7 +554,7 @@ static int nvram_pstore_open(struct pstore_info *psi)
613 * @part: pstore writes data to registered buffer in parts, 554 * @part: pstore writes data to registered buffer in parts,
614 * part number will indicate the same. 555 * part number will indicate the same.
615 * @count: Indicates oops count 556 * @count: Indicates oops count
616 * @hsize: Size of header added by pstore 557 * @compressed: Flag to indicate the log is compressed
617 * @size: number of bytes written to the registered buffer 558 * @size: number of bytes written to the registered buffer
618 * @psi: registered pstore_info structure 559 * @psi: registered pstore_info structure
619 * 560 *
@@ -624,7 +565,7 @@ static int nvram_pstore_open(struct pstore_info *psi)
624static int nvram_pstore_write(enum pstore_type_id type, 565static int nvram_pstore_write(enum pstore_type_id type,
625 enum kmsg_dump_reason reason, 566 enum kmsg_dump_reason reason,
626 u64 *id, unsigned int part, int count, 567 u64 *id, unsigned int part, int count,
627 size_t hsize, size_t size, 568 bool compressed, size_t size,
628 struct pstore_info *psi) 569 struct pstore_info *psi)
629{ 570{
630 int rc; 571 int rc;
@@ -640,30 +581,11 @@ static int nvram_pstore_write(enum pstore_type_id type,
640 oops_hdr->report_length = (u16) size; 581 oops_hdr->report_length = (u16) size;
641 oops_hdr->timestamp = get_seconds(); 582 oops_hdr->timestamp = get_seconds();
642 583
643 if (big_oops_buf) { 584 if (compressed)
644 rc = zip_oops(size); 585 err_type = ERR_TYPE_KERNEL_PANIC_GZ;
645 /*
646 * If compression fails copy recent log messages from
647 * big_oops_buf to oops_data.
648 */
649 if (rc != 0) {
650 size_t diff = size - oops_data_sz + hsize;
651
652 if (size > oops_data_sz) {
653 memcpy(oops_data, big_oops_buf, hsize);
654 memcpy(oops_data + hsize, big_oops_buf + diff,
655 oops_data_sz - hsize);
656
657 oops_hdr->report_length = (u16) oops_data_sz;
658 } else
659 memcpy(oops_data, big_oops_buf, size);
660 } else
661 err_type = ERR_TYPE_KERNEL_PANIC_GZ;
662 }
663 586
664 rc = nvram_write_os_partition(&oops_log_partition, oops_buf, 587 rc = nvram_write_os_partition(&oops_log_partition, oops_buf,
665 (int) (sizeof(*oops_hdr) + oops_hdr->report_length), err_type, 588 (int) (sizeof(*oops_hdr) + size), err_type, count);
666 count);
667 589
668 if (rc != 0) 590 if (rc != 0)
669 return rc; 591 return rc;
@@ -679,16 +601,15 @@ static int nvram_pstore_write(enum pstore_type_id type,
679 */ 601 */
680static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type, 602static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type,
681 int *count, struct timespec *time, char **buf, 603 int *count, struct timespec *time, char **buf,
682 struct pstore_info *psi) 604 bool *compressed, struct pstore_info *psi)
683{ 605{
684 struct oops_log_info *oops_hdr; 606 struct oops_log_info *oops_hdr;
685 unsigned int err_type, id_no, size = 0; 607 unsigned int err_type, id_no, size = 0;
686 struct nvram_os_partition *part = NULL; 608 struct nvram_os_partition *part = NULL;
687 char *buff = NULL, *big_buff = NULL; 609 char *buff = NULL;
688 int rc, sig = 0; 610 int sig = 0;
689 loff_t p; 611 loff_t p;
690 612
691read_partition:
692 read_type++; 613 read_type++;
693 614
694 switch (nvram_type_ids[read_type]) { 615 switch (nvram_type_ids[read_type]) {
@@ -749,30 +670,32 @@ read_partition:
749 *id = id_no; 670 *id = id_no;
750 671
751 if (nvram_type_ids[read_type] == PSTORE_TYPE_DMESG) { 672 if (nvram_type_ids[read_type] == PSTORE_TYPE_DMESG) {
752 oops_hdr = (struct oops_log_info *)buff; 673 size_t length, hdr_size;
753 *buf = buff + sizeof(*oops_hdr);
754
755 if (err_type == ERR_TYPE_KERNEL_PANIC_GZ) {
756 big_buff = kmalloc(big_oops_buf_sz, GFP_KERNEL);
757 if (!big_buff)
758 return -ENOMEM;
759
760 rc = unzip_oops(buff, big_buff);
761 674
762 if (rc != 0) { 675 oops_hdr = (struct oops_log_info *)buff;
763 kfree(buff); 676 if (oops_hdr->version < OOPS_HDR_VERSION) {
764 kfree(big_buff); 677 /* Old format oops header had 2-byte record size */
765 goto read_partition; 678 hdr_size = sizeof(u16);
766 } 679 length = oops_hdr->version;
767 680 time->tv_sec = 0;
768 oops_hdr = (struct oops_log_info *)big_buff; 681 time->tv_nsec = 0;
769 *buf = big_buff + sizeof(*oops_hdr); 682 } else {
770 kfree(buff); 683 hdr_size = sizeof(*oops_hdr);
684 length = oops_hdr->report_length;
685 time->tv_sec = oops_hdr->timestamp;
686 time->tv_nsec = 0;
771 } 687 }
688 *buf = kmalloc(length, GFP_KERNEL);
689 if (*buf == NULL)
690 return -ENOMEM;
691 memcpy(*buf, buff + hdr_size, length);
692 kfree(buff);
772 693
773 time->tv_sec = oops_hdr->timestamp; 694 if (err_type == ERR_TYPE_KERNEL_PANIC_GZ)
774 time->tv_nsec = 0; 695 *compressed = true;
775 return oops_hdr->report_length; 696 else
697 *compressed = false;
698 return length;
776 } 699 }
777 700
778 *buf = buff; 701 *buf = buff;
@@ -791,13 +714,8 @@ static int nvram_pstore_init(void)
791{ 714{
792 int rc = 0; 715 int rc = 0;
793 716
794 if (big_oops_buf) { 717 nvram_pstore_info.buf = oops_data;
795 nvram_pstore_info.buf = big_oops_buf; 718 nvram_pstore_info.bufsize = oops_data_sz;
796 nvram_pstore_info.bufsize = big_oops_buf_sz;
797 } else {
798 nvram_pstore_info.buf = oops_data;
799 nvram_pstore_info.bufsize = oops_data_sz;
800 }
801 719
802 rc = pstore_register(&nvram_pstore_info); 720 rc = pstore_register(&nvram_pstore_info);
803 if (rc != 0) 721 if (rc != 0)
@@ -836,6 +754,11 @@ static void __init nvram_init_oops_partition(int rtas_partition_exists)
836 oops_data = oops_buf + sizeof(struct oops_log_info); 754 oops_data = oops_buf + sizeof(struct oops_log_info);
837 oops_data_sz = oops_log_partition.size - sizeof(struct oops_log_info); 755 oops_data_sz = oops_log_partition.size - sizeof(struct oops_log_info);
838 756
757 rc = nvram_pstore_init();
758
759 if (!rc)
760 return;
761
839 /* 762 /*
840 * Figure compression (preceded by elimination of each line's <n> 763 * Figure compression (preceded by elimination of each line's <n>
841 * severity prefix) will reduce the oops/panic report to at most 764 * severity prefix) will reduce the oops/panic report to at most
@@ -844,8 +767,8 @@ static void __init nvram_init_oops_partition(int rtas_partition_exists)
844 big_oops_buf_sz = (oops_data_sz * 100) / 45; 767 big_oops_buf_sz = (oops_data_sz * 100) / 45;
845 big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL); 768 big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
846 if (big_oops_buf) { 769 if (big_oops_buf) {
847 stream.workspace = kmalloc(zlib_deflate_workspacesize( 770 stream.workspace = kmalloc(zlib_deflate_workspacesize(
848 WINDOW_BITS, MEM_LEVEL), GFP_KERNEL); 771 WINDOW_BITS, MEM_LEVEL), GFP_KERNEL);
849 if (!stream.workspace) { 772 if (!stream.workspace) {
850 pr_err("nvram: No memory for compression workspace; " 773 pr_err("nvram: No memory for compression workspace; "
851 "skipping compression of %s partition data\n", 774 "skipping compression of %s partition data\n",
@@ -859,11 +782,6 @@ static void __init nvram_init_oops_partition(int rtas_partition_exists)
859 stream.workspace = NULL; 782 stream.workspace = NULL;
860 } 783 }
861 784
862 rc = nvram_pstore_init();
863
864 if (!rc)
865 return;
866
867 rc = kmsg_dump_register(&nvram_kmsg_dumper); 785 rc = kmsg_dump_register(&nvram_kmsg_dumper);
868 if (rc != 0) { 786 if (rc != 0) {
869 pr_err("nvram: kmsg_dump_register() failed; returned %d\n", rc); 787 pr_err("nvram: kmsg_dump_register() failed; returned %d\n", rc);
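
The read side above now distinguishes two on-NVRAM record formats by the header's version field: old records start with a bare u16 length, newer ones carry the full oops_log_info header. A sketch of that branch, with the struct layout and version cutoff assumed for illustration:

#include <stddef.h>
#include <stdint.h>

#define OOPS_HDR_VERSION 5	/* assumed cutoff, not shown in the patch */

struct oops_log_info {
	uint16_t version;
	uint16_t report_length;
	uint64_t timestamp;
} __attribute__((packed));

static size_t oops_payload(const char *buff, const char **data,
			   uint64_t *timestamp)
{
	const struct oops_log_info *hdr = (const void *)buff;

	if (hdr->version < OOPS_HDR_VERSION) {
		/* old format: the leading u16 is the record length */
		*data = buff + sizeof(uint16_t);
		*timestamp = 0;
		return hdr->version;
	}
	*data = buff + sizeof(*hdr);
	*timestamp = hdr->timestamp;
	return hdr->report_length;
}
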
diff --git a/arch/powerpc/platforms/pseries/processor_idle.c b/arch/powerpc/platforms/pseries/processor_idle.c
index 4644efa06941..a166e38bd683 100644
--- a/arch/powerpc/platforms/pseries/processor_idle.c
+++ b/arch/powerpc/platforms/pseries/processor_idle.c
@@ -18,9 +18,7 @@
18#include <asm/machdep.h> 18#include <asm/machdep.h>
19#include <asm/firmware.h> 19#include <asm/firmware.h>
20#include <asm/runlatch.h> 20#include <asm/runlatch.h>
21 21#include <asm/plpar_wrappers.h>
22#include "plpar_wrappers.h"
23#include "pseries.h"
24 22
25struct cpuidle_driver pseries_idle_driver = { 23struct cpuidle_driver pseries_idle_driver = {
26 .name = "pseries_idle", 24 .name = "pseries_idle",
@@ -45,7 +43,11 @@ static inline void idle_loop_prolog(unsigned long *in_purr)
45 43
46static inline void idle_loop_epilog(unsigned long in_purr) 44static inline void idle_loop_epilog(unsigned long in_purr)
47{ 45{
48 get_lppaca()->wait_state_cycles += mfspr(SPRN_PURR) - in_purr; 46 u64 wait_cycles;
47
48 wait_cycles = be64_to_cpu(get_lppaca()->wait_state_cycles);
49 wait_cycles += mfspr(SPRN_PURR) - in_purr;
50 get_lppaca()->wait_state_cycles = cpu_to_be64(wait_cycles);
49 get_lppaca()->idle = 0; 51 get_lppaca()->idle = 0;
50} 52}
51 53
@@ -308,7 +310,7 @@ static int pseries_idle_probe(void)
308 return -EPERM; 310 return -EPERM;
309 } 311 }
310 312
311 if (get_lppaca()->shared_proc) 313 if (lppaca_shared_proc(get_lppaca()))
312 cpuidle_state_table = shared_states; 314 cpuidle_state_table = shared_states;
313 else 315 else
314 cpuidle_state_table = dedicated_states; 316 cpuidle_state_table = dedicated_states;
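
idle_loop_epilog() above is a read-modify-write on a field the hypervisor keeps big-endian: convert to CPU order, accumulate, convert back. The shape of that in a standalone sketch (__builtin_bswap64 again standing in for the be64 helpers on a little-endian host):

#include <stdint.h>

static void add_wait_cycles(uint64_t *be_counter, uint64_t delta)
{
	uint64_t v = __builtin_bswap64(*be_counter);	/* be64_to_cpu */

	v += delta;
	*be_counter = __builtin_bswap64(v);		/* cpu_to_be64 */
}
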
diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
index c2a3a258001c..99219530ea4a 100644
--- a/arch/powerpc/platforms/pseries/pseries.h
+++ b/arch/powerpc/platforms/pseries/pseries.h
@@ -56,13 +56,10 @@ extern void hvc_vio_init_early(void);
56/* Dynamic logical Partitioning/Mobility */ 56/* Dynamic logical Partitioning/Mobility */
57extern void dlpar_free_cc_nodes(struct device_node *); 57extern void dlpar_free_cc_nodes(struct device_node *);
58extern void dlpar_free_cc_property(struct property *); 58extern void dlpar_free_cc_property(struct property *);
59extern struct device_node *dlpar_configure_connector(u32); 59extern struct device_node *dlpar_configure_connector(u32, struct device_node *);
60extern int dlpar_attach_node(struct device_node *); 60extern int dlpar_attach_node(struct device_node *);
61extern int dlpar_detach_node(struct device_node *); 61extern int dlpar_detach_node(struct device_node *);
62 62
63/* Snooze Delay, pseries_idle */
64DECLARE_PER_CPU(long, smt_snooze_delay);
65
66/* PCI root bridge prepare function override for pseries */ 63/* PCI root bridge prepare function override for pseries */
67struct pci_host_bridge; 64struct pci_host_bridge;
68int pseries_root_bridge_prepare(struct pci_host_bridge *bridge); 65int pseries_root_bridge_prepare(struct pci_host_bridge *bridge);
diff --git a/arch/powerpc/platforms/pseries/pseries_energy.c b/arch/powerpc/platforms/pseries/pseries_energy.c
index a91e6dadda2c..92767791f93b 100644
--- a/arch/powerpc/platforms/pseries/pseries_energy.c
+++ b/arch/powerpc/platforms/pseries/pseries_energy.c
@@ -108,8 +108,8 @@ err:
108 * energy consumption. 108 * energy consumption.
109 */ 109 */
110 110
111#define FLAGS_MODE1 0x004E200000080E01 111#define FLAGS_MODE1 0x004E200000080E01UL
112#define FLAGS_MODE2 0x004E200000080401 112#define FLAGS_MODE2 0x004E200000080401UL
113#define FLAGS_ACTIVATE 0x100 113#define FLAGS_ACTIVATE 0x100
114 114
115static ssize_t get_best_energy_list(char *page, int activate) 115static ssize_t get_best_energy_list(char *page, int activate)
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index c11c8238797c..d64feb3ea0be 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -66,8 +66,8 @@
66#include <asm/firmware.h> 66#include <asm/firmware.h>
67#include <asm/eeh.h> 67#include <asm/eeh.h>
68#include <asm/reg.h> 68#include <asm/reg.h>
69#include <asm/plpar_wrappers.h>
69 70
70#include "plpar_wrappers.h"
71#include "pseries.h" 71#include "pseries.h"
72 72
73int CMO_PrPSP = -1; 73int CMO_PrPSP = -1;
@@ -183,7 +183,7 @@ static void __init pseries_mpic_init_IRQ(void)
183 np = of_find_node_by_path("/"); 183 np = of_find_node_by_path("/");
184 naddr = of_n_addr_cells(np); 184 naddr = of_n_addr_cells(np);
185 opprop = of_get_property(np, "platform-open-pic", &opplen); 185 opprop = of_get_property(np, "platform-open-pic", &opplen);
186 if (opprop != 0) { 186 if (opprop != NULL) {
187 openpic_addr = of_read_number(opprop, naddr); 187 openpic_addr = of_read_number(opprop, naddr);
188 printk(KERN_DEBUG "OpenPIC addr: %lx\n", openpic_addr); 188 printk(KERN_DEBUG "OpenPIC addr: %lx\n", openpic_addr);
189 } 189 }
@@ -323,7 +323,7 @@ static int alloc_dispatch_logs(void)
323 get_paca()->lppaca_ptr->dtl_idx = 0; 323 get_paca()->lppaca_ptr->dtl_idx = 0;
324 324
325 /* hypervisor reads buffer length from this field */ 325 /* hypervisor reads buffer length from this field */
326 dtl->enqueue_to_dispatch_time = DISPATCH_LOG_BYTES; 326 dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
327 ret = register_dtl(hard_smp_processor_id(), __pa(dtl)); 327 ret = register_dtl(hard_smp_processor_id(), __pa(dtl));
328 if (ret) 328 if (ret)
329 pr_err("WARNING: DTL registration of cpu %d (hw %d) failed " 329 pr_err("WARNING: DTL registration of cpu %d (hw %d) failed "
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index 306643cc9dbc..1c1771a40250 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -43,8 +43,8 @@
43#include <asm/cputhreads.h> 43#include <asm/cputhreads.h>
44#include <asm/xics.h> 44#include <asm/xics.h>
45#include <asm/dbell.h> 45#include <asm/dbell.h>
46#include <asm/plpar_wrappers.h>
46 47
47#include "plpar_wrappers.h"
48#include "pseries.h" 48#include "pseries.h"
49#include "offline_states.h" 49#include "offline_states.h"
50 50
@@ -187,22 +187,6 @@ static int smp_pSeries_kick_cpu(int nr)
187 return 0; 187 return 0;
188} 188}
189 189
190static int smp_pSeries_cpu_bootable(unsigned int nr)
191{
192 /* Special case - we inhibit secondary thread startup
193 * during boot if the user requests it.
194 */
195 if (system_state == SYSTEM_BOOTING && cpu_has_feature(CPU_FTR_SMT)) {
196 if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
197 return 0;
198 if (smt_enabled_at_boot
199 && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
200 return 0;
201 }
202
203 return 1;
204}
205
206/* Only used on systems that support multiple IPI mechanisms */ 190/* Only used on systems that support multiple IPI mechanisms */
207static void pSeries_cause_ipi_mux(int cpu, unsigned long data) 191static void pSeries_cause_ipi_mux(int cpu, unsigned long data)
208{ 192{
@@ -237,7 +221,7 @@ static struct smp_ops_t pSeries_xics_smp_ops = {
237 .probe = pSeries_smp_probe, 221 .probe = pSeries_smp_probe,
238 .kick_cpu = smp_pSeries_kick_cpu, 222 .kick_cpu = smp_pSeries_kick_cpu,
239 .setup_cpu = smp_xics_setup_cpu, 223 .setup_cpu = smp_xics_setup_cpu,
240 .cpu_bootable = smp_pSeries_cpu_bootable, 224 .cpu_bootable = smp_generic_cpu_bootable,
241}; 225};
242 226
243/* This is called very early */ 227/* This is called very early */
diff --git a/arch/powerpc/platforms/wsp/wsp.h b/arch/powerpc/platforms/wsp/wsp.h
index 62ef21afb89a..a563a8aaf812 100644
--- a/arch/powerpc/platforms/wsp/wsp.h
+++ b/arch/powerpc/platforms/wsp/wsp.h
@@ -17,7 +17,6 @@ extern void scom_init_wsp(void);
17extern void a2_setup_smp(void); 17extern void a2_setup_smp(void);
18extern int a2_scom_startup_cpu(unsigned int lcpu, int thr_idx, 18extern int a2_scom_startup_cpu(unsigned int lcpu, int thr_idx,
19 struct device_node *np); 19 struct device_node *np);
20extern int smp_a2_cpu_bootable(unsigned int nr);
21extern int smp_a2_kick_cpu(int nr); 20extern int smp_a2_kick_cpu(int nr);
22 21
23extern void opb_pic_init(void); 22extern void opb_pic_init(void);
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index ab02db3d02d8..77efbaec7b9c 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -28,6 +28,18 @@
28#include "fsl_msi.h" 28#include "fsl_msi.h"
29#include "fsl_pci.h" 29#include "fsl_pci.h"
30 30
31#define MSIIR_OFFSET_MASK 0xfffff
32#define MSIIR_IBS_SHIFT 0
33#define MSIIR_SRS_SHIFT 5
34#define MSIIR1_IBS_SHIFT 4
35#define MSIIR1_SRS_SHIFT 0
36#define MSI_SRS_MASK 0xf
37#define MSI_IBS_MASK 0x1f
38
39#define msi_hwirq(msi, msir_index, intr_index) \
40 ((msir_index) << (msi)->srs_shift | \
41 ((intr_index) << (msi)->ibs_shift))
42
31static LIST_HEAD(msi_head); 43static LIST_HEAD(msi_head);
32 44
33struct fsl_msi_feature { 45struct fsl_msi_feature {
@@ -80,18 +92,19 @@ static const struct irq_domain_ops fsl_msi_host_ops = {
80 92
81static int fsl_msi_init_allocator(struct fsl_msi *msi_data) 93static int fsl_msi_init_allocator(struct fsl_msi *msi_data)
82{ 94{
83 int rc; 95 int rc, hwirq;
84 96
85 rc = msi_bitmap_alloc(&msi_data->bitmap, NR_MSI_IRQS, 97 rc = msi_bitmap_alloc(&msi_data->bitmap, NR_MSI_IRQS_MAX,
86 msi_data->irqhost->of_node); 98 msi_data->irqhost->of_node);
87 if (rc) 99 if (rc)
88 return rc; 100 return rc;
89 101
90 rc = msi_bitmap_reserve_dt_hwirqs(&msi_data->bitmap); 102 /*
91 if (rc < 0) { 103 * Reserve all the hwirqs
92 msi_bitmap_free(&msi_data->bitmap); 104 * The available hwirqs will be released in fsl_msi_setup_hwirq()
93 return rc; 105 */
94 } 106 for (hwirq = 0; hwirq < NR_MSI_IRQS_MAX; hwirq++)
107 msi_bitmap_reserve_hwirq(&msi_data->bitmap, hwirq);
95 108
96 return 0; 109 return 0;
97} 110}
@@ -144,8 +157,9 @@ static void fsl_compose_msi_msg(struct pci_dev *pdev, int hwirq,
144 157
145 msg->data = hwirq; 158 msg->data = hwirq;
146 159
147 pr_debug("%s: allocated srs: %d, ibs: %d\n", 160 pr_debug("%s: allocated srs: %d, ibs: %d\n", __func__,
148 __func__, hwirq / IRQS_PER_MSI_REG, hwirq % IRQS_PER_MSI_REG); 161 (hwirq >> msi_data->srs_shift) & MSI_SRS_MASK,
162 (hwirq >> msi_data->ibs_shift) & MSI_IBS_MASK);
149} 163}
150 164
151static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) 165static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
@@ -255,7 +269,7 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
255 269
256 msir_index = cascade_data->index; 270 msir_index = cascade_data->index;
257 271
258 if (msir_index >= NR_MSI_REG) 272 if (msir_index >= NR_MSI_REG_MAX)
259 cascade_irq = NO_IRQ; 273 cascade_irq = NO_IRQ;
260 274
261 irqd_set_chained_irq_inprogress(idata); 275 irqd_set_chained_irq_inprogress(idata);
@@ -285,8 +299,8 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc)
285 intr_index = ffs(msir_value) - 1; 299 intr_index = ffs(msir_value) - 1;
286 300
287 cascade_irq = irq_linear_revmap(msi_data->irqhost, 301 cascade_irq = irq_linear_revmap(msi_data->irqhost,
288 msir_index * IRQS_PER_MSI_REG + 302 msi_hwirq(msi_data, msir_index,
289 intr_index + have_shift); 303 intr_index + have_shift));
290 if (cascade_irq != NO_IRQ) 304 if (cascade_irq != NO_IRQ)
291 generic_handle_irq(cascade_irq); 305 generic_handle_irq(cascade_irq);
292 have_shift += intr_index + 1; 306 have_shift += intr_index + 1;
@@ -316,7 +330,7 @@ static int fsl_of_msi_remove(struct platform_device *ofdev)
316 330
317 if (msi->list.prev != NULL) 331 if (msi->list.prev != NULL)
318 list_del(&msi->list); 332 list_del(&msi->list);
319 for (i = 0; i < NR_MSI_REG; i++) { 333 for (i = 0; i < NR_MSI_REG_MAX; i++) {
320 virq = msi->msi_virqs[i]; 334 virq = msi->msi_virqs[i];
321 if (virq != NO_IRQ) { 335 if (virq != NO_IRQ) {
322 cascade_data = irq_get_handler_data(virq); 336 cascade_data = irq_get_handler_data(virq);
@@ -339,7 +353,7 @@ static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev,
339 int offset, int irq_index) 353 int offset, int irq_index)
340{ 354{
341 struct fsl_msi_cascade_data *cascade_data = NULL; 355 struct fsl_msi_cascade_data *cascade_data = NULL;
342 int virt_msir; 356 int virt_msir, i;
343 357
344 virt_msir = irq_of_parse_and_map(dev->dev.of_node, irq_index); 358 virt_msir = irq_of_parse_and_map(dev->dev.of_node, irq_index);
345 if (virt_msir == NO_IRQ) { 359 if (virt_msir == NO_IRQ) {
@@ -360,6 +374,11 @@ static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev,
360 irq_set_handler_data(virt_msir, cascade_data); 374 irq_set_handler_data(virt_msir, cascade_data);
361 irq_set_chained_handler(virt_msir, fsl_msi_cascade); 375 irq_set_chained_handler(virt_msir, fsl_msi_cascade);
362 376
377 /* Release the hwirqs corresponding to this MSI register */
378 for (i = 0; i < IRQS_PER_MSI_REG; i++)
379 msi_bitmap_free_hwirqs(&msi->bitmap,
380 msi_hwirq(msi, offset, i), 1);
381
363 return 0; 382 return 0;
364} 383}
365 384
@@ -368,14 +387,12 @@ static int fsl_of_msi_probe(struct platform_device *dev)
368{ 387{
369 const struct of_device_id *match; 388 const struct of_device_id *match;
370 struct fsl_msi *msi; 389 struct fsl_msi *msi;
371 struct resource res; 390 struct resource res, msiir;
372 int err, i, j, irq_index, count; 391 int err, i, j, irq_index, count;
373 int rc;
374 const u32 *p; 392 const u32 *p;
375 const struct fsl_msi_feature *features; 393 const struct fsl_msi_feature *features;
376 int len; 394 int len;
377 u32 offset; 395 u32 offset;
378 static const u32 all_avail[] = { 0, NR_MSI_IRQS };
379 396
380 match = of_match_device(fsl_of_msi_ids, &dev->dev); 397 match = of_match_device(fsl_of_msi_ids, &dev->dev);
381 if (!match) 398 if (!match)
@@ -392,7 +409,7 @@ static int fsl_of_msi_probe(struct platform_device *dev)
392 platform_set_drvdata(dev, msi); 409 platform_set_drvdata(dev, msi);
393 410
394 msi->irqhost = irq_domain_add_linear(dev->dev.of_node, 411 msi->irqhost = irq_domain_add_linear(dev->dev.of_node,
395 NR_MSI_IRQS, &fsl_msi_host_ops, msi); 412 NR_MSI_IRQS_MAX, &fsl_msi_host_ops, msi);
396 413
397 if (msi->irqhost == NULL) { 414 if (msi->irqhost == NULL) {
398 dev_err(&dev->dev, "No memory for MSI irqhost\n"); 415 dev_err(&dev->dev, "No memory for MSI irqhost\n");
@@ -421,6 +438,16 @@ static int fsl_of_msi_probe(struct platform_device *dev)
421 } 438 }
422 msi->msiir_offset = 439 msi->msiir_offset =
423 features->msiir_offset + (res.start & 0xfffff); 440 features->msiir_offset + (res.start & 0xfffff);
441
442 /*
443 * First read the MSIIR/MSIIR1 offset from dts
 444 * On failure use the hardcoded MSIIR offset
445 */
446 if (of_address_to_resource(dev->dev.of_node, 1, &msiir))
447 msi->msiir_offset = features->msiir_offset +
448 (res.start & MSIIR_OFFSET_MASK);
449 else
450 msi->msiir_offset = msiir.start & MSIIR_OFFSET_MASK;
424 } 451 }
425 452
426 msi->feature = features->fsl_pic_ip; 453 msi->feature = features->fsl_pic_ip;
@@ -431,42 +458,66 @@ static int fsl_of_msi_probe(struct platform_device *dev)
431 */ 458 */
432 msi->phandle = dev->dev.of_node->phandle; 459 msi->phandle = dev->dev.of_node->phandle;
433 460
434 rc = fsl_msi_init_allocator(msi); 461 err = fsl_msi_init_allocator(msi);
435 if (rc) { 462 if (err) {
436 dev_err(&dev->dev, "Error allocating MSI bitmap\n"); 463 dev_err(&dev->dev, "Error allocating MSI bitmap\n");
437 goto error_out; 464 goto error_out;
438 } 465 }
439 466
440 p = of_get_property(dev->dev.of_node, "msi-available-ranges", &len); 467 p = of_get_property(dev->dev.of_node, "msi-available-ranges", &len);
441 if (p && len % (2 * sizeof(u32)) != 0) {
442 dev_err(&dev->dev, "%s: Malformed msi-available-ranges property\n",
443 __func__);
444 err = -EINVAL;
445 goto error_out;
446 }
447 468
448 if (!p) { 469 if (of_device_is_compatible(dev->dev.of_node, "fsl,mpic-msi-v4.3")) {
449 p = all_avail; 470 msi->srs_shift = MSIIR1_SRS_SHIFT;
450 len = sizeof(all_avail); 471 msi->ibs_shift = MSIIR1_IBS_SHIFT;
451 } 472 if (p)
473 dev_warn(&dev->dev, "%s: dose not support msi-available-ranges property\n",
474 __func__);
475
476 for (irq_index = 0; irq_index < NR_MSI_REG_MSIIR1;
477 irq_index++) {
478 err = fsl_msi_setup_hwirq(msi, dev,
479 irq_index, irq_index);
480 if (err)
481 goto error_out;
482 }
483 } else {
484 static const u32 all_avail[] =
485 { 0, NR_MSI_REG_MSIIR * IRQS_PER_MSI_REG };
452 486
453 for (irq_index = 0, i = 0; i < len / (2 * sizeof(u32)); i++) { 487 msi->srs_shift = MSIIR_SRS_SHIFT;
454 if (p[i * 2] % IRQS_PER_MSI_REG || 488 msi->ibs_shift = MSIIR_IBS_SHIFT;
455 p[i * 2 + 1] % IRQS_PER_MSI_REG) { 489
456 printk(KERN_WARNING "%s: %s: msi available range of %u at %u is not IRQ-aligned\n", 490 if (p && len % (2 * sizeof(u32)) != 0) {
457 __func__, dev->dev.of_node->full_name, 491 dev_err(&dev->dev, "%s: Malformed msi-available-ranges property\n",
458 p[i * 2 + 1], p[i * 2]); 492 __func__);
459 err = -EINVAL; 493 err = -EINVAL;
460 goto error_out; 494 goto error_out;
461 } 495 }
462 496
463 offset = p[i * 2] / IRQS_PER_MSI_REG; 497 if (!p) {
464 count = p[i * 2 + 1] / IRQS_PER_MSI_REG; 498 p = all_avail;
499 len = sizeof(all_avail);
500 }
465 501
466 for (j = 0; j < count; j++, irq_index++) { 502 for (irq_index = 0, i = 0; i < len / (2 * sizeof(u32)); i++) {
467 err = fsl_msi_setup_hwirq(msi, dev, offset + j, irq_index); 503 if (p[i * 2] % IRQS_PER_MSI_REG ||
468 if (err) 504 p[i * 2 + 1] % IRQS_PER_MSI_REG) {
505 pr_warn("%s: %s: msi available range of %u at %u is not IRQ-aligned\n",
506 __func__, dev->dev.of_node->full_name,
507 p[i * 2 + 1], p[i * 2]);
508 err = -EINVAL;
469 goto error_out; 509 goto error_out;
510 }
511
512 offset = p[i * 2] / IRQS_PER_MSI_REG;
513 count = p[i * 2 + 1] / IRQS_PER_MSI_REG;
514
515 for (j = 0; j < count; j++, irq_index++) {
516 err = fsl_msi_setup_hwirq(msi, dev, offset + j,
517 irq_index);
518 if (err)
519 goto error_out;
520 }
470 } 521 }
471 } 522 }
472 523
@@ -509,6 +560,10 @@ static const struct of_device_id fsl_of_msi_ids[] = {
509 .data = &mpic_msi_feature, 560 .data = &mpic_msi_feature,
510 }, 561 },
511 { 562 {
563 .compatible = "fsl,mpic-msi-v4.3",
564 .data = &mpic_msi_feature,
565 },
566 {
512 .compatible = "fsl,ipic-msi", 567 .compatible = "fsl,ipic-msi",
513 .data = &ipic_msi_feature, 568 .data = &ipic_msi_feature,
514 }, 569 },
diff --git a/arch/powerpc/sysdev/fsl_msi.h b/arch/powerpc/sysdev/fsl_msi.h
index 8225f8653f78..df9aa9fe0933 100644
--- a/arch/powerpc/sysdev/fsl_msi.h
+++ b/arch/powerpc/sysdev/fsl_msi.h
@@ -16,9 +16,11 @@
16#include <linux/of.h> 16#include <linux/of.h>
17#include <asm/msi_bitmap.h> 17#include <asm/msi_bitmap.h>
18 18
19#define NR_MSI_REG 8 19#define NR_MSI_REG_MSIIR 8 /* MSIIR can index 8 MSI registers */
20#define NR_MSI_REG_MSIIR1 16 /* MSIIR1 can index 16 MSI registers */
21#define NR_MSI_REG_MAX NR_MSI_REG_MSIIR1
20#define IRQS_PER_MSI_REG 32 22#define IRQS_PER_MSI_REG 32
21#define NR_MSI_IRQS (NR_MSI_REG * IRQS_PER_MSI_REG) 23#define NR_MSI_IRQS_MAX (NR_MSI_REG_MAX * IRQS_PER_MSI_REG)
22 24
23#define FSL_PIC_IP_MASK 0x0000000F 25#define FSL_PIC_IP_MASK 0x0000000F
24#define FSL_PIC_IP_MPIC 0x00000001 26#define FSL_PIC_IP_MPIC 0x00000001
@@ -31,9 +33,11 @@ struct fsl_msi {
31 unsigned long cascade_irq; 33 unsigned long cascade_irq;
32 34
33 u32 msiir_offset; /* Offset of MSIIR, relative to start of CCSR */ 35 u32 msiir_offset; /* Offset of MSIIR, relative to start of CCSR */
36 u32 ibs_shift; /* Shift of interrupt bit select */
37 u32 srs_shift; /* Shift of the shared interrupt register select */
34 void __iomem *msi_regs; 38 void __iomem *msi_regs;
35 u32 feature; 39 u32 feature;
36 int msi_virqs[NR_MSI_REG]; 40 int msi_virqs[NR_MSI_REG_MAX];
37 41
38 struct msi_bitmap bitmap; 42 struct msi_bitmap bitmap;
39 43
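
The msi_hwirq() macro introduced above packs the shared-register select (SRS) and interrupt-bit select (IBS) into one hwirq using per-controller shifts, so the same code serves both the MSIIR layout (SRS at bit 5, IBS at bit 0) and the swapped MSIIR1 layout. A runnable round-trip with the MSIIR values from the patch:

#include <stdint.h>
#include <stdio.h>

#define SRS_SHIFT 5	/* MSIIR layout; MSIIR1 swaps these shifts */
#define IBS_SHIFT 0
#define SRS_MASK  0xf
#define IBS_MASK  0x1f

static unsigned msi_hwirq(unsigned srs, unsigned ibs)
{
	return (srs << SRS_SHIFT) | (ibs << IBS_SHIFT);
}

int main(void)
{
	unsigned hw = msi_hwirq(3, 17);

	printf("hwirq=%u srs=%u ibs=%u\n", hw,
	       (hw >> SRS_SHIFT) & SRS_MASK,
	       (hw >> IBS_SHIFT) & IBS_MASK);	/* -> srs=3 ibs=17 */
	return 0;
}
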
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 46ac1ddea683..ccfb50ddfe38 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -26,11 +26,15 @@
26#include <linux/memblock.h> 26#include <linux/memblock.h>
27#include <linux/log2.h> 27#include <linux/log2.h>
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <linux/uaccess.h>
29 30
30#include <asm/io.h> 31#include <asm/io.h>
31#include <asm/prom.h> 32#include <asm/prom.h>
32#include <asm/pci-bridge.h> 33#include <asm/pci-bridge.h>
34#include <asm/ppc-pci.h>
33#include <asm/machdep.h> 35#include <asm/machdep.h>
36#include <asm/disassemble.h>
37#include <asm/ppc-opcode.h>
34#include <sysdev/fsl_soc.h> 38#include <sysdev/fsl_soc.h>
35#include <sysdev/fsl_pci.h> 39#include <sysdev/fsl_pci.h>
36 40
@@ -64,7 +68,7 @@ static int fsl_pcie_check_link(struct pci_controller *hose)
64 if (hose->indirect_type & PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK) { 68 if (hose->indirect_type & PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK) {
65 if (hose->ops->read == fsl_indirect_read_config) { 69 if (hose->ops->read == fsl_indirect_read_config) {
66 struct pci_bus bus; 70 struct pci_bus bus;
67 bus.number = 0; 71 bus.number = hose->first_busno;
68 bus.sysdata = hose; 72 bus.sysdata = hose;
69 bus.ops = hose->ops; 73 bus.ops = hose->ops;
70 indirect_read_config(&bus, 0, PCIE_LTSSM, 4, &val); 74 indirect_read_config(&bus, 0, PCIE_LTSSM, 4, &val);
@@ -297,10 +301,10 @@ static void setup_pci_atmu(struct pci_controller *hose)
297 if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) { 301 if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
298 /* Size window to exact size if power-of-two or one size up */ 302 /* Size window to exact size if power-of-two or one size up */
299 if ((1ull << mem_log) != mem) { 303 if ((1ull << mem_log) != mem) {
304 mem_log++;
300 if ((1ull << mem_log) > mem) 305 if ((1ull << mem_log) > mem)
301 pr_info("%s: Setting PCI inbound window " 306 pr_info("%s: Setting PCI inbound window "
302 "greater than memory size\n", name); 307 "greater than memory size\n", name);
303 mem_log++;
304 } 308 }
305 309
306 piwar |= ((mem_log - 1) & PIWAR_SZ_MASK); 310 piwar |= ((mem_log - 1) & PIWAR_SZ_MASK);
@@ -373,7 +377,9 @@ static void setup_pci_atmu(struct pci_controller *hose)
373 } 377 }
374 378
375 if (hose->dma_window_size < mem) { 379 if (hose->dma_window_size < mem) {
376#ifndef CONFIG_SWIOTLB 380#ifdef CONFIG_SWIOTLB
381 ppc_swiotlb_enable = 1;
382#else
377 pr_err("%s: ERROR: Memory size exceeds PCI ATMU ability to " 383 pr_err("%s: ERROR: Memory size exceeds PCI ATMU ability to "
378 "map - enable CONFIG_SWIOTLB to avoid dma errors.\n", 384 "map - enable CONFIG_SWIOTLB to avoid dma errors.\n",
379 name); 385 name);
@@ -868,6 +874,160 @@ u64 fsl_pci_immrbar_base(struct pci_controller *hose)
868 return 0; 874 return 0;
869} 875}
870 876
877#ifdef CONFIG_E500
878static int mcheck_handle_load(struct pt_regs *regs, u32 inst)
879{
880 unsigned int rd, ra, rb, d;
881
882 rd = get_rt(inst);
883 ra = get_ra(inst);
884 rb = get_rb(inst);
885 d = get_d(inst);
886
887 switch (get_op(inst)) {
888 case 31:
889 switch (get_xop(inst)) {
890 case OP_31_XOP_LWZX:
891 case OP_31_XOP_LWBRX:
892 regs->gpr[rd] = 0xffffffff;
893 break;
894
895 case OP_31_XOP_LWZUX:
896 regs->gpr[rd] = 0xffffffff;
897 regs->gpr[ra] += regs->gpr[rb];
898 break;
899
900 case OP_31_XOP_LBZX:
901 regs->gpr[rd] = 0xff;
902 break;
903
904 case OP_31_XOP_LBZUX:
905 regs->gpr[rd] = 0xff;
906 regs->gpr[ra] += regs->gpr[rb];
907 break;
908
909 case OP_31_XOP_LHZX:
910 case OP_31_XOP_LHBRX:
911 regs->gpr[rd] = 0xffff;
912 break;
913
914 case OP_31_XOP_LHZUX:
915 regs->gpr[rd] = 0xffff;
916 regs->gpr[ra] += regs->gpr[rb];
917 break;
918
919 case OP_31_XOP_LHAX:
920 regs->gpr[rd] = ~0UL;
921 break;
922
923 case OP_31_XOP_LHAUX:
924 regs->gpr[rd] = ~0UL;
925 regs->gpr[ra] += regs->gpr[rb];
926 break;
927
928 default:
929 return 0;
930 }
931 break;
932
933 case OP_LWZ:
934 regs->gpr[rd] = 0xffffffff;
935 break;
936
937 case OP_LWZU:
938 regs->gpr[rd] = 0xffffffff;
939 regs->gpr[ra] += (s16)d;
940 break;
941
942 case OP_LBZ:
943 regs->gpr[rd] = 0xff;
944 break;
945
946 case OP_LBZU:
947 regs->gpr[rd] = 0xff;
948 regs->gpr[ra] += (s16)d;
949 break;
950
951 case OP_LHZ:
952 regs->gpr[rd] = 0xffff;
953 break;
954
955 case OP_LHZU:
956 regs->gpr[rd] = 0xffff;
957 regs->gpr[ra] += (s16)d;
958 break;
959
960 case OP_LHA:
961 regs->gpr[rd] = ~0UL;
962 break;
963
964 case OP_LHAU:
965 regs->gpr[rd] = ~0UL;
966 regs->gpr[ra] += (s16)d;
967 break;
968
969 default:
970 return 0;
971 }
972
973 return 1;
974}
975
976static int is_in_pci_mem_space(phys_addr_t addr)
977{
978 struct pci_controller *hose;
979 struct resource *res;
980 int i;
981
982 list_for_each_entry(hose, &hose_list, list_node) {
983 if (!(hose->indirect_type & PPC_INDIRECT_TYPE_EXT_REG))
984 continue;
985
986 for (i = 0; i < 3; i++) {
987 res = &hose->mem_resources[i];
988 if ((res->flags & IORESOURCE_MEM) &&
989 addr >= res->start && addr <= res->end)
990 return 1;
991 }
992 }
993 return 0;
994}
995
996int fsl_pci_mcheck_exception(struct pt_regs *regs)
997{
998 u32 inst;
999 int ret;
1000 phys_addr_t addr = 0;
1001
1002 /* Let KVM/QEMU deal with the exception */
1003 if (regs->msr & MSR_GS)
1004 return 0;
1005
1006#ifdef CONFIG_PHYS_64BIT
1007 addr = mfspr(SPRN_MCARU);
1008 addr <<= 32;
1009#endif
1010 addr += mfspr(SPRN_MCAR);
1011
1012 if (is_in_pci_mem_space(addr)) {
1013 if (user_mode(regs)) {
1014 pagefault_disable();
1015 ret = get_user(regs->nip, &inst);
1016 pagefault_enable();
1017 } else {
1018 ret = probe_kernel_address(regs->nip, inst);
1019 }
1020
1021 if (mcheck_handle_load(regs, inst)) {
1022 regs->nip += 4;
1023 return 1;
1024 }
1025 }
1026
1027 return 0;
1028}
1029#endif
1030
871#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx) 1031#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
872static const struct of_device_id pci_ids[] = { 1032static const struct of_device_id pci_ids[] = {
873 { .compatible = "fsl,mpc8540-pci", }, 1033 { .compatible = "fsl,mpc8540-pci", },
@@ -928,28 +1088,10 @@ static int fsl_pci_probe(struct platform_device *pdev)
928{ 1088{
929 int ret; 1089 int ret;
930 struct device_node *node; 1090 struct device_node *node;
931#ifdef CONFIG_SWIOTLB
932 struct pci_controller *hose;
933#endif
934 1091
935 node = pdev->dev.of_node; 1092 node = pdev->dev.of_node;
936 ret = fsl_add_bridge(pdev, fsl_pci_primary == node); 1093 ret = fsl_add_bridge(pdev, fsl_pci_primary == node);
937 1094
938#ifdef CONFIG_SWIOTLB
939 if (ret == 0) {
940 hose = pci_find_hose_for_OF_device(pdev->dev.of_node);
941
942 /*
943 * if we couldn't map all of DRAM via the dma windows
944 * we need SWIOTLB to handle buffers located outside of
945 * dma capable memory region
946 */
947 if (memblock_end_of_DRAM() - 1 > hose->dma_window_base_cur +
948 hose->dma_window_size)
949 ppc_swiotlb_enable = 1;
950 }
951#endif
952
953 mpc85xx_pci_err_probe(pdev); 1095 mpc85xx_pci_err_probe(pdev);
954 1096
955 return 0; 1097 return 0;
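
mcheck_handle_load() above recovers from a machine check on a dead PCI read by decoding the faulting instruction, loading all-ones into the target register, and skipping the instruction. A cut-down sketch of the decode for one opcode, using the standard PowerPC field positions (primary opcode in the top 6 bits, RT in bits 21-25):

#include <stdint.h>

#define OP_LWZ 32	/* primary opcode of lwz */

static unsigned get_op(uint32_t inst) { return inst >> 26; }
static unsigned get_rt(uint32_t inst) { return (inst >> 21) & 0x1f; }

/* Fake a load that targeted unreachable PCI space: RT reads all-ones.
 * Returns 1 if handled so the caller can advance the PC past it. */
static int emulate_faulted_load(uint32_t inst, uint64_t gpr[32])
{
	if (get_op(inst) != OP_LWZ)
		return 0;
	gpr[get_rt(inst)] = 0xffffffff;
	return 1;
}
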
diff --git a/arch/powerpc/sysdev/fsl_pci.h b/arch/powerpc/sysdev/fsl_pci.h
index 72b5625330e2..defc422a375f 100644
--- a/arch/powerpc/sysdev/fsl_pci.h
+++ b/arch/powerpc/sysdev/fsl_pci.h
@@ -126,5 +126,11 @@ static inline int mpc85xx_pci_err_probe(struct platform_device *op)
126} 126}
127#endif 127#endif
128 128
129#ifdef CONFIG_FSL_PCI
130extern int fsl_pci_mcheck_exception(struct pt_regs *);
131#else
132static inline int fsl_pci_mcheck_exception(struct pt_regs *regs) {return 0; }
133#endif
134
129#endif /* __POWERPC_FSL_PCI_H */ 135#endif /* __POWERPC_FSL_PCI_H */
130#endif /* __KERNEL__ */ 136#endif /* __KERNEL__ */
diff --git a/arch/powerpc/sysdev/rtc_cmos_setup.c b/arch/powerpc/sysdev/rtc_cmos_setup.c
index af79e1ea74b6..af0f9beddca9 100644
--- a/arch/powerpc/sysdev/rtc_cmos_setup.c
+++ b/arch/powerpc/sysdev/rtc_cmos_setup.c
@@ -62,7 +62,7 @@ static int __init add_rtc(void)
62 pd = platform_device_register_simple("rtc_cmos", -1, 62 pd = platform_device_register_simple("rtc_cmos", -1,
63 &res[0], num_res); 63 &res[0], num_res);
64 64
65 return PTR_RET(pd); 65 return PTR_ERR_OR_ZERO(pd);
66} 66}
67fs_initcall(add_rtc); 67fs_initcall(add_rtc);
68 68
diff --git a/arch/powerpc/sysdev/xics/icp-native.c b/arch/powerpc/sysdev/xics/icp-native.c
index 7cd728b3b5e4..9dee47071af8 100644
--- a/arch/powerpc/sysdev/xics/icp-native.c
+++ b/arch/powerpc/sysdev/xics/icp-native.c
@@ -216,7 +216,7 @@ static int __init icp_native_init_one_node(struct device_node *np,
216 unsigned int *indx) 216 unsigned int *indx)
217{ 217{
218 unsigned int ilen; 218 unsigned int ilen;
219 const u32 *ireg; 219 const __be32 *ireg;
220 int i; 220 int i;
221 int reg_tuple_size; 221 int reg_tuple_size;
222 int num_servers = 0; 222 int num_servers = 0;
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
index 9049d9f44485..fe0cca477164 100644
--- a/arch/powerpc/sysdev/xics/xics-common.c
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -49,7 +49,7 @@ void xics_update_irq_servers(void)
49 int i, j; 49 int i, j;
50 struct device_node *np; 50 struct device_node *np;
51 u32 ilen; 51 u32 ilen;
52 const u32 *ireg; 52 const __be32 *ireg;
53 u32 hcpuid; 53 u32 hcpuid;
54 54
55 /* Find the server numbers for the boot cpu. */ 55 /* Find the server numbers for the boot cpu. */
@@ -75,8 +75,8 @@ void xics_update_irq_servers(void)
75 * default distribution server 75 * default distribution server
76 */ 76 */
77 for (j = 0; j < i; j += 2) { 77 for (j = 0; j < i; j += 2) {
78 if (ireg[j] == hcpuid) { 78 if (be32_to_cpu(ireg[j]) == hcpuid) {
79 xics_default_distrib_server = ireg[j+1]; 79 xics_default_distrib_server = be32_to_cpu(ireg[j+1]);
80 break; 80 break;
81 } 81 }
82 } 82 }
@@ -383,7 +383,7 @@ void __init xics_register_ics(struct ics *ics)
383static void __init xics_get_server_size(void) 383static void __init xics_get_server_size(void)
384{ 384{
385 struct device_node *np; 385 struct device_node *np;
386 const u32 *isize; 386 const __be32 *isize;
387 387
388 /* We fetch the interrupt server size from the first ICS node 388 /* We fetch the interrupt server size from the first ICS node
389 * we find if any 389 * we find if any
@@ -394,7 +394,7 @@ static void __init xics_get_server_size(void)
394 isize = of_get_property(np, "ibm,interrupt-server#-size", NULL); 394 isize = of_get_property(np, "ibm,interrupt-server#-size", NULL);
395 if (!isize) 395 if (!isize)
396 return; 396 return;
397 xics_interrupt_server_size = *isize; 397 xics_interrupt_server_size = be32_to_cpu(*isize);
398 of_node_put(np); 398 of_node_put(np);
399} 399}
400 400
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 96bf5bd30fbc..af9d3469fb99 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -972,27 +972,27 @@ static void bootcmds(void)
972static int cpu_cmd(void) 972static int cpu_cmd(void)
973{ 973{
974#ifdef CONFIG_SMP 974#ifdef CONFIG_SMP
975 unsigned long cpu; 975 unsigned long cpu, first_cpu, last_cpu;
976 int timeout; 976 int timeout;
977 int count;
978 977
979 if (!scanhex(&cpu)) { 978 if (!scanhex(&cpu)) {
980 /* print cpus waiting or in xmon */ 979 /* print cpus waiting or in xmon */
981 printf("cpus stopped:"); 980 printf("cpus stopped:");
982 count = 0; 981 last_cpu = first_cpu = NR_CPUS;
983 for_each_possible_cpu(cpu) { 982 for_each_possible_cpu(cpu) {
984 if (cpumask_test_cpu(cpu, &cpus_in_xmon)) { 983 if (cpumask_test_cpu(cpu, &cpus_in_xmon)) {
985 if (count == 0) 984 if (cpu == last_cpu + 1) {
986 printf(" %x", cpu); 985 last_cpu = cpu;
987 ++count; 986 } else {
988 } else { 987 if (last_cpu != first_cpu)
989 if (count > 1) 988 printf("-%lx", last_cpu);
990 printf("-%x", cpu - 1); 989 last_cpu = first_cpu = cpu;
991 count = 0; 990 printf(" %lx", cpu);
991 }
992 } 992 }
993 } 993 }
994 if (count > 1) 994 if (last_cpu != first_cpu)
995 printf("-%x", NR_CPUS - 1); 995 printf("-%lx", last_cpu);
996 printf("\n"); 996 printf("\n");
997 return 0; 997 return 0;
998 } 998 }
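
The reworked cpu_cmd() above collapses consecutive stopped CPUs into "first-last" ranges instead of printing every ID. The same range-compression logic as a standalone program:

#include <stdio.h>

int main(void)
{
	long cpus[] = { 0, 1, 2, 5, 6, 9 };	/* pretend cpus_in_xmon */
	long first = -2, last = -2;

	printf("cpus stopped:");
	for (unsigned i = 0; i < sizeof(cpus) / sizeof(cpus[0]); i++) {
		long cpu = cpus[i];

		if (cpu == last + 1) {
			last = cpu;		/* extend current run */
		} else {
			if (last != first)
				printf("-%lx", last);	/* close old run */
			last = first = cpu;
			printf(" %lx", cpu);
		}
	}
	if (last != first)
		printf("-%lx", last);
	printf("\n");	/* prints: cpus stopped: 0-2 5-6 9 */
	return 0;
}
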
@@ -1256,11 +1256,18 @@ const char *getvecname(unsigned long vec)
1256 case 0x700: ret = "(Program Check)"; break; 1256 case 0x700: ret = "(Program Check)"; break;
1257 case 0x800: ret = "(FPU Unavailable)"; break; 1257 case 0x800: ret = "(FPU Unavailable)"; break;
1258 case 0x900: ret = "(Decrementer)"; break; 1258 case 0x900: ret = "(Decrementer)"; break;
1259 case 0x980: ret = "(Hypervisor Decrementer)"; break;
1260 case 0xa00: ret = "(Doorbell)"; break;
1259 case 0xc00: ret = "(System Call)"; break; 1261 case 0xc00: ret = "(System Call)"; break;
1260 case 0xd00: ret = "(Single Step)"; break; 1262 case 0xd00: ret = "(Single Step)"; break;
1263 case 0xe40: ret = "(Emulation Assist)"; break;
1264 case 0xe60: ret = "(HMI)"; break;
1265 case 0xe80: ret = "(Hypervisor Doorbell)"; break;
1261 case 0xf00: ret = "(Performance Monitor)"; break; 1266 case 0xf00: ret = "(Performance Monitor)"; break;
1262 case 0xf20: ret = "(Altivec Unavailable)"; break; 1267 case 0xf20: ret = "(Altivec Unavailable)"; break;
1263 case 0x1300: ret = "(Instruction Breakpoint)"; break; 1268 case 0x1300: ret = "(Instruction Breakpoint)"; break;
1269 case 0x1500: ret = "(Denormalisation)"; break;
1270 case 0x1700: ret = "(Altivec Assist)"; break;
1264 default: ret = ""; 1271 default: ret = "";
1265 } 1272 }
1266 return ret; 1273 return ret;
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 22f75b504f7f..8b7892bf6d8b 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -116,8 +116,10 @@ config S390
116 select HAVE_FUNCTION_GRAPH_TRACER 116 select HAVE_FUNCTION_GRAPH_TRACER
117 select HAVE_FUNCTION_TRACER 117 select HAVE_FUNCTION_TRACER
118 select HAVE_FUNCTION_TRACE_MCOUNT_TEST 118 select HAVE_FUNCTION_TRACE_MCOUNT_TEST
119 select HAVE_GENERIC_HARDIRQS
119 select HAVE_KERNEL_BZIP2 120 select HAVE_KERNEL_BZIP2
120 select HAVE_KERNEL_GZIP 121 select HAVE_KERNEL_GZIP
122 select HAVE_KERNEL_LZ4
121 select HAVE_KERNEL_LZMA 123 select HAVE_KERNEL_LZMA
122 select HAVE_KERNEL_LZO 124 select HAVE_KERNEL_LZO
123 select HAVE_KERNEL_XZ 125 select HAVE_KERNEL_XZ
@@ -227,11 +229,12 @@ config MARCH_Z196
227 not work on older machines. 229 not work on older machines.
228 230
229config MARCH_ZEC12 231config MARCH_ZEC12
230 bool "IBM zEC12" 232 bool "IBM zBC12 and zEC12"
231 select HAVE_MARCH_ZEC12_FEATURES if 64BIT 233 select HAVE_MARCH_ZEC12_FEATURES if 64BIT
232 help 234 help
233 Select this to enable optimizations for IBM zEC12 (2827 series). The 235 Select this to enable optimizations for IBM zBC12 and zEC12 (2828 and
234 kernel will be slightly faster but will not work on older machines. 236 2827 series). The kernel will be slightly faster but will not work on
237 older machines.
235 238
236endchoice 239endchoice
237 240
@@ -443,6 +446,16 @@ config PCI_NR_FUNCTIONS
443 This allows you to specify the maximum number of PCI functions which 446 This allows you to specify the maximum number of PCI functions which
444 this kernel will support. 447 this kernel will support.
445 448
449config PCI_NR_MSI
450 int "Maximum number of MSI interrupts (64-32768)"
451 range 64 32768
452 default "256"
453 help
454 This defines the number of virtual interrupts the kernel will
455 provide for MSI interrupts. If you configure your system to have
456 too few, drivers will fail to allocate MSI interrupts for all
457 PCI devices.
458
446source "drivers/pci/Kconfig" 459source "drivers/pci/Kconfig"
447source "drivers/pci/pcie/Kconfig" 460source "drivers/pci/pcie/Kconfig"
448source "drivers/pci/hotplug/Kconfig" 461source "drivers/pci/hotplug/Kconfig"
@@ -709,6 +722,7 @@ config S390_GUEST
709 def_bool y 722 def_bool y
710 prompt "s390 support for virtio devices" 723 prompt "s390 support for virtio devices"
711 depends on 64BIT 724 depends on 64BIT
725 select TTY
712 select VIRTUALIZATION 726 select VIRTUALIZATION
713 select VIRTIO 727 select VIRTIO
714 select VIRTIO_CONSOLE 728 select VIRTIO_CONSOLE
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index 3ad8f61c9985..866ecbe670e4 100644
--- a/arch/s390/boot/compressed/Makefile
+++ b/arch/s390/boot/compressed/Makefile
@@ -6,9 +6,9 @@
6 6
7BITS := $(if $(CONFIG_64BIT),64,31) 7BITS := $(if $(CONFIG_64BIT),64,31)
8 8
9targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 \ 9targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2
10 vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo misc.o piggy.o \ 10targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
11 sizes.h head$(BITS).o 11targets += misc.o piggy.o sizes.h head$(BITS).o
12 12
13KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 13KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
14KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING 14KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
@@ -48,6 +48,7 @@ vmlinux.bin.all-y := $(obj)/vmlinux.bin
48 48
49suffix-$(CONFIG_KERNEL_GZIP) := gz 49suffix-$(CONFIG_KERNEL_GZIP) := gz
50suffix-$(CONFIG_KERNEL_BZIP2) := bz2 50suffix-$(CONFIG_KERNEL_BZIP2) := bz2
51suffix-$(CONFIG_KERNEL_LZ4) := lz4
51suffix-$(CONFIG_KERNEL_LZMA) := lzma 52suffix-$(CONFIG_KERNEL_LZMA) := lzma
52suffix-$(CONFIG_KERNEL_LZO) := lzo 53suffix-$(CONFIG_KERNEL_LZO) := lzo
53suffix-$(CONFIG_KERNEL_XZ) := xz 54suffix-$(CONFIG_KERNEL_XZ) := xz
@@ -56,6 +57,8 @@ $(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y)
56 $(call if_changed,gzip) 57 $(call if_changed,gzip)
57$(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y) 58$(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y)
58 $(call if_changed,bzip2) 59 $(call if_changed,bzip2)
60$(obj)/vmlinux.bin.lz4: $(vmlinux.bin.all-y)
61 $(call if_changed,lz4)
59$(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y) 62$(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y)
60 $(call if_changed,lzma) 63 $(call if_changed,lzma)
61$(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) 64$(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y)
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
index c4c6a1cf221b..57cbaff1f397 100644
--- a/arch/s390/boot/compressed/misc.c
+++ b/arch/s390/boot/compressed/misc.c
@@ -47,6 +47,10 @@ static unsigned long free_mem_end_ptr;
47#include "../../../../lib/decompress_bunzip2.c" 47#include "../../../../lib/decompress_bunzip2.c"
48#endif 48#endif
49 49
50#ifdef CONFIG_KERNEL_LZ4
51#include "../../../../lib/decompress_unlz4.c"
52#endif
53
50#ifdef CONFIG_KERNEL_LZMA 54#ifdef CONFIG_KERNEL_LZMA
51#include "../../../../lib/decompress_unlzma.c" 55#include "../../../../lib/decompress_unlzma.c"
52#endif 56#endif
diff --git a/arch/s390/hypfs/hypfs.h b/arch/s390/hypfs/hypfs.h
index f41e0ef7fdf9..79f2ac55253f 100644
--- a/arch/s390/hypfs/hypfs.h
+++ b/arch/s390/hypfs/hypfs.h
@@ -18,26 +18,23 @@
18#define UPDATE_FILE_MODE 0220 18#define UPDATE_FILE_MODE 0220
19#define DIR_MODE 0550 19#define DIR_MODE 0550
20 20
21extern struct dentry *hypfs_mkdir(struct super_block *sb, struct dentry *parent, 21extern struct dentry *hypfs_mkdir(struct dentry *parent, const char *name);
22 const char *name);
23 22
24extern struct dentry *hypfs_create_u64(struct super_block *sb, 23extern struct dentry *hypfs_create_u64(struct dentry *dir, const char *name,
25 struct dentry *dir, const char *name,
26 __u64 value); 24 __u64 value);
27 25
28extern struct dentry *hypfs_create_str(struct super_block *sb, 26extern struct dentry *hypfs_create_str(struct dentry *dir, const char *name,
29 struct dentry *dir, const char *name,
30 char *string); 27 char *string);
31 28
32/* LPAR Hypervisor */ 29/* LPAR Hypervisor */
33extern int hypfs_diag_init(void); 30extern int hypfs_diag_init(void);
34extern void hypfs_diag_exit(void); 31extern void hypfs_diag_exit(void);
35extern int hypfs_diag_create_files(struct super_block *sb, struct dentry *root); 32extern int hypfs_diag_create_files(struct dentry *root);
36 33
37/* VM Hypervisor */ 34/* VM Hypervisor */
38extern int hypfs_vm_init(void); 35extern int hypfs_vm_init(void);
39extern void hypfs_vm_exit(void); 36extern void hypfs_vm_exit(void);
40extern int hypfs_vm_create_files(struct super_block *sb, struct dentry *root); 37extern int hypfs_vm_create_files(struct dentry *root);
41 38
42/* debugfs interface */ 39/* debugfs interface */
43struct hypfs_dbfs_file; 40struct hypfs_dbfs_file;
diff --git a/arch/s390/hypfs/hypfs_dbfs.c b/arch/s390/hypfs/hypfs_dbfs.c
index bb5dd496614f..17ab8b7b53cc 100644
--- a/arch/s390/hypfs/hypfs_dbfs.c
+++ b/arch/s390/hypfs/hypfs_dbfs.c
@@ -105,7 +105,7 @@ void hypfs_dbfs_remove_file(struct hypfs_dbfs_file *df)
105int hypfs_dbfs_init(void) 105int hypfs_dbfs_init(void)
106{ 106{
107 dbfs_dir = debugfs_create_dir("s390_hypfs", NULL); 107 dbfs_dir = debugfs_create_dir("s390_hypfs", NULL);
108 return PTR_RET(dbfs_dir); 108 return PTR_ERR_OR_ZERO(dbfs_dir);
109} 109}
110 110
111void hypfs_dbfs_exit(void) 111void hypfs_dbfs_exit(void)
diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
index 138893e5f736..5eeffeefae06 100644
--- a/arch/s390/hypfs/hypfs_diag.c
+++ b/arch/s390/hypfs/hypfs_diag.c
@@ -623,8 +623,7 @@ void hypfs_diag_exit(void)
623 * ******************************************* 623 * *******************************************
624 */ 624 */
625 625
626static int hypfs_create_cpu_files(struct super_block *sb, 626static int hypfs_create_cpu_files(struct dentry *cpus_dir, void *cpu_info)
627 struct dentry *cpus_dir, void *cpu_info)
628{ 627{
629 struct dentry *cpu_dir; 628 struct dentry *cpu_dir;
630 char buffer[TMP_SIZE]; 629 char buffer[TMP_SIZE];
@@ -632,30 +631,29 @@ static int hypfs_create_cpu_files(struct super_block *sb,
632 631
633 snprintf(buffer, TMP_SIZE, "%d", cpu_info__cpu_addr(diag204_info_type, 632 snprintf(buffer, TMP_SIZE, "%d", cpu_info__cpu_addr(diag204_info_type,
634 cpu_info)); 633 cpu_info));
635 cpu_dir = hypfs_mkdir(sb, cpus_dir, buffer); 634 cpu_dir = hypfs_mkdir(cpus_dir, buffer);
636 rc = hypfs_create_u64(sb, cpu_dir, "mgmtime", 635 rc = hypfs_create_u64(cpu_dir, "mgmtime",
637 cpu_info__acc_time(diag204_info_type, cpu_info) - 636 cpu_info__acc_time(diag204_info_type, cpu_info) -
638 cpu_info__lp_time(diag204_info_type, cpu_info)); 637 cpu_info__lp_time(diag204_info_type, cpu_info));
639 if (IS_ERR(rc)) 638 if (IS_ERR(rc))
640 return PTR_ERR(rc); 639 return PTR_ERR(rc);
641 rc = hypfs_create_u64(sb, cpu_dir, "cputime", 640 rc = hypfs_create_u64(cpu_dir, "cputime",
642 cpu_info__lp_time(diag204_info_type, cpu_info)); 641 cpu_info__lp_time(diag204_info_type, cpu_info));
643 if (IS_ERR(rc)) 642 if (IS_ERR(rc))
644 return PTR_ERR(rc); 643 return PTR_ERR(rc);
645 if (diag204_info_type == INFO_EXT) { 644 if (diag204_info_type == INFO_EXT) {
646 rc = hypfs_create_u64(sb, cpu_dir, "onlinetime", 645 rc = hypfs_create_u64(cpu_dir, "onlinetime",
647 cpu_info__online_time(diag204_info_type, 646 cpu_info__online_time(diag204_info_type,
648 cpu_info)); 647 cpu_info));
649 if (IS_ERR(rc)) 648 if (IS_ERR(rc))
650 return PTR_ERR(rc); 649 return PTR_ERR(rc);
651 } 650 }
652 diag224_idx2name(cpu_info__ctidx(diag204_info_type, cpu_info), buffer); 651 diag224_idx2name(cpu_info__ctidx(diag204_info_type, cpu_info), buffer);
653 rc = hypfs_create_str(sb, cpu_dir, "type", buffer); 652 rc = hypfs_create_str(cpu_dir, "type", buffer);
654 return PTR_RET(rc); 653 return PTR_RET(rc);
655} 654}
656 655
657static void *hypfs_create_lpar_files(struct super_block *sb, 656static void *hypfs_create_lpar_files(struct dentry *systems_dir, void *part_hdr)
658 struct dentry *systems_dir, void *part_hdr)
659{ 657{
660 struct dentry *cpus_dir; 658 struct dentry *cpus_dir;
661 struct dentry *lpar_dir; 659 struct dentry *lpar_dir;
@@ -665,16 +663,16 @@ static void *hypfs_create_lpar_files(struct super_block *sb,
665 663
666 part_hdr__part_name(diag204_info_type, part_hdr, lpar_name); 664 part_hdr__part_name(diag204_info_type, part_hdr, lpar_name);
667 lpar_name[LPAR_NAME_LEN] = 0; 665 lpar_name[LPAR_NAME_LEN] = 0;
668 lpar_dir = hypfs_mkdir(sb, systems_dir, lpar_name); 666 lpar_dir = hypfs_mkdir(systems_dir, lpar_name);
669 if (IS_ERR(lpar_dir)) 667 if (IS_ERR(lpar_dir))
670 return lpar_dir; 668 return lpar_dir;
671 cpus_dir = hypfs_mkdir(sb, lpar_dir, "cpus"); 669 cpus_dir = hypfs_mkdir(lpar_dir, "cpus");
672 if (IS_ERR(cpus_dir)) 670 if (IS_ERR(cpus_dir))
673 return cpus_dir; 671 return cpus_dir;
674 cpu_info = part_hdr + part_hdr__size(diag204_info_type); 672 cpu_info = part_hdr + part_hdr__size(diag204_info_type);
675 for (i = 0; i < part_hdr__rcpus(diag204_info_type, part_hdr); i++) { 673 for (i = 0; i < part_hdr__rcpus(diag204_info_type, part_hdr); i++) {
676 int rc; 674 int rc;
677 rc = hypfs_create_cpu_files(sb, cpus_dir, cpu_info); 675 rc = hypfs_create_cpu_files(cpus_dir, cpu_info);
678 if (rc) 676 if (rc)
679 return ERR_PTR(rc); 677 return ERR_PTR(rc);
680 cpu_info += cpu_info__size(diag204_info_type); 678 cpu_info += cpu_info__size(diag204_info_type);
@@ -682,8 +680,7 @@ static void *hypfs_create_lpar_files(struct super_block *sb,
682 return cpu_info; 680 return cpu_info;
683} 681}
684 682
685static int hypfs_create_phys_cpu_files(struct super_block *sb, 683static int hypfs_create_phys_cpu_files(struct dentry *cpus_dir, void *cpu_info)
686 struct dentry *cpus_dir, void *cpu_info)
687{ 684{
688 struct dentry *cpu_dir; 685 struct dentry *cpu_dir;
689 char buffer[TMP_SIZE]; 686 char buffer[TMP_SIZE];
@@ -691,32 +688,31 @@ static int hypfs_create_phys_cpu_files(struct super_block *sb,
691 688
692 snprintf(buffer, TMP_SIZE, "%i", phys_cpu__cpu_addr(diag204_info_type, 689 snprintf(buffer, TMP_SIZE, "%i", phys_cpu__cpu_addr(diag204_info_type,
693 cpu_info)); 690 cpu_info));
694 cpu_dir = hypfs_mkdir(sb, cpus_dir, buffer); 691 cpu_dir = hypfs_mkdir(cpus_dir, buffer);
695 if (IS_ERR(cpu_dir)) 692 if (IS_ERR(cpu_dir))
696 return PTR_ERR(cpu_dir); 693 return PTR_ERR(cpu_dir);
697 rc = hypfs_create_u64(sb, cpu_dir, "mgmtime", 694 rc = hypfs_create_u64(cpu_dir, "mgmtime",
698 phys_cpu__mgm_time(diag204_info_type, cpu_info)); 695 phys_cpu__mgm_time(diag204_info_type, cpu_info));
699 if (IS_ERR(rc)) 696 if (IS_ERR(rc))
700 return PTR_ERR(rc); 697 return PTR_ERR(rc);
701 diag224_idx2name(phys_cpu__ctidx(diag204_info_type, cpu_info), buffer); 698 diag224_idx2name(phys_cpu__ctidx(diag204_info_type, cpu_info), buffer);
702 rc = hypfs_create_str(sb, cpu_dir, "type", buffer); 699 rc = hypfs_create_str(cpu_dir, "type", buffer);
703 return PTR_RET(rc); 700 return PTR_RET(rc);
704} 701}
705 702
706static void *hypfs_create_phys_files(struct super_block *sb, 703static void *hypfs_create_phys_files(struct dentry *parent_dir, void *phys_hdr)
707 struct dentry *parent_dir, void *phys_hdr)
708{ 704{
709 int i; 705 int i;
710 void *cpu_info; 706 void *cpu_info;
711 struct dentry *cpus_dir; 707 struct dentry *cpus_dir;
712 708
713 cpus_dir = hypfs_mkdir(sb, parent_dir, "cpus"); 709 cpus_dir = hypfs_mkdir(parent_dir, "cpus");
714 if (IS_ERR(cpus_dir)) 710 if (IS_ERR(cpus_dir))
715 return cpus_dir; 711 return cpus_dir;
716 cpu_info = phys_hdr + phys_hdr__size(diag204_info_type); 712 cpu_info = phys_hdr + phys_hdr__size(diag204_info_type);
717 for (i = 0; i < phys_hdr__cpus(diag204_info_type, phys_hdr); i++) { 713 for (i = 0; i < phys_hdr__cpus(diag204_info_type, phys_hdr); i++) {
718 int rc; 714 int rc;
719 rc = hypfs_create_phys_cpu_files(sb, cpus_dir, cpu_info); 715 rc = hypfs_create_phys_cpu_files(cpus_dir, cpu_info);
720 if (rc) 716 if (rc)
721 return ERR_PTR(rc); 717 return ERR_PTR(rc);
722 cpu_info += phys_cpu__size(diag204_info_type); 718 cpu_info += phys_cpu__size(diag204_info_type);
@@ -724,7 +720,7 @@ static void *hypfs_create_phys_files(struct super_block *sb,
724 return cpu_info; 720 return cpu_info;
725} 721}
726 722
727int hypfs_diag_create_files(struct super_block *sb, struct dentry *root) 723int hypfs_diag_create_files(struct dentry *root)
728{ 724{
729 struct dentry *systems_dir, *hyp_dir; 725 struct dentry *systems_dir, *hyp_dir;
730 void *time_hdr, *part_hdr; 726 void *time_hdr, *part_hdr;
@@ -735,7 +731,7 @@ int hypfs_diag_create_files(struct super_block *sb, struct dentry *root)
735 if (IS_ERR(buffer)) 731 if (IS_ERR(buffer))
736 return PTR_ERR(buffer); 732 return PTR_ERR(buffer);
737 733
738 systems_dir = hypfs_mkdir(sb, root, "systems"); 734 systems_dir = hypfs_mkdir(root, "systems");
739 if (IS_ERR(systems_dir)) { 735 if (IS_ERR(systems_dir)) {
740 rc = PTR_ERR(systems_dir); 736 rc = PTR_ERR(systems_dir);
741 goto err_out; 737 goto err_out;
@@ -743,25 +739,25 @@ int hypfs_diag_create_files(struct super_block *sb, struct dentry *root)
743 time_hdr = (struct x_info_blk_hdr *)buffer; 739 time_hdr = (struct x_info_blk_hdr *)buffer;
744 part_hdr = time_hdr + info_blk_hdr__size(diag204_info_type); 740 part_hdr = time_hdr + info_blk_hdr__size(diag204_info_type);
745 for (i = 0; i < info_blk_hdr__npar(diag204_info_type, time_hdr); i++) { 741 for (i = 0; i < info_blk_hdr__npar(diag204_info_type, time_hdr); i++) {
746 part_hdr = hypfs_create_lpar_files(sb, systems_dir, part_hdr); 742 part_hdr = hypfs_create_lpar_files(systems_dir, part_hdr);
747 if (IS_ERR(part_hdr)) { 743 if (IS_ERR(part_hdr)) {
748 rc = PTR_ERR(part_hdr); 744 rc = PTR_ERR(part_hdr);
749 goto err_out; 745 goto err_out;
750 } 746 }
751 } 747 }
752 if (info_blk_hdr__flags(diag204_info_type, time_hdr) & LPAR_PHYS_FLG) { 748 if (info_blk_hdr__flags(diag204_info_type, time_hdr) & LPAR_PHYS_FLG) {
753 ptr = hypfs_create_phys_files(sb, root, part_hdr); 749 ptr = hypfs_create_phys_files(root, part_hdr);
754 if (IS_ERR(ptr)) { 750 if (IS_ERR(ptr)) {
755 rc = PTR_ERR(ptr); 751 rc = PTR_ERR(ptr);
756 goto err_out; 752 goto err_out;
757 } 753 }
758 } 754 }
759 hyp_dir = hypfs_mkdir(sb, root, "hyp"); 755 hyp_dir = hypfs_mkdir(root, "hyp");
760 if (IS_ERR(hyp_dir)) { 756 if (IS_ERR(hyp_dir)) {
761 rc = PTR_ERR(hyp_dir); 757 rc = PTR_ERR(hyp_dir);
762 goto err_out; 758 goto err_out;
763 } 759 }
764 ptr = hypfs_create_str(sb, hyp_dir, "type", "LPAR Hypervisor"); 760 ptr = hypfs_create_str(hyp_dir, "type", "LPAR Hypervisor");
765 if (IS_ERR(ptr)) { 761 if (IS_ERR(ptr)) {
766 rc = PTR_ERR(ptr); 762 rc = PTR_ERR(ptr);
767 goto err_out; 763 goto err_out;
diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c
index f364dcf77e8e..24908ce149f1 100644
--- a/arch/s390/hypfs/hypfs_vm.c
+++ b/arch/s390/hypfs/hypfs_vm.c
@@ -107,16 +107,15 @@ static void diag2fc_free(const void *data)
107 vfree(data); 107 vfree(data);
108} 108}
109 109
110#define ATTRIBUTE(sb, dir, name, member) \ 110#define ATTRIBUTE(dir, name, member) \
111do { \ 111do { \
112 void *rc; \ 112 void *rc; \
113 rc = hypfs_create_u64(sb, dir, name, member); \ 113 rc = hypfs_create_u64(dir, name, member); \
114 if (IS_ERR(rc)) \ 114 if (IS_ERR(rc)) \
115 return PTR_ERR(rc); \ 115 return PTR_ERR(rc); \
116} while(0) 116} while(0)
117 117
118static int hpyfs_vm_create_guest(struct super_block *sb, 118static int hpyfs_vm_create_guest(struct dentry *systems_dir,
119 struct dentry *systems_dir,
120 struct diag2fc_data *data) 119 struct diag2fc_data *data)
121{ 120{
122 char guest_name[NAME_LEN + 1] = {}; 121 char guest_name[NAME_LEN + 1] = {};
@@ -130,46 +129,46 @@ static int hpyfs_vm_create_guest(struct super_block *sb,
130 memcpy(guest_name, data->guest_name, NAME_LEN); 129 memcpy(guest_name, data->guest_name, NAME_LEN);
131 EBCASC(guest_name, NAME_LEN); 130 EBCASC(guest_name, NAME_LEN);
132 strim(guest_name); 131 strim(guest_name);
133 guest_dir = hypfs_mkdir(sb, systems_dir, guest_name); 132 guest_dir = hypfs_mkdir(systems_dir, guest_name);
134 if (IS_ERR(guest_dir)) 133 if (IS_ERR(guest_dir))
135 return PTR_ERR(guest_dir); 134 return PTR_ERR(guest_dir);
136 ATTRIBUTE(sb, guest_dir, "onlinetime_us", data->el_time); 135 ATTRIBUTE(guest_dir, "onlinetime_us", data->el_time);
137 136
138 /* logical cpu information */ 137 /* logical cpu information */
139 cpus_dir = hypfs_mkdir(sb, guest_dir, "cpus"); 138 cpus_dir = hypfs_mkdir(guest_dir, "cpus");
140 if (IS_ERR(cpus_dir)) 139 if (IS_ERR(cpus_dir))
141 return PTR_ERR(cpus_dir); 140 return PTR_ERR(cpus_dir);
142 ATTRIBUTE(sb, cpus_dir, "cputime_us", data->used_cpu); 141 ATTRIBUTE(cpus_dir, "cputime_us", data->used_cpu);
143 ATTRIBUTE(sb, cpus_dir, "capped", capped_value); 142 ATTRIBUTE(cpus_dir, "capped", capped_value);
144 ATTRIBUTE(sb, cpus_dir, "dedicated", dedicated_flag); 143 ATTRIBUTE(cpus_dir, "dedicated", dedicated_flag);
145 ATTRIBUTE(sb, cpus_dir, "count", data->vcpus); 144 ATTRIBUTE(cpus_dir, "count", data->vcpus);
146 ATTRIBUTE(sb, cpus_dir, "weight_min", data->cpu_min); 145 ATTRIBUTE(cpus_dir, "weight_min", data->cpu_min);
147 ATTRIBUTE(sb, cpus_dir, "weight_max", data->cpu_max); 146 ATTRIBUTE(cpus_dir, "weight_max", data->cpu_max);
148 ATTRIBUTE(sb, cpus_dir, "weight_cur", data->cpu_shares); 147 ATTRIBUTE(cpus_dir, "weight_cur", data->cpu_shares);
149 148
150 /* memory information */ 149 /* memory information */
151 mem_dir = hypfs_mkdir(sb, guest_dir, "mem"); 150 mem_dir = hypfs_mkdir(guest_dir, "mem");
152 if (IS_ERR(mem_dir)) 151 if (IS_ERR(mem_dir))
153 return PTR_ERR(mem_dir); 152 return PTR_ERR(mem_dir);
154 ATTRIBUTE(sb, mem_dir, "min_KiB", data->mem_min_kb); 153 ATTRIBUTE(mem_dir, "min_KiB", data->mem_min_kb);
155 ATTRIBUTE(sb, mem_dir, "max_KiB", data->mem_max_kb); 154 ATTRIBUTE(mem_dir, "max_KiB", data->mem_max_kb);
156 ATTRIBUTE(sb, mem_dir, "used_KiB", data->mem_used_kb); 155 ATTRIBUTE(mem_dir, "used_KiB", data->mem_used_kb);
157 ATTRIBUTE(sb, mem_dir, "share_KiB", data->mem_share_kb); 156 ATTRIBUTE(mem_dir, "share_KiB", data->mem_share_kb);
158 157
159 /* samples */ 158 /* samples */
160 samples_dir = hypfs_mkdir(sb, guest_dir, "samples"); 159 samples_dir = hypfs_mkdir(guest_dir, "samples");
161 if (IS_ERR(samples_dir)) 160 if (IS_ERR(samples_dir))
162 return PTR_ERR(samples_dir); 161 return PTR_ERR(samples_dir);
163 ATTRIBUTE(sb, samples_dir, "cpu_using", data->cpu_use_samp); 162 ATTRIBUTE(samples_dir, "cpu_using", data->cpu_use_samp);
164 ATTRIBUTE(sb, samples_dir, "cpu_delay", data->cpu_delay_samp); 163 ATTRIBUTE(samples_dir, "cpu_delay", data->cpu_delay_samp);
165 ATTRIBUTE(sb, samples_dir, "mem_delay", data->page_wait_samp); 164 ATTRIBUTE(samples_dir, "mem_delay", data->page_wait_samp);
166 ATTRIBUTE(sb, samples_dir, "idle", data->idle_samp); 165 ATTRIBUTE(samples_dir, "idle", data->idle_samp);
167 ATTRIBUTE(sb, samples_dir, "other", data->other_samp); 166 ATTRIBUTE(samples_dir, "other", data->other_samp);
168 ATTRIBUTE(sb, samples_dir, "total", data->total_samp); 167 ATTRIBUTE(samples_dir, "total", data->total_samp);
169 return 0; 168 return 0;
170} 169}
171 170
172int hypfs_vm_create_files(struct super_block *sb, struct dentry *root) 171int hypfs_vm_create_files(struct dentry *root)
173{ 172{
174 struct dentry *dir, *file; 173 struct dentry *dir, *file;
175 struct diag2fc_data *data; 174 struct diag2fc_data *data;
@@ -181,38 +180,38 @@ int hypfs_vm_create_files(struct super_block *sb, struct dentry *root)
181 return PTR_ERR(data); 180 return PTR_ERR(data);
182 181
183 /* Hypervisor Info */ 182 /* Hypervisor Info */
184 dir = hypfs_mkdir(sb, root, "hyp"); 183 dir = hypfs_mkdir(root, "hyp");
185 if (IS_ERR(dir)) { 184 if (IS_ERR(dir)) {
186 rc = PTR_ERR(dir); 185 rc = PTR_ERR(dir);
187 goto failed; 186 goto failed;
188 } 187 }
189 file = hypfs_create_str(sb, dir, "type", "z/VM Hypervisor"); 188 file = hypfs_create_str(dir, "type", "z/VM Hypervisor");
190 if (IS_ERR(file)) { 189 if (IS_ERR(file)) {
191 rc = PTR_ERR(file); 190 rc = PTR_ERR(file);
192 goto failed; 191 goto failed;
193 } 192 }
194 193
195 /* physical cpus */ 194 /* physical cpus */
196 dir = hypfs_mkdir(sb, root, "cpus"); 195 dir = hypfs_mkdir(root, "cpus");
197 if (IS_ERR(dir)) { 196 if (IS_ERR(dir)) {
198 rc = PTR_ERR(dir); 197 rc = PTR_ERR(dir);
199 goto failed; 198 goto failed;
200 } 199 }
201 file = hypfs_create_u64(sb, dir, "count", data->lcpus); 200 file = hypfs_create_u64(dir, "count", data->lcpus);
202 if (IS_ERR(file)) { 201 if (IS_ERR(file)) {
203 rc = PTR_ERR(file); 202 rc = PTR_ERR(file);
204 goto failed; 203 goto failed;
205 } 204 }
206 205
207 /* guests */ 206 /* guests */
208 dir = hypfs_mkdir(sb, root, "systems"); 207 dir = hypfs_mkdir(root, "systems");
209 if (IS_ERR(dir)) { 208 if (IS_ERR(dir)) {
210 rc = PTR_ERR(dir); 209 rc = PTR_ERR(dir);
211 goto failed; 210 goto failed;
212 } 211 }
213 212
214 for (i = 0; i < count; i++) { 213 for (i = 0; i < count; i++) {
215 rc = hpyfs_vm_create_guest(sb, dir, &(data[i])); 214 rc = hpyfs_vm_create_guest(dir, &(data[i]));
216 if (rc) 215 if (rc)
217 goto failed; 216 goto failed;
218 } 217 }
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 7a539f4f5e30..ddfe09b45134 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -28,8 +28,7 @@
28#define HYPFS_MAGIC 0x687970 /* ASCII 'hyp' */ 28#define HYPFS_MAGIC 0x687970 /* ASCII 'hyp' */
29#define TMP_SIZE 64 /* size of temporary buffers */ 29#define TMP_SIZE 64 /* size of temporary buffers */
30 30
31static struct dentry *hypfs_create_update_file(struct super_block *sb, 31static struct dentry *hypfs_create_update_file(struct dentry *dir);
32 struct dentry *dir);
33 32
34struct hypfs_sb_info { 33struct hypfs_sb_info {
35 kuid_t uid; /* uid used for files and dirs */ 34 kuid_t uid; /* uid used for files and dirs */
@@ -193,9 +192,9 @@ static ssize_t hypfs_aio_write(struct kiocb *iocb, const struct iovec *iov,
193 } 192 }
194 hypfs_delete_tree(sb->s_root); 193 hypfs_delete_tree(sb->s_root);
195 if (MACHINE_IS_VM) 194 if (MACHINE_IS_VM)
196 rc = hypfs_vm_create_files(sb, sb->s_root); 195 rc = hypfs_vm_create_files(sb->s_root);
197 else 196 else
198 rc = hypfs_diag_create_files(sb, sb->s_root); 197 rc = hypfs_diag_create_files(sb->s_root);
199 if (rc) { 198 if (rc) {
200 pr_err("Updating the hypfs tree failed\n"); 199 pr_err("Updating the hypfs tree failed\n");
201 hypfs_delete_tree(sb->s_root); 200 hypfs_delete_tree(sb->s_root);
@@ -302,12 +301,12 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
302 if (!root_dentry) 301 if (!root_dentry)
303 return -ENOMEM; 302 return -ENOMEM;
304 if (MACHINE_IS_VM) 303 if (MACHINE_IS_VM)
305 rc = hypfs_vm_create_files(sb, root_dentry); 304 rc = hypfs_vm_create_files(root_dentry);
306 else 305 else
307 rc = hypfs_diag_create_files(sb, root_dentry); 306 rc = hypfs_diag_create_files(root_dentry);
308 if (rc) 307 if (rc)
309 return rc; 308 return rc;
310 sbi->update_file = hypfs_create_update_file(sb, root_dentry); 309 sbi->update_file = hypfs_create_update_file(root_dentry);
311 if (IS_ERR(sbi->update_file)) 310 if (IS_ERR(sbi->update_file))
312 return PTR_ERR(sbi->update_file); 311 return PTR_ERR(sbi->update_file);
313 hypfs_update_update(sb); 312 hypfs_update_update(sb);
@@ -334,8 +333,7 @@ static void hypfs_kill_super(struct super_block *sb)
334 kill_litter_super(sb); 333 kill_litter_super(sb);
335} 334}
336 335
337static struct dentry *hypfs_create_file(struct super_block *sb, 336static struct dentry *hypfs_create_file(struct dentry *parent, const char *name,
338 struct dentry *parent, const char *name,
339 char *data, umode_t mode) 337 char *data, umode_t mode)
340{ 338{
341 struct dentry *dentry; 339 struct dentry *dentry;
@@ -347,7 +345,7 @@ static struct dentry *hypfs_create_file(struct super_block *sb,
347 dentry = ERR_PTR(-ENOMEM); 345 dentry = ERR_PTR(-ENOMEM);
348 goto fail; 346 goto fail;
349 } 347 }
350 inode = hypfs_make_inode(sb, mode); 348 inode = hypfs_make_inode(parent->d_sb, mode);
351 if (!inode) { 349 if (!inode) {
352 dput(dentry); 350 dput(dentry);
353 dentry = ERR_PTR(-ENOMEM); 351 dentry = ERR_PTR(-ENOMEM);
@@ -373,24 +371,22 @@ fail:
373 return dentry; 371 return dentry;
374} 372}
375 373
376struct dentry *hypfs_mkdir(struct super_block *sb, struct dentry *parent, 374struct dentry *hypfs_mkdir(struct dentry *parent, const char *name)
377 const char *name)
378{ 375{
379 struct dentry *dentry; 376 struct dentry *dentry;
380 377
381 dentry = hypfs_create_file(sb, parent, name, NULL, S_IFDIR | DIR_MODE); 378 dentry = hypfs_create_file(parent, name, NULL, S_IFDIR | DIR_MODE);
382 if (IS_ERR(dentry)) 379 if (IS_ERR(dentry))
383 return dentry; 380 return dentry;
384 hypfs_add_dentry(dentry); 381 hypfs_add_dentry(dentry);
385 return dentry; 382 return dentry;
386} 383}
387 384
388static struct dentry *hypfs_create_update_file(struct super_block *sb, 385static struct dentry *hypfs_create_update_file(struct dentry *dir)
389 struct dentry *dir)
390{ 386{
391 struct dentry *dentry; 387 struct dentry *dentry;
392 388
393 dentry = hypfs_create_file(sb, dir, "update", NULL, 389 dentry = hypfs_create_file(dir, "update", NULL,
394 S_IFREG | UPDATE_FILE_MODE); 390 S_IFREG | UPDATE_FILE_MODE);
395 /* 391 /*
396 * We do not put the update file on the 'delete' list with 392 * We do not put the update file on the 'delete' list with
@@ -400,7 +396,7 @@ static struct dentry *hypfs_create_update_file(struct super_block *sb,
400 return dentry; 396 return dentry;
401} 397}
402 398
403struct dentry *hypfs_create_u64(struct super_block *sb, struct dentry *dir, 399struct dentry *hypfs_create_u64(struct dentry *dir,
404 const char *name, __u64 value) 400 const char *name, __u64 value)
405{ 401{
406 char *buffer; 402 char *buffer;
@@ -412,7 +408,7 @@ struct dentry *hypfs_create_u64(struct super_block *sb, struct dentry *dir,
412 if (!buffer) 408 if (!buffer)
413 return ERR_PTR(-ENOMEM); 409 return ERR_PTR(-ENOMEM);
414 dentry = 410 dentry =
415 hypfs_create_file(sb, dir, name, buffer, S_IFREG | REG_FILE_MODE); 411 hypfs_create_file(dir, name, buffer, S_IFREG | REG_FILE_MODE);
416 if (IS_ERR(dentry)) { 412 if (IS_ERR(dentry)) {
417 kfree(buffer); 413 kfree(buffer);
418 return ERR_PTR(-ENOMEM); 414 return ERR_PTR(-ENOMEM);
@@ -421,7 +417,7 @@ struct dentry *hypfs_create_u64(struct super_block *sb, struct dentry *dir,
421 return dentry; 417 return dentry;
422} 418}
423 419
424struct dentry *hypfs_create_str(struct super_block *sb, struct dentry *dir, 420struct dentry *hypfs_create_str(struct dentry *dir,
425 const char *name, char *string) 421 const char *name, char *string)
426{ 422{
427 char *buffer; 423 char *buffer;
@@ -432,7 +428,7 @@ struct dentry *hypfs_create_str(struct super_block *sb, struct dentry *dir,
432 return ERR_PTR(-ENOMEM); 428 return ERR_PTR(-ENOMEM);
433 sprintf(buffer, "%s\n", string); 429 sprintf(buffer, "%s\n", string);
434 dentry = 430 dentry =
435 hypfs_create_file(sb, dir, name, buffer, S_IFREG | REG_FILE_MODE); 431 hypfs_create_file(dir, name, buffer, S_IFREG | REG_FILE_MODE);
436 if (IS_ERR(dentry)) { 432 if (IS_ERR(dentry)) {
437 kfree(buffer); 433 kfree(buffer);
438 return ERR_PTR(-ENOMEM); 434 return ERR_PTR(-ENOMEM);
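
The common thread of the hypfs hunks above: every helper carried a struct super_block * next to a dentry, but a dentry already records its superblock in d_sb (visible in the new hypfs_make_inode(parent->d_sb, mode) call), so the parameter was pure redundancy. The idiom, reduced to a sketch (assumes only the standard VFS dentry/inode interfaces):

	#include <linux/dcache.h>
	#include <linux/fs.h>

	static struct inode *child_inode(struct dentry *parent, umode_t mode)
	{
		/* No super_block argument needed: any dentry knows its sb. */
		struct inode *inode = new_inode(parent->d_sb);

		if (inode)
			inode->i_mode = mode;
		return inode;
	}
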
diff --git a/arch/s390/include/asm/airq.h b/arch/s390/include/asm/airq.h
index 4066cee0c2d2..4bbb5957ed1b 100644
--- a/arch/s390/include/asm/airq.h
+++ b/arch/s390/include/asm/airq.h
@@ -9,6 +9,8 @@
9#ifndef _ASM_S390_AIRQ_H 9#ifndef _ASM_S390_AIRQ_H
10#define _ASM_S390_AIRQ_H 10#define _ASM_S390_AIRQ_H
11 11
12#include <linux/bit_spinlock.h>
13
12struct airq_struct { 14struct airq_struct {
13 struct hlist_node list; /* Handler queueing. */ 15 struct hlist_node list; /* Handler queueing. */
14 void (*handler)(struct airq_struct *); /* Thin-interrupt handler */ 16 void (*handler)(struct airq_struct *); /* Thin-interrupt handler */
@@ -23,4 +25,69 @@ struct airq_struct {
23int register_adapter_interrupt(struct airq_struct *airq); 25int register_adapter_interrupt(struct airq_struct *airq);
24void unregister_adapter_interrupt(struct airq_struct *airq); 26void unregister_adapter_interrupt(struct airq_struct *airq);
25 27
28/* Adapter interrupt bit vector */
29struct airq_iv {
30 unsigned long *vector; /* Adapter interrupt bit vector */
31 unsigned long *avail; /* Allocation bit mask for the bit vector */
32 unsigned long *bitlock; /* Lock bit mask for the bit vector */
33 unsigned long *ptr; /* Pointer associated with each bit */
34 unsigned int *data; /* 32 bit value associated with each bit */
35 unsigned long bits; /* Number of bits in the vector */
36 unsigned long end; /* Number of highest allocated bit + 1 */
37 spinlock_t lock; /* Lock to protect alloc & free */
38};
39
40#define AIRQ_IV_ALLOC 1 /* Use an allocation bit mask */
41#define AIRQ_IV_BITLOCK 2 /* Allocate the lock bit mask */
42#define AIRQ_IV_PTR 4 /* Allocate the ptr array */
43#define AIRQ_IV_DATA 8 /* Allocate the data array */
44
45struct airq_iv *airq_iv_create(unsigned long bits, unsigned long flags);
46void airq_iv_release(struct airq_iv *iv);
47unsigned long airq_iv_alloc_bit(struct airq_iv *iv);
48void airq_iv_free_bit(struct airq_iv *iv, unsigned long bit);
49unsigned long airq_iv_scan(struct airq_iv *iv, unsigned long start,
50 unsigned long end);
51
52static inline unsigned long airq_iv_end(struct airq_iv *iv)
53{
54 return iv->end;
55}
56
57static inline void airq_iv_lock(struct airq_iv *iv, unsigned long bit)
58{
59 const unsigned long be_to_le = BITS_PER_LONG - 1;
60 bit_spin_lock(bit ^ be_to_le, iv->bitlock);
61}
62
63static inline void airq_iv_unlock(struct airq_iv *iv, unsigned long bit)
64{
65 const unsigned long be_to_le = BITS_PER_LONG - 1;
66 bit_spin_unlock(bit ^ be_to_le, iv->bitlock);
67}
68
69static inline void airq_iv_set_data(struct airq_iv *iv, unsigned long bit,
70 unsigned int data)
71{
72 iv->data[bit] = data;
73}
74
75static inline unsigned int airq_iv_get_data(struct airq_iv *iv,
76 unsigned long bit)
77{
78 return iv->data[bit];
79}
80
81static inline void airq_iv_set_ptr(struct airq_iv *iv, unsigned long bit,
82 unsigned long ptr)
83{
84 iv->ptr[bit] = ptr;
85}
86
87static inline unsigned long airq_iv_get_ptr(struct airq_iv *iv,
88 unsigned long bit)
89{
90 return iv->ptr[bit];
91}
92
26#endif /* _ASM_S390_AIRQ_H */ 93#endif /* _ASM_S390_AIRQ_H */
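
The airq_iv interface above couples an adapter-interrupt bit vector with optional allocation, lock, pointer and data arrays, selected per instance via the AIRQ_IV_* flags. A usage sketch (hypothetical driver code; that airq_iv_alloc_bit() signals exhaustion with -1UL is an assumption, the header does not spell it out):

	#include <linux/errno.h>
	#include <asm/airq.h>

	static int example_setup(void)
	{
		struct airq_iv *iv;
		unsigned long bit;

		/* 256-bit vector with allocation mask and per-bit data. */
		iv = airq_iv_create(256, AIRQ_IV_ALLOC | AIRQ_IV_DATA);
		if (!iv)
			return -ENOMEM;
		bit = airq_iv_alloc_bit(iv);	/* claim one interrupt bit */
		if (bit == -1UL) {		/* assumed failure value */
			airq_iv_release(iv);
			return -ENOSPC;
		}
		airq_iv_set_data(iv, bit, 42);	/* attach a 32-bit cookie */

		/* An interrupt handler would walk pending bits with
		 * airq_iv_scan(iv, 0, airq_iv_end(iv)). */

		airq_iv_free_bit(iv, bit);
		airq_iv_release(iv);
		return 0;
	}
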
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 4d8604e311f3..10135a38673c 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -216,7 +216,7 @@ static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
216 addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3); 216 addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
217 asm volatile( 217 asm volatile(
218 " oc %O0(1,%R0),%1" 218 " oc %O0(1,%R0),%1"
219 : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" ); 219 : "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc");
220} 220}
221 221
222static inline void 222static inline void
@@ -244,7 +244,7 @@ __clear_bit(unsigned long nr, volatile unsigned long *ptr)
244 addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3); 244 addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
245 asm volatile( 245 asm volatile(
246 " nc %O0(1,%R0),%1" 246 " nc %O0(1,%R0),%1"
247 : "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc" ); 247 : "+Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc");
248} 248}
249 249
250static inline void 250static inline void
@@ -271,7 +271,7 @@ static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
271 addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3); 271 addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
272 asm volatile( 272 asm volatile(
273 " xc %O0(1,%R0),%1" 273 " xc %O0(1,%R0),%1"
274 : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" ); 274 : "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc");
275} 275}
276 276
277static inline void 277static inline void
@@ -301,7 +301,7 @@ test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr)
301 ch = *(unsigned char *) addr; 301 ch = *(unsigned char *) addr;
302 asm volatile( 302 asm volatile(
303 " oc %O0(1,%R0),%1" 303 " oc %O0(1,%R0),%1"
304 : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) 304 : "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
305 : "cc", "memory"); 305 : "cc", "memory");
306 return (ch >> (nr & 7)) & 1; 306 return (ch >> (nr & 7)) & 1;
307} 307}
@@ -320,7 +320,7 @@ test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr)
320 ch = *(unsigned char *) addr; 320 ch = *(unsigned char *) addr;
321 asm volatile( 321 asm volatile(
322 " nc %O0(1,%R0),%1" 322 " nc %O0(1,%R0),%1"
323 : "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) 323 : "+Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7])
324 : "cc", "memory"); 324 : "cc", "memory");
325 return (ch >> (nr & 7)) & 1; 325 return (ch >> (nr & 7)) & 1;
326} 326}
@@ -339,7 +339,7 @@ test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr)
339 ch = *(unsigned char *) addr; 339 ch = *(unsigned char *) addr;
340 asm volatile( 340 asm volatile(
341 " xc %O0(1,%R0),%1" 341 " xc %O0(1,%R0),%1"
342 : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) 342 : "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
343 : "cc", "memory"); 343 : "cc", "memory");
344 return (ch >> (nr & 7)) & 1; 344 return (ch >> (nr & 7)) & 1;
345} 345}
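
The six identical constraint changes above share one rationale: oc, nc and xc are read-modify-write storage instructions, so the target byte is an input as well as an output. With the write-only "=Q" the compiler may treat the previous contents as dead and drop or reorder the store that produced them; "+Q" declares the read. Condensed to one helper using the same operand modifiers as the hunks above:

	/* OR a mask into one byte; the old value of *p is consumed. */
	static inline void or_byte(unsigned char *p, unsigned char mask)
	{
		asm volatile(
			"	oc	%O0(1,%R0),%1"
			: "+Q" (*p)		/* read-write memory operand */
			: "Q" (mask)
			: "cc");
	}
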
@@ -693,7 +693,7 @@ static inline int find_next_bit_left(const unsigned long *addr,
693 size -= offset; 693 size -= offset;
694 p = addr + offset / BITS_PER_LONG; 694 p = addr + offset / BITS_PER_LONG;
695 if (bit) { 695 if (bit) {
696 set = __flo_word(0, *p & (~0UL << bit)); 696 set = __flo_word(0, *p & (~0UL >> bit));
697 if (set >= size) 697 if (set >= size)
698 return size + offset; 698 return size + offset;
699 if (set < BITS_PER_LONG) 699 if (set < BITS_PER_LONG)
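
The one-character fix above is a bit-numbering subtlety: find_next_bit_left() scans from the most significant bit, so skipping the first `bit` positions means clearing the *high* bits, i.e. masking with ~0UL >> bit; the old ~0UL << bit cleared the low end and made the search overlook valid bits. The MSB-first convention, as a standalone sketch (assumes 64-bit longs and 0 <= start < 64):

	/* Find the first set bit at position >= start, where position 0
	 * is the most significant bit of the word; -1 if none. */
	static int find_first_set_left(unsigned long word, unsigned int start)
	{
		unsigned long masked = word & (~0UL >> start);
		unsigned int pos;

		for (pos = start; pos < 64; pos++)
			if (masked & (1UL << (63 - pos)))
				return pos;
		return -1;
	}
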
diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h
index ffb898961c8d..d42625053c37 100644
--- a/arch/s390/include/asm/cio.h
+++ b/arch/s390/include/asm/cio.h
@@ -296,6 +296,7 @@ static inline int ccw_dev_id_is_equal(struct ccw_dev_id *dev_id1,
296 return 0; 296 return 0;
297} 297}
298 298
299void channel_subsystem_reinit(void);
299extern void css_schedule_reprobe(void); 300extern void css_schedule_reprobe(void);
300 301
301extern void reipl_ccw_dev(struct ccw_dev_id *id); 302extern void reipl_ccw_dev(struct ccw_dev_id *id);
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index d2ff41370c0c..f65bd3634519 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -13,9 +13,6 @@
13#include <asm/div64.h> 13#include <asm/div64.h>
14 14
15 15
16#define __ARCH_HAS_VTIME_ACCOUNT
17#define __ARCH_HAS_VTIME_TASK_SWITCH
18
19/* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. */ 16/* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. */
20 17
21typedef unsigned long long __nocast cputime_t; 18typedef unsigned long long __nocast cputime_t;
diff --git a/arch/s390/include/asm/hardirq.h b/arch/s390/include/asm/hardirq.h
index 0c82ba86e997..a908d2941c5d 100644
--- a/arch/s390/include/asm/hardirq.h
+++ b/arch/s390/include/asm/hardirq.h
@@ -20,4 +20,9 @@
20 20
21#define HARDIRQ_BITS 8 21#define HARDIRQ_BITS 8
22 22
23static inline void ack_bad_irq(unsigned int irq)
24{
25 printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq);
26}
27
23#endif /* __ASM_HARDIRQ_H */ 28#endif /* __ASM_HARDIRQ_H */
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index bd90359d6d22..11eae5f55b70 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -17,6 +17,9 @@
17 17
18void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, 18void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
19 pte_t *ptep, pte_t pte); 19 pte_t *ptep, pte_t pte);
20pte_t huge_ptep_get(pte_t *ptep);
21pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
22 unsigned long addr, pte_t *ptep);
20 23
21/* 24/*
22 * If the arch doesn't supply something else, assume that hugepage 25 * If the arch doesn't supply something else, assume that hugepage
@@ -38,147 +41,75 @@ static inline int prepare_hugepage_range(struct file *file,
38int arch_prepare_hugepage(struct page *page); 41int arch_prepare_hugepage(struct page *page);
39void arch_release_hugepage(struct page *page); 42void arch_release_hugepage(struct page *page);
40 43
41static inline pte_t huge_pte_wrprotect(pte_t pte) 44static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
45 pte_t *ptep)
42{ 46{
43 pte_val(pte) |= _PAGE_RO; 47 pte_val(*ptep) = _SEGMENT_ENTRY_EMPTY;
44 return pte;
45} 48}
46 49
47static inline int huge_pte_none(pte_t pte) 50static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
51 unsigned long address, pte_t *ptep)
48{ 52{
49 return (pte_val(pte) & _SEGMENT_ENTRY_INV) && 53 huge_ptep_get_and_clear(vma->vm_mm, address, ptep);
50 !(pte_val(pte) & _SEGMENT_ENTRY_RO);
51} 54}
52 55
53static inline pte_t huge_ptep_get(pte_t *ptep) 56static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
57 unsigned long addr, pte_t *ptep,
58 pte_t pte, int dirty)
54{ 59{
55 pte_t pte = *ptep; 60 int changed = !pte_same(huge_ptep_get(ptep), pte);
56 unsigned long mask; 61 if (changed) {
57 62 huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
58 if (!MACHINE_HAS_HPAGE) { 63 set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
59 ptep = (pte_t *) (pte_val(pte) & _SEGMENT_ENTRY_ORIGIN);
60 if (ptep) {
61 mask = pte_val(pte) &
62 (_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
63 pte = pte_mkhuge(*ptep);
64 pte_val(pte) |= mask;
65 }
66 } 64 }
67 return pte; 65 return changed;
68} 66}
69 67
70static inline void __pmd_csp(pmd_t *pmdp) 68static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
69 unsigned long addr, pte_t *ptep)
71{ 70{
72 register unsigned long reg2 asm("2") = pmd_val(*pmdp); 71 pte_t pte = huge_ptep_get_and_clear(mm, addr, ptep);
73 register unsigned long reg3 asm("3") = pmd_val(*pmdp) | 72 set_huge_pte_at(mm, addr, ptep, pte_wrprotect(pte));
74 _SEGMENT_ENTRY_INV;
75 register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;
76
77 asm volatile(
78 " csp %1,%3"
79 : "=m" (*pmdp)
80 : "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
81} 73}
82 74
83static inline void huge_ptep_invalidate(struct mm_struct *mm, 75static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
84 unsigned long address, pte_t *ptep)
85{
86 pmd_t *pmdp = (pmd_t *) ptep;
87
88 if (MACHINE_HAS_IDTE)
89 __pmd_idte(address, pmdp);
90 else
91 __pmd_csp(pmdp);
92 pmd_val(*pmdp) = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY;
93}
94
95static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
96 unsigned long addr, pte_t *ptep)
97{
98 pte_t pte = huge_ptep_get(ptep);
99
100 huge_ptep_invalidate(mm, addr, ptep);
101 return pte;
102}
103
104#define huge_ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \
105({ \
106 int __changed = !pte_same(huge_ptep_get(__ptep), __entry); \
107 if (__changed) { \
108 huge_ptep_invalidate((__vma)->vm_mm, __addr, __ptep); \
109 set_huge_pte_at((__vma)->vm_mm, __addr, __ptep, __entry); \
110 } \
111 __changed; \
112})
113
114#define huge_ptep_set_wrprotect(__mm, __addr, __ptep) \
115({ \
116 pte_t __pte = huge_ptep_get(__ptep); \
117 if (huge_pte_write(__pte)) { \
118 huge_ptep_invalidate(__mm, __addr, __ptep); \
119 set_huge_pte_at(__mm, __addr, __ptep, \
120 huge_pte_wrprotect(__pte)); \
121 } \
122})
123
124static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
125 unsigned long address, pte_t *ptep)
126{ 76{
127 huge_ptep_invalidate(vma->vm_mm, address, ptep); 77 return mk_pte(page, pgprot);
128} 78}
129 79
130static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot) 80static inline int huge_pte_none(pte_t pte)
131{ 81{
132 pte_t pte; 82 return pte_none(pte);
133 pmd_t pmd;
134
135 pmd = mk_pmd_phys(page_to_phys(page), pgprot);
136 pte_val(pte) = pmd_val(pmd);
137 return pte;
138} 83}
139 84
140static inline int huge_pte_write(pte_t pte) 85static inline int huge_pte_write(pte_t pte)
141{ 86{
142 pmd_t pmd; 87 return pte_write(pte);
143
144 pmd_val(pmd) = pte_val(pte);
145 return pmd_write(pmd);
146} 88}
147 89
148static inline int huge_pte_dirty(pte_t pte) 90static inline int huge_pte_dirty(pte_t pte)
149{ 91{
150 /* No dirty bit in the segment table entry. */ 92 return pte_dirty(pte);
151 return 0;
152} 93}
153 94
154static inline pte_t huge_pte_mkwrite(pte_t pte) 95static inline pte_t huge_pte_mkwrite(pte_t pte)
155{ 96{
156 pmd_t pmd; 97 return pte_mkwrite(pte);
157
158 pmd_val(pmd) = pte_val(pte);
159 pte_val(pte) = pmd_val(pmd_mkwrite(pmd));
160 return pte;
161} 98}
162 99
163static inline pte_t huge_pte_mkdirty(pte_t pte) 100static inline pte_t huge_pte_mkdirty(pte_t pte)
164{ 101{
165 /* No dirty bit in the segment table entry. */ 102 return pte_mkdirty(pte);
166 return pte;
167} 103}
168 104
169static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot) 105static inline pte_t huge_pte_wrprotect(pte_t pte)
170{ 106{
171 pmd_t pmd; 107 return pte_wrprotect(pte);
172
173 pmd_val(pmd) = pte_val(pte);
174 pte_val(pte) = pmd_val(pmd_modify(pmd, newprot));
175 return pte;
176} 108}
177 109
178static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr, 110static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
179 pte_t *ptep)
180{ 111{
181 pmd_clear((pmd_t *) ptep); 112 return pte_modify(pte, newprot);
182} 113}
183 114
184#endif /* _ASM_S390_HUGETLB_H */ 115#endif /* _ASM_S390_HUGETLB_H */
diff --git a/arch/s390/include/asm/hw_irq.h b/arch/s390/include/asm/hw_irq.h
index 7e3d2586c1ff..ee96a8b697f9 100644
--- a/arch/s390/include/asm/hw_irq.h
+++ b/arch/s390/include/asm/hw_irq.h
@@ -4,19 +4,8 @@
4#include <linux/msi.h> 4#include <linux/msi.h>
5#include <linux/pci.h> 5#include <linux/pci.h>
6 6
7static inline struct msi_desc *irq_get_msi_desc(unsigned int irq) 7void __init init_airq_interrupts(void);
8{ 8void __init init_cio_interrupts(void);
9 return __irq_get_msi_desc(irq); 9void __init init_ext_interrupts(void);
10}
11
12/* Must be called with msi map lock held */
13static inline int irq_set_msi_desc(unsigned int irq, struct msi_desc *msi)
14{
15 if (!msi)
16 return -EINVAL;
17
18 msi->irq = irq;
19 return 0;
20}
21 10
22#endif 11#endif
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index 87c17bfb2968..1eaa3625803c 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -1,17 +1,28 @@
1#ifndef _ASM_IRQ_H 1#ifndef _ASM_IRQ_H
2#define _ASM_IRQ_H 2#define _ASM_IRQ_H
3 3
4#define EXT_INTERRUPT 1
5#define IO_INTERRUPT 2
6#define THIN_INTERRUPT 3
7
8#define NR_IRQS_BASE 4
9
10#ifdef CONFIG_PCI_NR_MSI
11# define NR_IRQS (NR_IRQS_BASE + CONFIG_PCI_NR_MSI)
12#else
13# define NR_IRQS NR_IRQS_BASE
14#endif
15
16/* This number is used when no interrupt has been assigned */
17#define NO_IRQ 0
18
19#ifndef __ASSEMBLY__
20
4#include <linux/hardirq.h> 21#include <linux/hardirq.h>
5#include <linux/percpu.h> 22#include <linux/percpu.h>
6#include <linux/cache.h> 23#include <linux/cache.h>
7#include <linux/types.h> 24#include <linux/types.h>
8 25
9enum interruption_main_class {
10 EXTERNAL_INTERRUPT,
11 IO_INTERRUPT,
12 NR_IRQS
13};
14
15enum interruption_class { 26enum interruption_class {
16 IRQEXT_CLK, 27 IRQEXT_CLK,
17 IRQEXT_EXC, 28 IRQEXT_EXC,
@@ -72,14 +83,8 @@ void service_subclass_irq_unregister(void);
72void measurement_alert_subclass_register(void); 83void measurement_alert_subclass_register(void);
73void measurement_alert_subclass_unregister(void); 84void measurement_alert_subclass_unregister(void);
74 85
75#ifdef CONFIG_LOCKDEP 86#define irq_canonicalize(irq) (irq)
76# define disable_irq_nosync_lockdep(irq) disable_irq_nosync(irq) 87
77# define disable_irq_nosync_lockdep_irqsave(irq, flags) \ 88#endif /* __ASSEMBLY__ */
78 disable_irq_nosync(irq)
79# define disable_irq_lockdep(irq) disable_irq(irq)
80# define enable_irq_lockdep(irq) enable_irq(irq)
81# define enable_irq_lockdep_irqrestore(irq, flags) \
82 enable_irq(irq)
83#endif
84 89
85#endif /* _ASM_IRQ_H */ 90#endif /* _ASM_IRQ_H */
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 3238d4004e84..e87ecaa2c569 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -274,6 +274,14 @@ struct kvm_arch{
274 int css_support; 274 int css_support;
275}; 275};
276 276
277#define KVM_HVA_ERR_BAD (-1UL)
278#define KVM_HVA_ERR_RO_BAD (-2UL)
279
280static inline bool kvm_is_error_hva(unsigned long addr)
281{
282 return IS_ERR_VALUE(addr);
283}
284
277extern int sie64a(struct kvm_s390_sie_block *, u64 *); 285extern int sie64a(struct kvm_s390_sie_block *, u64 *);
278extern char sie_exit; 286extern char sie_exit;
279#endif 287#endif
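
kvm_is_error_hva() above works because IS_ERR_VALUE() classifies any value in the topmost error window as an error, which covers both new sentinels. Simplified from include/linux/err.h:

	#define MAX_ERRNO	4095
	#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

	/* -1UL and -2UL sit at the very top of the address space, well
	 * inside the 4095-value error window, so both test as errors. */
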
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index 6340178748bf..ff132ac64ddd 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -12,8 +12,6 @@ typedef struct {
12 unsigned long asce_bits; 12 unsigned long asce_bits;
13 unsigned long asce_limit; 13 unsigned long asce_limit;
14 unsigned long vdso_base; 14 unsigned long vdso_base;
15 /* Cloned contexts will be created with extended page tables. */
16 unsigned int alloc_pgste:1;
17 /* The mmu context has extended page tables. */ 15 /* The mmu context has extended page tables. */
18 unsigned int has_pgste:1; 16 unsigned int has_pgste:1;
19} mm_context_t; 17} mm_context_t;
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 084e7755ed9b..9f973d8de90e 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -21,24 +21,7 @@ static inline int init_new_context(struct task_struct *tsk,
21#ifdef CONFIG_64BIT 21#ifdef CONFIG_64BIT
22 mm->context.asce_bits |= _ASCE_TYPE_REGION3; 22 mm->context.asce_bits |= _ASCE_TYPE_REGION3;
23#endif 23#endif
24 if (current->mm && current->mm->context.alloc_pgste) { 24 mm->context.has_pgste = 0;
25 /*
26 * alloc_pgste indicates, that any NEW context will be created
27 * with extended page tables. The old context is unchanged. The
28 * page table allocation and the page table operations will
29 * look at has_pgste to distinguish normal and extended page
30 * tables. The only way to create extended page tables is to
31 * set alloc_pgste and then create a new context (e.g. dup_mm).
32 * The page table allocation is called after init_new_context
33 * and if has_pgste is set, it will create extended page
34 * tables.
35 */
36 mm->context.has_pgste = 1;
37 mm->context.alloc_pgste = 1;
38 } else {
39 mm->context.has_pgste = 0;
40 mm->context.alloc_pgste = 0;
41 }
42 mm->context.asce_limit = STACK_TOP_MAX; 25 mm->context.asce_limit = STACK_TOP_MAX;
43 crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm)); 26 crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
44 return 0; 27 return 0;
@@ -77,8 +60,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
77 WARN_ON(atomic_read(&prev->context.attach_count) < 0); 60 WARN_ON(atomic_read(&prev->context.attach_count) < 0);
78 atomic_inc(&next->context.attach_count); 61 atomic_inc(&next->context.attach_count);
79 /* Check for TLBs not flushed yet */ 62 /* Check for TLBs not flushed yet */
80 if (next->context.flush_mm) 63 __tlb_flush_mm_lazy(next);
81 __tlb_flush_mm(next);
82} 64}
83 65
84#define enter_lazy_tlb(mm,tsk) do { } while (0) 66#define enter_lazy_tlb(mm,tsk) do { } while (0)
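
switch_mm() above stops open-coding the flush_mm test and defers to __tlb_flush_mm_lazy(). The helper is introduced elsewhere in this series; its assumed shape, flushing only when a flush was deferred:

	static inline void __tlb_flush_mm_lazy(struct mm_struct *mm)
	{
		if (mm->context.flush_mm) {
			__tlb_flush_mm(mm);
			mm->context.flush_mm = 0;
		}
	}
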
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 5d64fb7619cc..1e51f2915b2e 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -32,16 +32,6 @@
32 32
33void storage_key_init_range(unsigned long start, unsigned long end); 33void storage_key_init_range(unsigned long start, unsigned long end);
34 34
35static inline unsigned long pfmf(unsigned long function, unsigned long address)
36{
37 asm volatile(
38 " .insn rre,0xb9af0000,%[function],%[address]"
39 : [address] "+a" (address)
40 : [function] "d" (function)
41 : "memory");
42 return address;
43}
44
45static inline void clear_page(void *page) 35static inline void clear_page(void *page)
46{ 36{
47 register unsigned long reg1 asm ("1") = 0; 37 register unsigned long reg1 asm ("1") = 0;
@@ -150,15 +140,6 @@ static inline int page_reset_referenced(unsigned long addr)
150#define _PAGE_FP_BIT 0x08 /* HW fetch protection bit */ 140#define _PAGE_FP_BIT 0x08 /* HW fetch protection bit */
151#define _PAGE_ACC_BITS 0xf0 /* HW access control bits */ 141#define _PAGE_ACC_BITS 0xf0 /* HW access control bits */
152 142
153/*
154 * Test and clear referenced bit in storage key.
155 */
156#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
157static inline int page_test_and_clear_young(unsigned long pfn)
158{
159 return page_reset_referenced(pfn << PAGE_SHIFT);
160}
161
162struct page; 143struct page;
163void arch_free_page(struct page *page, int order); 144void arch_free_page(struct page *page, int order);
164void arch_alloc_page(struct page *page, int order); 145void arch_alloc_page(struct page *page, int order);
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 6e577ba0e5da..c290f13d1c47 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -6,6 +6,7 @@
6/* must be set before including pci_clp.h */ 6/* must be set before including pci_clp.h */
7#define PCI_BAR_COUNT 6 7#define PCI_BAR_COUNT 6
8 8
9#include <linux/pci.h>
9#include <asm-generic/pci.h> 10#include <asm-generic/pci.h>
10#include <asm-generic/pci-dma-compat.h> 11#include <asm-generic/pci-dma-compat.h>
11#include <asm/pci_clp.h> 12#include <asm/pci_clp.h>
@@ -53,14 +54,9 @@ struct zpci_fmb {
53 atomic64_t unmapped_pages; 54 atomic64_t unmapped_pages;
54} __packed __aligned(16); 55} __packed __aligned(16);
55 56
56struct msi_map { 57#define ZPCI_MSI_VEC_BITS 11
57 unsigned long irq; 58#define ZPCI_MSI_VEC_MAX (1 << ZPCI_MSI_VEC_BITS)
58 struct msi_desc *msi; 59#define ZPCI_MSI_VEC_MASK (ZPCI_MSI_VEC_MAX - 1)
59 struct hlist_node msi_chain;
60};
61
62#define ZPCI_NR_MSI_VECS 64
63#define ZPCI_MSI_MASK (ZPCI_NR_MSI_VECS - 1)
64 60
65enum zpci_state { 61enum zpci_state {
66 ZPCI_FN_STATE_RESERVED, 62 ZPCI_FN_STATE_RESERVED,
@@ -91,8 +87,7 @@ struct zpci_dev {
91 87
92 /* IRQ stuff */ 88 /* IRQ stuff */
93 u64 msi_addr; /* MSI address */ 89 u64 msi_addr; /* MSI address */
94 struct zdev_irq_map *irq_map; 90 struct airq_iv *aibv; /* adapter interrupt bit vector */
95 struct msi_map *msi_map[ZPCI_NR_MSI_VECS];
96 unsigned int aisb; /* number of the summary bit */ 91 unsigned int aisb; /* number of the summary bit */
97 92
98 /* DMA stuff */ 93 /* DMA stuff */
@@ -122,11 +117,6 @@ struct zpci_dev {
122 struct dentry *debugfs_perf; 117 struct dentry *debugfs_perf;
123}; 118};
124 119
125struct pci_hp_callback_ops {
126 int (*create_slot) (struct zpci_dev *zdev);
127 void (*remove_slot) (struct zpci_dev *zdev);
128};
129
130static inline bool zdev_enabled(struct zpci_dev *zdev) 120static inline bool zdev_enabled(struct zpci_dev *zdev)
131{ 121{
132 return (zdev->fh & (1UL << 31)) ? true : false; 122 return (zdev->fh & (1UL << 31)) ? true : false;
@@ -146,32 +136,38 @@ int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64);
146int zpci_unregister_ioat(struct zpci_dev *, u8); 136int zpci_unregister_ioat(struct zpci_dev *, u8);
147 137
148/* CLP */ 138/* CLP */
149int clp_find_pci_devices(void); 139int clp_scan_pci_devices(void);
140int clp_rescan_pci_devices(void);
141int clp_rescan_pci_devices_simple(void);
150int clp_add_pci_device(u32, u32, int); 142int clp_add_pci_device(u32, u32, int);
151int clp_enable_fh(struct zpci_dev *, u8); 143int clp_enable_fh(struct zpci_dev *, u8);
152int clp_disable_fh(struct zpci_dev *); 144int clp_disable_fh(struct zpci_dev *);
153 145
154/* MSI */
155struct msi_desc *__irq_get_msi_desc(unsigned int);
156int zpci_msi_set_mask_bits(struct msi_desc *, u32, u32);
157int zpci_setup_msi_irq(struct zpci_dev *, struct msi_desc *, unsigned int, int);
158void zpci_teardown_msi_irq(struct zpci_dev *, struct msi_desc *);
159int zpci_msihash_init(void);
160void zpci_msihash_exit(void);
161
162#ifdef CONFIG_PCI 146#ifdef CONFIG_PCI
163/* Error handling and recovery */ 147/* Error handling and recovery */
164void zpci_event_error(void *); 148void zpci_event_error(void *);
165void zpci_event_availability(void *); 149void zpci_event_availability(void *);
150void zpci_rescan(void);
166#else /* CONFIG_PCI */ 151#else /* CONFIG_PCI */
167static inline void zpci_event_error(void *e) {} 152static inline void zpci_event_error(void *e) {}
168static inline void zpci_event_availability(void *e) {} 153static inline void zpci_event_availability(void *e) {}
154static inline void zpci_rescan(void) {}
169#endif /* CONFIG_PCI */ 155#endif /* CONFIG_PCI */
170 156
157#ifdef CONFIG_HOTPLUG_PCI_S390
158int zpci_init_slot(struct zpci_dev *);
159void zpci_exit_slot(struct zpci_dev *);
160#else /* CONFIG_HOTPLUG_PCI_S390 */
161static inline int zpci_init_slot(struct zpci_dev *zdev)
162{
163 return 0;
164}
165static inline void zpci_exit_slot(struct zpci_dev *zdev) {}
166#endif /* CONFIG_HOTPLUG_PCI_S390 */
167
171/* Helpers */ 168/* Helpers */
172struct zpci_dev *get_zdev(struct pci_dev *); 169struct zpci_dev *get_zdev(struct pci_dev *);
173struct zpci_dev *get_zdev_by_fid(u32); 170struct zpci_dev *get_zdev_by_fid(u32);
174bool zpci_fid_present(u32);
175 171
176/* sysfs */ 172/* sysfs */
177int zpci_sysfs_add_device(struct device *); 173int zpci_sysfs_add_device(struct device *);
@@ -181,14 +177,6 @@ void zpci_sysfs_remove_device(struct device *);
181int zpci_dma_init(void); 177int zpci_dma_init(void);
182void zpci_dma_exit(void); 178void zpci_dma_exit(void);
183 179
184/* Hotplug */
185extern struct mutex zpci_list_lock;
186extern struct list_head zpci_list;
187extern unsigned int s390_pci_probe;
188
189void zpci_register_hp_ops(struct pci_hp_callback_ops *);
190void zpci_deregister_hp_ops(void);
191
192/* FMB */ 180/* FMB */
193int zpci_fmb_enable_device(struct zpci_dev *); 181int zpci_fmb_enable_device(struct zpci_dev *);
194int zpci_fmb_disable_device(struct zpci_dev *); 182int zpci_fmb_disable_device(struct zpci_dev *);
diff --git a/arch/s390/include/asm/pci_insn.h b/arch/s390/include/asm/pci_insn.h
index e6a2bdd4d705..df6eac9f0cb4 100644
--- a/arch/s390/include/asm/pci_insn.h
+++ b/arch/s390/include/asm/pci_insn.h
@@ -79,11 +79,11 @@ struct zpci_fib {
 } __packed;
 
 
-int s390pci_mod_fc(u64 req, struct zpci_fib *fib);
-int s390pci_refresh_trans(u64 fn, u64 addr, u64 range);
-int s390pci_load(u64 *data, u64 req, u64 offset);
-int s390pci_store(u64 data, u64 req, u64 offset);
-int s390pci_store_block(const u64 *data, u64 req, u64 offset);
-void set_irq_ctrl(u16 ctl, char *unused, u8 isc);
+int zpci_mod_fc(u64 req, struct zpci_fib *fib);
+int zpci_refresh_trans(u64 fn, u64 addr, u64 range);
+int zpci_load(u64 *data, u64 req, u64 offset);
+int zpci_store(u64 data, u64 req, u64 offset);
+int zpci_store_block(const u64 *data, u64 req, u64 offset);
+void zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc);
 
 #endif
diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h
index 83a9caa6ae53..d194d544d694 100644
--- a/arch/s390/include/asm/pci_io.h
+++ b/arch/s390/include/asm/pci_io.h
@@ -36,7 +36,7 @@ static inline RETTYPE zpci_read_##RETTYPE(const volatile void __iomem *addr) \
 	u64 data;	\
 	int rc;	\
 	\
-	rc = s390pci_load(&data, req, ZPCI_OFFSET(addr));	\
+	rc = zpci_load(&data, req, ZPCI_OFFSET(addr));	\
 	if (rc)	\
 		data = -1ULL;	\
 	return (RETTYPE) data;	\
@@ -50,7 +50,7 @@ static inline void zpci_write_##VALTYPE(VALTYPE val, \
 	u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH);	\
 	u64 data = (VALTYPE) val;	\
 	\
-	s390pci_store(data, req, ZPCI_OFFSET(addr));	\
+	zpci_store(data, req, ZPCI_OFFSET(addr));	\
 }
 
 zpci_read(8, u64)
@@ -83,7 +83,7 @@ static inline int zpci_write_single(u64 req, const u64 *data, u64 offset, u8 len
 		val = 0;	/* let FW report error */
 		break;
 	}
-	return s390pci_store(val, req, offset);
+	return zpci_store(val, req, offset);
 }
 
 static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len)
@@ -91,7 +91,7 @@ static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len)
 	u64 data;
 	int cc;
 
-	cc = s390pci_load(&data, req, offset);
+	cc = zpci_load(&data, req, offset);
 	if (cc)
 		goto out;
 
@@ -115,7 +115,7 @@ out:
 
 static inline int zpci_write_block(u64 req, const u64 *data, u64 offset)
 {
-	return s390pci_store_block(data, req, offset);
+	return zpci_store_block(data, req, offset);
 }
 
 static inline u8 zpci_get_max_write_size(u64 src, u64 dst, int len, int max)
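
Two conventions carry the pci_io.h template above: a single macro stamps out one typed accessor per access width, and a failed load is reported as all-ones (data = -1ULL), the customary PCI signal for a dead device. A hedged, self-contained sketch of both, where bus_read_* and the NULL-means-error rule are illustrative stand-ins for zpci_load() and a real condition code:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* One template per access width, modelled on zpci_read_##RETTYPE. */
#define DEFINE_BUS_READ(TYPE)					\
static TYPE bus_read_##TYPE(const void *addr)			\
{								\
	TYPE val;						\
								\
	if (addr == NULL)	/* simulated bus error */	\
		return (TYPE) -1ULL;	/* all-ones result */	\
	memcpy(&val, addr, sizeof(val));			\
	return val;						\
}

DEFINE_BUS_READ(uint8_t)
DEFINE_BUS_READ(uint32_t)

int main(void)
{
	uint32_t reg = 0x12345678;
	uint8_t byte = 0xab;

	printf("%#x\n", bus_read_uint32_t(&reg));	/* 0x12345678 */
	printf("%#x\n", bus_read_uint8_t(&byte));	/* 0xab */
	printf("%#x\n", bus_read_uint32_t(NULL));	/* 0xffffffff */
	return 0;
}
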
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 75fb726de91f..9b60a36c348d 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -217,63 +217,57 @@ extern unsigned long MODULES_END;
 
 /* Hardware bits in the page table entry */
 #define _PAGE_CO	0x100		/* HW Change-bit override */
-#define _PAGE_RO	0x200		/* HW read-only bit  */
+#define _PAGE_PROTECT	0x200		/* HW read-only bit  */
 #define _PAGE_INVALID	0x400		/* HW invalid bit    */
+#define _PAGE_LARGE	0x800		/* Bit to mark a large pte */
 
 /* Software bits in the page table entry */
-#define _PAGE_SWT	0x001		/* SW pte type bit t */
-#define _PAGE_SWX	0x002		/* SW pte type bit x */
-#define _PAGE_SWC	0x004		/* SW pte changed bit */
-#define _PAGE_SWR	0x008		/* SW pte referenced bit */
-#define _PAGE_SWW	0x010		/* SW pte write bit */
-#define _PAGE_SPECIAL	0x020		/* SW associated with special page */
+#define _PAGE_PRESENT	0x001		/* SW pte present bit */
+#define _PAGE_TYPE	0x002		/* SW pte type bit */
+#define _PAGE_YOUNG	0x004		/* SW pte young bit */
+#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
+#define _PAGE_READ	0x010		/* SW pte read bit */
+#define _PAGE_WRITE	0x020		/* SW pte write bit */
+#define _PAGE_SPECIAL	0x040		/* SW associated with special page */
 #define __HAVE_ARCH_PTE_SPECIAL
 
 /* Set of bits not changed in pte_modify */
 #define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_CO | \
-				 _PAGE_SWC | _PAGE_SWR)
-
-/* Six different types of pages. */
-#define _PAGE_TYPE_EMPTY	0x400
-#define _PAGE_TYPE_NONE		0x401
-#define _PAGE_TYPE_SWAP		0x403
-#define _PAGE_TYPE_FILE		0x601	/* bit 0x002 is used for offset !! */
-#define _PAGE_TYPE_RO		0x200
-#define _PAGE_TYPE_RW		0x000
-
-/*
- * Only four types for huge pages, using the invalid bit and protection bit
- * of a segment table entry.
- */
-#define _HPAGE_TYPE_EMPTY	0x020	/* _SEGMENT_ENTRY_INV */
-#define _HPAGE_TYPE_NONE	0x220
-#define _HPAGE_TYPE_RO		0x200	/* _SEGMENT_ENTRY_RO */
-#define _HPAGE_TYPE_RW		0x000
+				 _PAGE_DIRTY | _PAGE_YOUNG)
 
 /*
- * PTE type bits are rather complicated. handle_pte_fault uses pte_present,
- * pte_none and pte_file to find out the pte type WITHOUT holding the page
- * table lock. ptep_clear_flush on the other hand uses ptep_clear_flush to
- * invalidate a given pte. ipte sets the hw invalid bit and clears all tlbs
- * for the page. The page table entry is set to _PAGE_TYPE_EMPTY afterwards.
- * This change is done while holding the lock, but the intermediate step
- * of a previously valid pte with the hw invalid bit set can be observed by
- * handle_pte_fault. That makes it necessary that all valid pte types with
- * the hw invalid bit set must be distinguishable from the four pte types
- * empty, none, swap and file.
+ * handle_pte_fault uses pte_present, pte_none and pte_file to find out the
+ * pte type WITHOUT holding the page table lock. The _PAGE_PRESENT bit
+ * is used to distinguish present from not-present ptes. It is changed only
+ * with the page table lock held.
+ *
+ * The following table gives the different possible bit combinations for
+ * the pte hardware and software bits in the last 12 bits of a pte:
  *
- *			irxt  ipte  irxt
- * _PAGE_TYPE_EMPTY	1000   ->   1000
- * _PAGE_TYPE_NONE	1001   ->   1001
- * _PAGE_TYPE_SWAP	1011   ->   1011
- * _PAGE_TYPE_FILE	11?1   ->   11?1
- * _PAGE_TYPE_RO	0100   ->   1100
- * _PAGE_TYPE_RW	0000   ->   1000
+ *				842100000000
+ *				000084210000
+ *				000000008421
+ *				.IR...wrdytp
+ * empty			.10...000000
+ * swap				.10...xxxx10
+ * file				.11...xxxxx0
+ * prot-none, clean, old	.11...000001
+ * prot-none, clean, young	.11...000101
+ * prot-none, dirty, old	.10...001001
+ * prot-none, dirty, young	.10...001101
+ * read-only, clean, old	.11...010001
+ * read-only, clean, young	.01...010101
+ * read-only, dirty, old	.11...011001
+ * read-only, dirty, young	.01...011101
+ * read-write, clean, old	.11...110001
+ * read-write, clean, young	.01...110101
+ * read-write, dirty, old	.10...111001
+ * read-write, dirty, young	.00...111101
  *
- * pte_none is true for bits combinations 1000, 1010, 1100, 1110
- * pte_present is true for bits combinations 0000, 0010, 0100, 0110, 1001
- * pte_file is true for bits combinations 1101, 1111
- * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
+ * pte_present is true for the bit pattern .xx...xxxxx1, (pte & 0x001) == 0x001
+ * pte_none    is true for the bit pattern .10...xxxx00, (pte & 0x603) == 0x400
+ * pte_file    is true for the bit pattern .11...xxxxx0, (pte & 0x601) == 0x600
+ * pte_swap    is true for the bit pattern .10...xxxx10, (pte & 0x603) == 0x402
 */
 
 #ifndef CONFIG_64BIT
@@ -286,14 +280,25 @@ extern unsigned long MODULES_END;
 #define _ASCE_TABLE_LENGTH	0x7f	/* 128 x 64 entries = 8k	    */
 
 /* Bits in the segment table entry */
+#define _SEGMENT_ENTRY_BITS	0x7fffffffUL	/* Valid segment table bits */
 #define _SEGMENT_ENTRY_ORIGIN	0x7fffffc0UL	/* page table origin	    */
-#define _SEGMENT_ENTRY_RO	0x200	/* page protection bit		    */
-#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry	    */
+#define _SEGMENT_ENTRY_PROTECT	0x200	/* page protection bit		    */
+#define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry	    */
 #define _SEGMENT_ENTRY_COMMON	0x10	/* common segment bit		    */
 #define _SEGMENT_ENTRY_PTL	0x0f	/* page table length		    */
+#define _SEGMENT_ENTRY_NONE	_SEGMENT_ENTRY_PROTECT
 
 #define _SEGMENT_ENTRY		(_SEGMENT_ENTRY_PTL)
-#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)
+#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)
+
+/*
+ * Segment table entry encoding (I = invalid, R = read-only bit):
+ *				..R...I.....
+ * prot-none			..1...1.....
+ * read-only			..1...0.....
+ * read-write			..0...0.....
+ * empty			..0...1.....
+ */
 
 /* Page status table bits for virtualization */
 #define PGSTE_ACC_BITS	0xf0000000UL
@@ -303,9 +308,7 @@ extern unsigned long MODULES_END;
 #define PGSTE_HC_BIT	0x00200000UL
 #define PGSTE_GR_BIT	0x00040000UL
 #define PGSTE_GC_BIT	0x00020000UL
-#define PGSTE_UR_BIT	0x00008000UL
-#define PGSTE_UC_BIT	0x00004000UL	/* user dirty (migration) */
-#define PGSTE_IN_BIT	0x00002000UL	/* IPTE notify bit */
+#define PGSTE_IN_BIT	0x00008000UL	/* IPTE notify bit */
 
 #else /* CONFIG_64BIT */
 
@@ -324,8 +327,8 @@ extern unsigned long MODULES_END;
 
 /* Bits in the region table entry */
 #define _REGION_ENTRY_ORIGIN	~0xfffUL/* region/segment table origin	    */
-#define _REGION_ENTRY_RO	0x200	/* region protection bit	    */
-#define _REGION_ENTRY_INV	0x20	/* invalid region table entry	    */
+#define _REGION_ENTRY_PROTECT	0x200	/* region protection bit	    */
+#define _REGION_ENTRY_INVALID	0x20	/* invalid region table entry	    */
 #define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask   */
 #define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
 #define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
@@ -333,29 +336,47 @@ extern unsigned long MODULES_END;
 #define _REGION_ENTRY_LENGTH	0x03	/* region third length		    */
 
 #define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
-#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INV)
+#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
 #define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
-#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INV)
+#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
 #define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
-#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV)
+#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)
 
 #define _REGION3_ENTRY_LARGE	0x400	/* RTTE-format control, large page  */
 #define _REGION3_ENTRY_RO	0x200	/* page protection bit		    */
 #define _REGION3_ENTRY_CO	0x100	/* change-recording override	    */
 
 /* Bits in the segment table entry */
+#define _SEGMENT_ENTRY_BITS	0xfffffffffffffe33UL
+#define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff1ff33UL
 #define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address	    */
 #define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL/* segment table origin		    */
-#define _SEGMENT_ENTRY_RO	0x200	/* page protection bit		    */
-#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry	    */
+#define _SEGMENT_ENTRY_PROTECT	0x200	/* page protection bit		    */
+#define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry	    */
 
 #define _SEGMENT_ENTRY		(0)
-#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)
+#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)
 
 #define _SEGMENT_ENTRY_LARGE	0x400	/* STE-format control, large page   */
 #define _SEGMENT_ENTRY_CO	0x100	/* change-recording override	    */
+#define _SEGMENT_ENTRY_SPLIT	0x001	/* THP splitting bit */
+#define _SEGMENT_ENTRY_YOUNG	0x002	/* SW segment young bit */
+#define _SEGMENT_ENTRY_NONE	_SEGMENT_ENTRY_YOUNG
+
+/*
+ * Segment table entry encoding (R = read-only, I = invalid, y = young bit):
+ *			..R...I...y.
+ * prot-none, old	..0...1...1.
+ * prot-none, young	..1...1...1.
+ * read-only, old	..1...1...0.
+ * read-only, young	..1...0...1.
+ * read-write, old	..0...1...0.
+ * read-write, young	..0...0...1.
+ * The segment table origin is used to distinguish empty (origin==0) from
+ * read-write, old segment table entries (origin!=0)
+ */
+
 #define _SEGMENT_ENTRY_SPLIT_BIT 0	/* THP splitting bit number */
-#define _SEGMENT_ENTRY_SPLIT	(1UL << _SEGMENT_ENTRY_SPLIT_BIT)
 
 /* Set of bits not changed in pmd_modify */
 #define _SEGMENT_CHG_MASK	(_SEGMENT_ENTRY_ORIGIN | _SEGMENT_ENTRY_LARGE \
@@ -369,9 +390,7 @@ extern unsigned long MODULES_END;
 #define PGSTE_HC_BIT	0x0020000000000000UL
 #define PGSTE_GR_BIT	0x0004000000000000UL
 #define PGSTE_GC_BIT	0x0002000000000000UL
-#define PGSTE_UR_BIT	0x0000800000000000UL
-#define PGSTE_UC_BIT	0x0000400000000000UL	/* user dirty (migration) */
-#define PGSTE_IN_BIT	0x0000200000000000UL	/* IPTE notify bit */
+#define PGSTE_IN_BIT	0x0000800000000000UL	/* IPTE notify bit */
 
 #endif /* CONFIG_64BIT */
 
@@ -386,14 +405,18 @@ extern unsigned long MODULES_END;
 /*
  * Page protection definitions.
  */
-#define PAGE_NONE	__pgprot(_PAGE_TYPE_NONE)
-#define PAGE_RO		__pgprot(_PAGE_TYPE_RO)
-#define PAGE_RW		__pgprot(_PAGE_TYPE_RO | _PAGE_SWW)
-#define PAGE_RWC	__pgprot(_PAGE_TYPE_RW | _PAGE_SWW | _PAGE_SWC)
-
-#define PAGE_KERNEL	PAGE_RWC
-#define PAGE_SHARED	PAGE_KERNEL
-#define PAGE_COPY	PAGE_RO
+#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID)
+#define PAGE_READ	__pgprot(_PAGE_PRESENT | _PAGE_READ | \
+				 _PAGE_INVALID | _PAGE_PROTECT)
+#define PAGE_WRITE	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
+				 _PAGE_INVALID | _PAGE_PROTECT)
+
+#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
+				 _PAGE_YOUNG | _PAGE_DIRTY)
+#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
+				 _PAGE_YOUNG | _PAGE_DIRTY)
+#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
+				 _PAGE_PROTECT)
 
 /*
  * On s390 the page table entry has an invalid bit and a read-only bit.
@@ -402,35 +425,31 @@ extern unsigned long MODULES_END;
  */
 	 /*xwr*/
 #define __P000	PAGE_NONE
-#define __P001	PAGE_RO
-#define __P010	PAGE_RO
-#define __P011	PAGE_RO
-#define __P100	PAGE_RO
-#define __P101	PAGE_RO
-#define __P110	PAGE_RO
-#define __P111	PAGE_RO
+#define __P001	PAGE_READ
+#define __P010	PAGE_READ
+#define __P011	PAGE_READ
+#define __P100	PAGE_READ
+#define __P101	PAGE_READ
+#define __P110	PAGE_READ
+#define __P111	PAGE_READ
 
 #define __S000	PAGE_NONE
-#define __S001	PAGE_RO
-#define __S010	PAGE_RW
-#define __S011	PAGE_RW
-#define __S100	PAGE_RO
-#define __S101	PAGE_RO
-#define __S110	PAGE_RW
-#define __S111	PAGE_RW
+#define __S001	PAGE_READ
+#define __S010	PAGE_WRITE
+#define __S011	PAGE_WRITE
+#define __S100	PAGE_READ
+#define __S101	PAGE_READ
+#define __S110	PAGE_WRITE
+#define __S111	PAGE_WRITE
 
 /*
  * Segment entry (large page) protection definitions.
  */
-#define SEGMENT_NONE	__pgprot(_HPAGE_TYPE_NONE)
-#define SEGMENT_RO	__pgprot(_HPAGE_TYPE_RO)
-#define SEGMENT_RW	__pgprot(_HPAGE_TYPE_RW)
-
-static inline int mm_exclusive(struct mm_struct *mm)
-{
-	return likely(mm == current->active_mm &&
-		      atomic_read(&mm->context.attach_count) <= 1);
-}
+#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
+				 _SEGMENT_ENTRY_NONE)
+#define SEGMENT_READ	__pgprot(_SEGMENT_ENTRY_INVALID | \
+				 _SEGMENT_ENTRY_PROTECT)
+#define SEGMENT_WRITE	__pgprot(_SEGMENT_ENTRY_INVALID)
 
 static inline int mm_has_pgste(struct mm_struct *mm)
 {
@@ -467,7 +486,7 @@ static inline int pgd_none(pgd_t pgd)
 {
 	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
 		return 0;
-	return (pgd_val(pgd) & _REGION_ENTRY_INV) != 0UL;
+	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
 }
 
 static inline int pgd_bad(pgd_t pgd)
@@ -478,7 +497,7 @@ static inline int pgd_bad(pgd_t pgd)
 	 * invalid for either table entry.
 	 */
 	unsigned long mask =
-		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
+		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
 		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
 	return (pgd_val(pgd) & mask) != 0;
 }
@@ -494,7 +513,7 @@ static inline int pud_none(pud_t pud)
 {
 	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
 		return 0;
-	return (pud_val(pud) & _REGION_ENTRY_INV) != 0UL;
+	return (pud_val(pud) & _REGION_ENTRY_INVALID) != 0UL;
 }
 
 static inline int pud_large(pud_t pud)
@@ -512,7 +531,7 @@ static inline int pud_bad(pud_t pud)
 	 * invalid for either table entry.
 	 */
 	unsigned long mask =
-		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
+		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
 		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
 	return (pud_val(pud) & mask) != 0;
 }
@@ -521,30 +540,36 @@ static inline int pud_bad(pud_t pud)
 
 static inline int pmd_present(pmd_t pmd)
 {
-	unsigned long mask = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO;
-	return (pmd_val(pmd) & mask) == _HPAGE_TYPE_NONE ||
-		!(pmd_val(pmd) & _SEGMENT_ENTRY_INV);
+	return pmd_val(pmd) != _SEGMENT_ENTRY_INVALID;
 }
 
 static inline int pmd_none(pmd_t pmd)
 {
-	return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) &&
-		!(pmd_val(pmd) & _SEGMENT_ENTRY_RO);
+	return pmd_val(pmd) == _SEGMENT_ENTRY_INVALID;
 }
 
 static inline int pmd_large(pmd_t pmd)
 {
 #ifdef CONFIG_64BIT
-	return !!(pmd_val(pmd) & _SEGMENT_ENTRY_LARGE);
+	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
 #else
 	return 0;
 #endif
 }
 
+static inline int pmd_prot_none(pmd_t pmd)
+{
+	return (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID) &&
+		(pmd_val(pmd) & _SEGMENT_ENTRY_NONE);
+}
+
 static inline int pmd_bad(pmd_t pmd)
 {
-	unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV;
-	return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY;
+#ifdef CONFIG_64BIT
+	if (pmd_large(pmd))
+		return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
+#endif
+	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
 }
 
 #define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
@@ -563,31 +588,40 @@ extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
 #define __HAVE_ARCH_PMD_WRITE
 static inline int pmd_write(pmd_t pmd)
 {
-	return (pmd_val(pmd) & _SEGMENT_ENTRY_RO) == 0;
+	if (pmd_prot_none(pmd))
+		return 0;
+	return (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT) == 0;
 }
 
 static inline int pmd_young(pmd_t pmd)
 {
-	return 0;
+	int young = 0;
+#ifdef CONFIG_64BIT
+	if (pmd_prot_none(pmd))
+		young = (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT) != 0;
+	else
+		young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
+#endif
+	return young;
 }
 
-static inline int pte_none(pte_t pte)
+static inline int pte_present(pte_t pte)
 {
-	return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT);
+	/* Bit pattern: (pte & 0x001) == 0x001 */
+	return (pte_val(pte) & _PAGE_PRESENT) != 0;
 }
 
-static inline int pte_present(pte_t pte)
+static inline int pte_none(pte_t pte)
 {
-	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT | _PAGE_SWX;
-	return (pte_val(pte) & mask) == _PAGE_TYPE_NONE ||
-		(!(pte_val(pte) & _PAGE_INVALID) &&
-		 !(pte_val(pte) & _PAGE_SWT));
+	/* Bit pattern: pte == 0x400 */
+	return pte_val(pte) == _PAGE_INVALID;
 }
 
 static inline int pte_file(pte_t pte)
 {
-	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT;
-	return (pte_val(pte) & mask) == _PAGE_TYPE_FILE;
+	/* Bit pattern: (pte & 0x601) == 0x600 */
+	return (pte_val(pte) & (_PAGE_INVALID | _PAGE_PROTECT | _PAGE_PRESENT))
+		== (_PAGE_INVALID | _PAGE_PROTECT);
 }
 
 static inline int pte_special(pte_t pte)
@@ -634,6 +668,15 @@ static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
 #endif
 }
 
+static inline pgste_t pgste_get(pte_t *ptep)
+{
+	unsigned long pgste = 0;
+#ifdef CONFIG_PGSTE
+	pgste = *(unsigned long *)(ptep + PTRS_PER_PTE);
+#endif
+	return __pgste(pgste);
+}
+
 static inline void pgste_set(pte_t *ptep, pgste_t pgste)
 {
 #ifdef CONFIG_PGSTE
@@ -644,33 +687,28 @@ static inline void pgste_set(pte_t *ptep, pgste_t pgste)
 static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
 {
 #ifdef CONFIG_PGSTE
-	unsigned long address, bits;
-	unsigned char skey;
+	unsigned long address, bits, skey;
 
 	if (pte_val(*ptep) & _PAGE_INVALID)
 		return pgste;
 	address = pte_val(*ptep) & PAGE_MASK;
-	skey = page_get_storage_key(address);
+	skey = (unsigned long) page_get_storage_key(address);
 	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
-	/* Clear page changed & referenced bit in the storage key */
-	if (bits & _PAGE_CHANGED)
+	if (!(pgste_val(pgste) & PGSTE_HC_BIT) && (bits & _PAGE_CHANGED)) {
+		/* Transfer dirty + referenced bit to host bits in pgste */
+		pgste_val(pgste) |= bits << 52;
 		page_set_storage_key(address, skey ^ bits, 0);
-	else if (bits)
+	} else if (!(pgste_val(pgste) & PGSTE_HR_BIT) &&
+		   (bits & _PAGE_REFERENCED)) {
+		/* Transfer referenced bit to host bit in pgste */
+		pgste_val(pgste) |= PGSTE_HR_BIT;
 		page_reset_referenced(address);
+	}
 	/* Transfer page changed & referenced bit to guest bits in pgste */
 	pgste_val(pgste) |= bits << 48;		/* GR bit & GC bit */
-	/* Get host changed & referenced bits from pgste */
-	bits |= (pgste_val(pgste) & (PGSTE_HR_BIT | PGSTE_HC_BIT)) >> 52;
-	/* Transfer page changed & referenced bit to kvm user bits */
-	pgste_val(pgste) |= bits << 45;	/* PGSTE_UR_BIT & PGSTE_UC_BIT */
-	/* Clear relevant host bits in pgste. */
-	pgste_val(pgste) &= ~(PGSTE_HR_BIT | PGSTE_HC_BIT);
-	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
 	/* Copy page access key and fetch protection bit to pgste */
-	pgste_val(pgste) |=
-		(unsigned long) (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
-	/* Transfer referenced bit to pte */
-	pte_val(*ptep) |= (bits & _PAGE_REFERENCED) << 1;
+	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
+	pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
 #endif
 	return pgste;
 
@@ -679,24 +717,11 @@ static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
 static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
 {
 #ifdef CONFIG_PGSTE
-	int young;
-
 	if (pte_val(*ptep) & _PAGE_INVALID)
 		return pgste;
 	/* Get referenced bit from storage key */
-	young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
-	if (young)
-		pgste_val(pgste) |= PGSTE_GR_BIT;
-	/* Get host referenced bit from pgste */
-	if (pgste_val(pgste) & PGSTE_HR_BIT) {
-		pgste_val(pgste) &= ~PGSTE_HR_BIT;
-		young = 1;
-	}
-	/* Transfer referenced bit to kvm user bits and pte */
-	if (young) {
-		pgste_val(pgste) |= PGSTE_UR_BIT;
-		pte_val(*ptep) |= _PAGE_SWR;
-	}
+	if (page_reset_referenced(pte_val(*ptep) & PAGE_MASK))
+		pgste_val(pgste) |= PGSTE_HR_BIT | PGSTE_GR_BIT;
 #endif
 	return pgste;
 }
@@ -723,13 +748,13 @@ static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry)
 
 static inline void pgste_set_pte(pte_t *ptep, pte_t entry)
 {
-	if (!MACHINE_HAS_ESOP && (pte_val(entry) & _PAGE_SWW)) {
+	if (!MACHINE_HAS_ESOP && (pte_val(entry) & _PAGE_WRITE)) {
 		/*
 		 * Without enhanced suppression-on-protection force
 		 * the dirty bit on for all writable ptes.
 		 */
-		pte_val(entry) |= _PAGE_SWC;
-		pte_val(entry) &= ~_PAGE_RO;
+		pte_val(entry) |= _PAGE_DIRTY;
+		pte_val(entry) &= ~_PAGE_PROTECT;
 	}
 	*ptep = entry;
 }
@@ -841,21 +866,17 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
  */
 static inline int pte_write(pte_t pte)
 {
-	return (pte_val(pte) & _PAGE_SWW) != 0;
+	return (pte_val(pte) & _PAGE_WRITE) != 0;
 }
 
 static inline int pte_dirty(pte_t pte)
 {
-	return (pte_val(pte) & _PAGE_SWC) != 0;
+	return (pte_val(pte) & _PAGE_DIRTY) != 0;
 }
 
 static inline int pte_young(pte_t pte)
 {
-#ifdef CONFIG_PGSTE
-	if (pte_val(pte) & _PAGE_SWR)
-		return 1;
-#endif
-	return 0;
+	return (pte_val(pte) & _PAGE_YOUNG) != 0;
 }
 
 /*
@@ -880,12 +901,12 @@ static inline void pud_clear(pud_t *pud)
 
 static inline void pmd_clear(pmd_t *pmdp)
 {
-	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
+	pmd_val(*pmdp) = _SEGMENT_ENTRY_INVALID;
 }
 
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
-	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+	pte_val(*ptep) = _PAGE_INVALID;
 }
 
 /*
@@ -896,55 +917,63 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
 	pte_val(pte) &= _PAGE_CHG_MASK;
 	pte_val(pte) |= pgprot_val(newprot);
-	if ((pte_val(pte) & _PAGE_SWC) && (pte_val(pte) & _PAGE_SWW))
-		pte_val(pte) &= ~_PAGE_RO;
+	/*
+	 * newprot for PAGE_NONE, PAGE_READ and PAGE_WRITE has the
+	 * invalid bit set, clear it again for readable, young pages
+	 */
+	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
+		pte_val(pte) &= ~_PAGE_INVALID;
+	/*
+	 * newprot for PAGE_READ and PAGE_WRITE has the page protection
+	 * bit set, clear it again for writable, dirty pages
+	 */
+	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
+		pte_val(pte) &= ~_PAGE_PROTECT;
 	return pte;
 }
 
 static inline pte_t pte_wrprotect(pte_t pte)
 {
-	pte_val(pte) &= ~_PAGE_SWW;
-	/* Do not clobber _PAGE_TYPE_NONE pages!  */
-	if (!(pte_val(pte) & _PAGE_INVALID))
-		pte_val(pte) |= _PAGE_RO;
+	pte_val(pte) &= ~_PAGE_WRITE;
+	pte_val(pte) |= _PAGE_PROTECT;
 	return pte;
 }
 
 static inline pte_t pte_mkwrite(pte_t pte)
 {
-	pte_val(pte) |= _PAGE_SWW;
-	if (pte_val(pte) & _PAGE_SWC)
-		pte_val(pte) &= ~_PAGE_RO;
+	pte_val(pte) |= _PAGE_WRITE;
+	if (pte_val(pte) & _PAGE_DIRTY)
+		pte_val(pte) &= ~_PAGE_PROTECT;
 	return pte;
 }
 
 static inline pte_t pte_mkclean(pte_t pte)
 {
-	pte_val(pte) &= ~_PAGE_SWC;
-	/* Do not clobber _PAGE_TYPE_NONE pages!  */
-	if (!(pte_val(pte) & _PAGE_INVALID))
-		pte_val(pte) |= _PAGE_RO;
+	pte_val(pte) &= ~_PAGE_DIRTY;
+	pte_val(pte) |= _PAGE_PROTECT;
 	return pte;
 }
 
 static inline pte_t pte_mkdirty(pte_t pte)
 {
-	pte_val(pte) |= _PAGE_SWC;
-	if (pte_val(pte) & _PAGE_SWW)
-		pte_val(pte) &= ~_PAGE_RO;
+	pte_val(pte) |= _PAGE_DIRTY;
+	if (pte_val(pte) & _PAGE_WRITE)
+		pte_val(pte) &= ~_PAGE_PROTECT;
 	return pte;
 }
 
 static inline pte_t pte_mkold(pte_t pte)
 {
-#ifdef CONFIG_PGSTE
-	pte_val(pte) &= ~_PAGE_SWR;
-#endif
+	pte_val(pte) &= ~_PAGE_YOUNG;
+	pte_val(pte) |= _PAGE_INVALID;
 	return pte;
 }
 
 static inline pte_t pte_mkyoung(pte_t pte)
 {
+	pte_val(pte) |= _PAGE_YOUNG;
+	if (pte_val(pte) & _PAGE_READ)
+		pte_val(pte) &= ~_PAGE_INVALID;
 	return pte;
 }
 
@@ -957,7 +986,7 @@ static inline pte_t pte_mkspecial(pte_t pte)
 #ifdef CONFIG_HUGETLB_PAGE
 static inline pte_t pte_mkhuge(pte_t pte)
 {
-	pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO);
+	pte_val(pte) |= _PAGE_LARGE;
 	return pte;
 }
 #endif
@@ -974,8 +1003,8 @@ static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm,
 	if (mm_has_pgste(mm)) {
 		pgste = pgste_get_lock(ptep);
 		pgste = pgste_update_all(ptep, pgste);
-		dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
-		pgste_val(pgste) &= ~PGSTE_UC_BIT;
+		dirty = !!(pgste_val(pgste) & PGSTE_HC_BIT);
+		pgste_val(pgste) &= ~PGSTE_HC_BIT;
 		pgste_set_unlock(ptep, pgste);
 		return dirty;
 	}
@@ -994,59 +1023,75 @@ static inline int ptep_test_and_clear_user_young(struct mm_struct *mm,
 	if (mm_has_pgste(mm)) {
 		pgste = pgste_get_lock(ptep);
 		pgste = pgste_update_young(ptep, pgste);
-		young = !!(pgste_val(pgste) & PGSTE_UR_BIT);
-		pgste_val(pgste) &= ~PGSTE_UR_BIT;
+		young = !!(pgste_val(pgste) & PGSTE_HR_BIT);
+		pgste_val(pgste) &= ~PGSTE_HR_BIT;
 		pgste_set_unlock(ptep, pgste);
 	}
 	return young;
 }
 
+static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
+{
+	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
+#ifndef CONFIG_64BIT
+		/* pto must point to the start of the segment table */
+		pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
+#else
+		/* ipte in zarch mode can do the math */
+		pte_t *pto = ptep;
+#endif
+		asm volatile(
+			"	ipte	%2,%3"
+			: "=m" (*ptep) : "m" (*ptep),
+			  "a" (pto), "a" (address));
+	}
+}
+
+static inline void ptep_flush_lazy(struct mm_struct *mm,
+				   unsigned long address, pte_t *ptep)
+{
+	int active = (mm == current->active_mm) ? 1 : 0;
+
+	if (atomic_read(&mm->context.attach_count) > active)
+		__ptep_ipte(address, ptep);
+	else
+		mm->context.flush_mm = 1;
+}
+
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
 					    unsigned long addr, pte_t *ptep)
 {
 	pgste_t pgste;
 	pte_t pte;
+	int young;
 
 	if (mm_has_pgste(vma->vm_mm)) {
 		pgste = pgste_get_lock(ptep);
-		pgste = pgste_update_young(ptep, pgste);
-		pte = *ptep;
-		*ptep = pte_mkold(pte);
-		pgste_set_unlock(ptep, pgste);
-		return pte_young(pte);
+		pgste = pgste_ipte_notify(vma->vm_mm, addr, ptep, pgste);
 	}
-	return 0;
+
+	pte = *ptep;
+	__ptep_ipte(addr, ptep);
+	young = pte_young(pte);
+	pte = pte_mkold(pte);
+
+	if (mm_has_pgste(vma->vm_mm)) {
+		pgste_set_pte(ptep, pte);
+		pgste_set_unlock(ptep, pgste);
+	} else
+		*ptep = pte;
+
+	return young;
 }
 
 #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
 static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
 					 unsigned long address, pte_t *ptep)
 {
-	/* No need to flush TLB
-	 * On s390 reference bits are in storage key and never in TLB
-	 * With virtualization we handle the reference bit, without we
-	 * we can simply return */
 	return ptep_test_and_clear_young(vma, address, ptep);
 }
 
-static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
-{
-	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
-#ifndef CONFIG_64BIT
-		/* pto must point to the start of the segment table */
-		pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
-#else
-		/* ipte in zarch mode can do the math */
-		pte_t *pto = ptep;
-#endif
-		asm volatile(
-			"	ipte	%2,%3"
-			: "=m" (*ptep) : "m" (*ptep),
-			  "a" (pto), "a" (address));
-	}
-}
-
 /*
  * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
  * both clear the TLB for the unmapped pte. The reason is that
@@ -1067,16 +1112,14 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
 	pgste_t pgste;
 	pte_t pte;
 
-	mm->context.flush_mm = 1;
 	if (mm_has_pgste(mm)) {
 		pgste = pgste_get_lock(ptep);
 		pgste = pgste_ipte_notify(mm, address, ptep, pgste);
 	}
 
 	pte = *ptep;
-	if (!mm_exclusive(mm))
-		__ptep_ipte(address, ptep);
-	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+	ptep_flush_lazy(mm, address, ptep);
+	pte_val(*ptep) = _PAGE_INVALID;
 
 	if (mm_has_pgste(mm)) {
 		pgste = pgste_update_all(&pte, pgste);
@@ -1093,15 +1136,14 @@ static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
 	pgste_t pgste;
 	pte_t pte;
 
-	mm->context.flush_mm = 1;
 	if (mm_has_pgste(mm)) {
 		pgste = pgste_get_lock(ptep);
 		pgste_ipte_notify(mm, address, ptep, pgste);
 	}
 
 	pte = *ptep;
-	if (!mm_exclusive(mm))
-		__ptep_ipte(address, ptep);
+	ptep_flush_lazy(mm, address, ptep);
+	pte_val(*ptep) |= _PAGE_INVALID;
 
 	if (mm_has_pgste(mm)) {
 		pgste = pgste_update_all(&pte, pgste);
@@ -1117,7 +1159,7 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm,
 	pgste_t pgste;
 
 	if (mm_has_pgste(mm)) {
-		pgste = *(pgste_t *)(ptep + PTRS_PER_PTE);
+		pgste = pgste_get(ptep);
 		pgste_set_key(ptep, pgste, pte);
 		pgste_set_pte(ptep, pte);
 		pgste_set_unlock(ptep, pgste);
@@ -1139,7 +1181,7 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
 
 	pte = *ptep;
 	__ptep_ipte(address, ptep);
-	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+	pte_val(*ptep) = _PAGE_INVALID;
 
 	if (mm_has_pgste(vma->vm_mm)) {
 		pgste = pgste_update_all(&pte, pgste);
@@ -1163,18 +1205,17 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
 	pgste_t pgste;
 	pte_t pte;
 
-	if (mm_has_pgste(mm)) {
+	if (!full && mm_has_pgste(mm)) {
 		pgste = pgste_get_lock(ptep);
-		if (!full)
-			pgste = pgste_ipte_notify(mm, address, ptep, pgste);
+		pgste = pgste_ipte_notify(mm, address, ptep, pgste);
 	}
 
 	pte = *ptep;
 	if (!full)
-		__ptep_ipte(address, ptep);
-	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+		ptep_flush_lazy(mm, address, ptep);
+	pte_val(*ptep) = _PAGE_INVALID;
 
-	if (mm_has_pgste(mm)) {
+	if (!full && mm_has_pgste(mm)) {
 		pgste = pgste_update_all(&pte, pgste);
 		pgste_set_unlock(ptep, pgste);
 	}
@@ -1189,14 +1230,12 @@ static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
 	pte_t pte = *ptep;
 
 	if (pte_write(pte)) {
-		mm->context.flush_mm = 1;
 		if (mm_has_pgste(mm)) {
 			pgste = pgste_get_lock(ptep);
 			pgste = pgste_ipte_notify(mm, address, ptep, pgste);
 		}
 
-		if (!mm_exclusive(mm))
-			__ptep_ipte(address, ptep);
+		ptep_flush_lazy(mm, address, ptep);
 		pte = pte_wrprotect(pte);
 
 		if (mm_has_pgste(mm)) {
@@ -1240,7 +1279,7 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
 {
 	pte_t __pte;
 	pte_val(__pte) = physpage + pgprot_val(pgprot);
-	return __pte;
+	return pte_mkyoung(__pte);
 }
 
 static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
@@ -1248,10 +1287,8 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
 	unsigned long physpage = page_to_phys(page);
 	pte_t __pte = mk_pte_phys(physpage, pgprot);
 
-	if ((pte_val(__pte) & _PAGE_SWW) && PageDirty(page)) {
-		pte_val(__pte) |= _PAGE_SWC;
-		pte_val(__pte) &= ~_PAGE_RO;
-	}
+	if (pte_write(__pte) && PageDirty(page))
+		__pte = pte_mkdirty(__pte);
 	return __pte;
 }
 
@@ -1313,7 +1350,7 @@ static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
 	unsigned long sto = (unsigned long) pmdp -
 			    pmd_index(address) * sizeof(pmd_t);
 
-	if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INV)) {
+	if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)) {
 		asm volatile(
 			"	.insn	rrf,0xb98e0000,%2,%3,0,0"
 			: "=m" (*pmdp)
@@ -1324,24 +1361,68 @@ static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
 	}
 }
 
+static inline void __pmd_csp(pmd_t *pmdp)
+{
+	register unsigned long reg2 asm("2") = pmd_val(*pmdp);
+	register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
+					       _SEGMENT_ENTRY_INVALID;
+	register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;
+
+	asm volatile(
+		"	csp %1,%3"
+		: "=m" (*pmdp)
+		: "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
+}
+
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
 static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
 {
 	/*
-	 * pgprot is PAGE_NONE, PAGE_RO, or PAGE_RW (see __Pxxx / __Sxxx)
+	 * pgprot is PAGE_NONE, PAGE_READ, or PAGE_WRITE (see __Pxxx / __Sxxx)
 	 * Convert to segment table entry format.
 	 */
 	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
 		return pgprot_val(SEGMENT_NONE);
-	if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
-		return pgprot_val(SEGMENT_RO);
-	return pgprot_val(SEGMENT_RW);
+	if (pgprot_val(pgprot) == pgprot_val(PAGE_READ))
+		return pgprot_val(SEGMENT_READ);
+	return pgprot_val(SEGMENT_WRITE);
+}
+
+static inline pmd_t pmd_mkyoung(pmd_t pmd)
+{
+#ifdef CONFIG_64BIT
+	if (pmd_prot_none(pmd)) {
+		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
+	} else {
+		pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
+		pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
+	}
+#endif
+	return pmd;
+}
+
+static inline pmd_t pmd_mkold(pmd_t pmd)
+{
+#ifdef CONFIG_64BIT
+	if (pmd_prot_none(pmd)) {
+		pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
+	} else {
+		pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
+		pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
+	}
+#endif
+	return pmd;
 }
 
 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
 {
+	int young;
+
+	young = pmd_young(pmd);
 	pmd_val(pmd) &= _SEGMENT_CHG_MASK;
 	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
+	if (young)
+		pmd = pmd_mkyoung(pmd);
 	return pmd;
 }
 
@@ -1349,18 +1430,29 @@ static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
 {
 	pmd_t __pmd;
 	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
-	return __pmd;
+	return pmd_mkyoung(__pmd);
 }
 
 static inline pmd_t pmd_mkwrite(pmd_t pmd)
 {
-	/* Do not clobber _HPAGE_TYPE_NONE pages! */
-	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_INV))
-		pmd_val(pmd) &= ~_SEGMENT_ENTRY_RO;
+	/* Do not clobber PROT_NONE segments! */
+	if (!pmd_prot_none(pmd))
+		pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
 	return pmd;
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */
 
+static inline void pmdp_flush_lazy(struct mm_struct *mm,
+				   unsigned long address, pmd_t *pmdp)
+{
+	int active = (mm == current->active_mm) ? 1 : 0;
+
+	if ((atomic_read(&mm->context.attach_count) & 0xffff) > active)
+		__pmd_idte(address, pmdp);
+	else
+		mm->context.flush_mm = 1;
+}
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 
 #define __HAVE_ARCH_PGTABLE_DEPOSIT
@@ -1378,7 +1470,7 @@ static inline int pmd_trans_splitting(pmd_t pmd)
 static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 			      pmd_t *pmdp, pmd_t entry)
 {
-	if (!(pmd_val(entry) & _SEGMENT_ENTRY_INV) && MACHINE_HAS_EDAT1)
+	if (!(pmd_val(entry) & _SEGMENT_ENTRY_INVALID) && MACHINE_HAS_EDAT1)
 		pmd_val(entry) |= _SEGMENT_ENTRY_CO;
 	*pmdp = entry;
 }
@@ -1391,7 +1483,9 @@ static inline pmd_t pmd_mkhuge(pmd_t pmd)
 
 static inline pmd_t pmd_wrprotect(pmd_t pmd)
 {
-	pmd_val(pmd) |= _SEGMENT_ENTRY_RO;
+	/* Do not clobber PROT_NONE segments! */
+	if (!pmd_prot_none(pmd))
+		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
 	return pmd;
 }
 
@@ -1401,50 +1495,16 @@ static inline pmd_t pmd_mkdirty(pmd_t pmd)
 	return pmd;
 }
 
-static inline pmd_t pmd_mkold(pmd_t pmd)
-{
-	/* No referenced bit in the segment table entry. */
-	return pmd;
-}
-
-static inline pmd_t pmd_mkyoung(pmd_t pmd)
-{
-	/* No referenced bit in the segment table entry. */
-	return pmd;
-}
-
 #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
 static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
 					    unsigned long address, pmd_t *pmdp)
 {
-	unsigned long pmd_addr = pmd_val(*pmdp) & HPAGE_MASK;
-	long tmp, rc;
-	int counter;
+	pmd_t pmd;
 
-	rc = 0;
-	if (MACHINE_HAS_RRBM) {
-		counter = PTRS_PER_PTE >> 6;
-		asm volatile(
-			"0:	.insn	rre,0xb9ae0000,%0,%3\n"	/* rrbm */
-			"	ogr	%1,%0\n"
-			"	la	%3,0(%4,%3)\n"
-			"	brct	%2,0b\n"
-			: "=&d" (tmp), "+&d" (rc), "+d" (counter),
-			  "+a" (pmd_addr)
-			: "a" (64 * 4096UL) : "cc");
-		rc = !!rc;
-	} else {
-		counter = PTRS_PER_PTE;
-		asm volatile(
-			"0:	rrbe	0,%2\n"
-			"	la	%2,0(%3,%2)\n"
-			"	brc	12,1f\n"
-			"	lhi	%0,1\n"
-			"1:	brct	%1,0b\n"
-			: "+d" (rc), "+d" (counter), "+a" (pmd_addr)
-			: "a" (4096UL) : "cc");
-	}
-	return rc;
+	pmd = *pmdp;
+	__pmd_idte(address, pmdp);
+	*pmdp = pmd_mkold(pmd);
+	return pmd_young(pmd);
 }
 
 #define __HAVE_ARCH_PMDP_GET_AND_CLEAR
@@ -1510,10 +1570,8 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
  * exception will occur instead of a page translation exception. The
  * specifiation exception has the bad habit not to store necessary
  * information in the lowcore.
- * Bit 21 and bit 22 are the page invalid bit and the page protection
- * bit. We set both to indicate a swapped page.
- * Bit 30 and 31 are used to distinguish the different page types. For
- * a swapped page these bits need to be zero.
+ * Bits 21, 22, 30 and 31 are used to indicate the page type.
+ * A swap pte is indicated by bit pattern (pte & 0x603) == 0x402
  * This leaves the bits 1-19 and bits 24-29 to store type and offset.
  * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19
  * plus 24 for the offset.
@@ -1527,10 +1585,8 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
  * exception will occur instead of a page translation exception. The
  * specifiation exception has the bad habit not to store necessary
  * information in the lowcore.
- * Bit 53 and bit 54 are the page invalid bit and the page protection
- * bit. We set both to indicate a swapped page.
- * Bit 62 and 63 are used to distinguish the different page types. For
- * a swapped page these bits need to be zero.
+ * Bits 53, 54, 62 and 63 are used to indicate the page type.
+ * A swap pte is indicated by bit pattern (pte & 0x603) == 0x402
  * This leaves the bits 0-51 and bits 56-61 to store type and offset.
  * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51
  * plus 56 for the offset.
@@ -1547,7 +1603,7 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
 {
 	pte_t pte;
 	offset &= __SWP_OFFSET_MASK;
-	pte_val(pte) = _PAGE_TYPE_SWAP | ((type & 0x1f) << 2) |
+	pte_val(pte) = _PAGE_INVALID | _PAGE_TYPE | ((type & 0x1f) << 2) |
 		((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
 	return pte;
 }
@@ -1570,7 +1626,7 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
 
 #define pgoff_to_pte(__off) \
 	((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
-	| _PAGE_TYPE_FILE })
+	| _PAGE_INVALID | _PAGE_PROTECT })
 
 #endif /* !__ASSEMBLY__ */
 
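
The core of the pgtable.h rework is the encoding table in the first hunk: every pte state is a combination of the _PAGE_PRESENT/_PAGE_TYPE software bits with the hardware invalid and protect bits, and each predicate collapses to a single mask compare. The following user-space sketch re-declares the masks from the hunk and checks the quoted patterns; the helper names mirror the kernel functions but are local re-implementations, not the kernel code:

#include <assert.h>
#include <stdio.h>

/* Bit values copied from the new pgtable.h hunk above. */
#define _PAGE_PRESENT	0x001	/* SW pte present bit */
#define _PAGE_TYPE	0x002	/* SW pte type bit */
#define _PAGE_PROTECT	0x200	/* HW read-only bit */
#define _PAGE_INVALID	0x400	/* HW invalid bit */

static int pte_present(unsigned long pte)
{
	return (pte & _PAGE_PRESENT) != 0;	/* (pte & 0x001) == 0x001 */
}

static int pte_none(unsigned long pte)
{
	return pte == _PAGE_INVALID;		/* pte == 0x400 */
}

static int pte_file(unsigned long pte)
{
	return (pte & (_PAGE_INVALID | _PAGE_PROTECT | _PAGE_PRESENT))
		== (_PAGE_INVALID | _PAGE_PROTECT);	/* (pte & 0x601) == 0x600 */
}

static int pte_swap(unsigned long pte)
{
	return (pte & (_PAGE_INVALID | _PAGE_PROTECT | _PAGE_TYPE | _PAGE_PRESENT))
		== (_PAGE_INVALID | _PAGE_TYPE);	/* (pte & 0x603) == 0x402 */
}

int main(void)
{
	unsigned long empty = 0x400, swap = 0x402, file = 0x600;
	unsigned long rw_dirty_young = 0x03d;	/* read-write, dirty, young */

	assert(pte_none(empty) && !pte_present(empty));
	assert(pte_swap(swap) && !pte_present(swap));
	assert(pte_file(file) && !pte_present(file));
	assert(pte_present(rw_dirty_young));
	puts("pte bit patterns check out");
	return 0;
}
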
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index b0e6435b2f02..0eb37505cab1 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -43,6 +43,7 @@ extern void execve_tail(void);
 #ifndef CONFIG_64BIT
 
 #define TASK_SIZE		(1UL << 31)
+#define TASK_MAX_SIZE		(1UL << 31)
 #define TASK_UNMAPPED_BASE	(1UL << 30)
 
 #else /* CONFIG_64BIT */
@@ -51,6 +52,7 @@ extern void execve_tail(void);
 #define TASK_UNMAPPED_BASE	(test_thread_flag(TIF_31BIT) ? \
 					(1UL << 30) : (1UL << 41))
 #define TASK_SIZE		TASK_SIZE_OF(current)
+#define TASK_MAX_SIZE		(1UL << 53)
 
 #endif /* CONFIG_64BIT */
 
diff --git a/arch/s390/include/asm/serial.h b/arch/s390/include/asm/serial.h
new file mode 100644
index 000000000000..5b3e48ef534b
--- /dev/null
+++ b/arch/s390/include/asm/serial.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_S390_SERIAL_H
+#define _ASM_S390_SERIAL_H
+
+#define BASE_BAUD 0
+
+#endif /* _ASM_S390_SERIAL_H */
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index 80b6f11263c4..6dbd559763c9 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -8,6 +8,7 @@
 #define __ASM_SWITCH_TO_H
 
 #include <linux/thread_info.h>
+#include <asm/ptrace.h>
 
 extern struct task_struct *__switch_to(void *, void *);
 extern void update_cr_regs(struct task_struct *task);
@@ -68,12 +69,16 @@ static inline void restore_fp_regs(s390_fp_regs *fpregs)
 
 static inline void save_access_regs(unsigned int *acrs)
 {
-	asm volatile("stam 0,15,%0" : "=Q" (*acrs));
+	typedef struct { int _[NUM_ACRS]; } acrstype;
+
+	asm volatile("stam 0,15,%0" : "=Q" (*(acrstype *)acrs));
 }
 
 static inline void restore_access_regs(unsigned int *acrs)
 {
-	asm volatile("lam 0,15,%0" : : "Q" (*acrs));
+	typedef struct { int _[NUM_ACRS]; } acrstype;
+
+	asm volatile("lam 0,15,%0" : : "Q" (*(acrstype *)acrs));
 }
 
 #define switch_to(prev,next,last) do {					\
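
The save_access_regs()/restore_access_regs() change above is about inline-asm operand sizing: "=Q" (*acrs) declares only a single unsigned int as written, so the compiler was free to assume the slots for the other fifteen access registers stayed untouched. Casting through a struct that spans NUM_ACRS entries makes the whole array part of the operand. A hedged sketch of the same idiom, using the portable "m" constraint and an empty asm body standing in for the s390 stam/lam instructions:

#include <stdio.h>

#define NUM_REGS 16

static unsigned int regs[NUM_REGS];

static inline void save_regs(unsigned int *dst)
{
	/* Struct cast: the full 16-int array is the output operand,
	 * so the compiler may not assume only dst[0] changes. */
	typedef struct { unsigned int _[NUM_REGS]; } regstype;

	asm volatile("" : "=m" (*(regstype *)dst));	/* stand-in body */
}

int main(void)
{
	save_regs(regs);
	printf("saved %d registers\n", NUM_REGS);
	return 0;
}
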
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index b75d7d686684..2cb846c4b37f 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -32,6 +32,7 @@ struct mmu_gather {
 	struct mm_struct *mm;
 	struct mmu_table_batch *batch;
 	unsigned int fullmm;
+	unsigned long start, end;
 };
 
 struct mmu_table_batch {
@@ -48,10 +49,13 @@ extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
48 49
49static inline void tlb_gather_mmu(struct mmu_gather *tlb, 50static inline void tlb_gather_mmu(struct mmu_gather *tlb,
50 struct mm_struct *mm, 51 struct mm_struct *mm,
51 unsigned int full_mm_flush) 52 unsigned long start,
53 unsigned long end)
52{ 54{
53 tlb->mm = mm; 55 tlb->mm = mm;
54 tlb->fullmm = full_mm_flush; 56 tlb->start = start;
57 tlb->end = end;
58 tlb->fullmm = !(start | (end+1));
55 tlb->batch = NULL; 59 tlb->batch = NULL;
56 if (tlb->fullmm) 60 if (tlb->fullmm)
57 __tlb_flush_mm(mm); 61 __tlb_flush_mm(mm);
@@ -59,13 +63,14 @@ static inline void tlb_gather_mmu(struct mmu_gather *tlb,
59 63
60static inline void tlb_flush_mmu(struct mmu_gather *tlb) 64static inline void tlb_flush_mmu(struct mmu_gather *tlb)
61{ 65{
66 __tlb_flush_mm_lazy(tlb->mm);
62 tlb_table_flush(tlb); 67 tlb_table_flush(tlb);
63} 68}
64 69
65static inline void tlb_finish_mmu(struct mmu_gather *tlb, 70static inline void tlb_finish_mmu(struct mmu_gather *tlb,
66 unsigned long start, unsigned long end) 71 unsigned long start, unsigned long end)
67{ 72{
68 tlb_table_flush(tlb); 73 tlb_flush_mmu(tlb);
69} 74}
70 75
71/* 76/*
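
tlb_gather_mmu() now infers a full-mm flush from the range itself: the core mm code requests one by passing start = 0 and end = -1, and !(start | (end + 1)) is true for exactly that pair. A stand-alone check:

    #include <assert.h>

    /* Sketch: how tlb_gather_mmu() derives fullmm from the new
     * (start, end) arguments. */
    static int is_fullmm(unsigned long start, unsigned long end)
    {
            return !(start | (end + 1));
    }

    int main(void)
    {
            assert(is_fullmm(0, -1UL));         /* flush of the whole mm */
            assert(!is_fullmm(0x1000, 0x2000)); /* ranged flush */
            return 0;
    }
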
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index 6b32af30878c..f9fef0425fee 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -86,7 +86,7 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
86 __tlb_flush_full(mm); 86 __tlb_flush_full(mm);
87} 87}
88 88
89static inline void __tlb_flush_mm_cond(struct mm_struct * mm) 89static inline void __tlb_flush_mm_lazy(struct mm_struct * mm)
90{ 90{
91 if (mm->context.flush_mm) { 91 if (mm->context.flush_mm) {
92 __tlb_flush_mm(mm); 92 __tlb_flush_mm(mm);
@@ -118,13 +118,13 @@ static inline void __tlb_flush_mm_cond(struct mm_struct * mm)
118 118
119static inline void flush_tlb_mm(struct mm_struct *mm) 119static inline void flush_tlb_mm(struct mm_struct *mm)
120{ 120{
121 __tlb_flush_mm_cond(mm); 121 __tlb_flush_mm_lazy(mm);
122} 122}
123 123
124static inline void flush_tlb_range(struct vm_area_struct *vma, 124static inline void flush_tlb_range(struct vm_area_struct *vma,
125 unsigned long start, unsigned long end) 125 unsigned long start, unsigned long end)
126{ 126{
127 __tlb_flush_mm_cond(vma->vm_mm); 127 __tlb_flush_mm_lazy(vma->vm_mm);
128} 128}
129 129
130static inline void flush_tlb_kernel_range(unsigned long start, 130static inline void flush_tlb_kernel_range(unsigned long start,
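
The __tlb_flush_mm_cond() to __tlb_flush_mm_lazy() rename describes the contract better: code that defers a flush sets mm->context.flush_mm, and the lazy helper performs at most one hardware flush before clearing the flag, which tlb_flush_mmu() above now relies on. A minimal sketch of that contract; mm_ctx and tlb_flush_hw are stand-ins, not kernel symbols:

    #include <stdio.h>

    struct mm_ctx { int flush_mm; };

    static void tlb_flush_hw(struct mm_ctx *mm)
    {
            printf("hardware flush\n");     /* stands in for IDTE/CSP */
    }

    static void tlb_flush_mm_lazy(struct mm_ctx *mm)
    {
            if (mm->flush_mm) {
                    tlb_flush_hw(mm);
                    mm->flush_mm = 0;
            }
    }

    int main(void)
    {
            struct mm_ctx mm = { .flush_mm = 1 };

            tlb_flush_mm_lazy(&mm);         /* flushes once */
            tlb_flush_mm_lazy(&mm);         /* no-op: flag already consumed */
            return 0;
    }
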
diff --git a/arch/s390/include/asm/vtime.h b/arch/s390/include/asm/vtime.h
new file mode 100644
index 000000000000..af9896c53eb3
--- /dev/null
+++ b/arch/s390/include/asm/vtime.h
@@ -0,0 +1,7 @@
1#ifndef _S390_VTIME_H
2#define _S390_VTIME_H
3
4#define __ARCH_HAS_VTIME_ACCOUNT
5#define __ARCH_HAS_VTIME_TASK_SWITCH
6
7#endif /* _S390_VTIME_H */
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index be7a408be7a1..cc30d1fb000c 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -18,6 +18,7 @@
18#include <asm/unistd.h> 18#include <asm/unistd.h>
19#include <asm/page.h> 19#include <asm/page.h>
20#include <asm/sigp.h> 20#include <asm/sigp.h>
21#include <asm/irq.h>
21 22
22__PT_R0 = __PT_GPRS 23__PT_R0 = __PT_GPRS
23__PT_R1 = __PT_GPRS + 4 24__PT_R1 = __PT_GPRS + 4
@@ -435,6 +436,11 @@ io_skip:
435io_loop: 436io_loop:
436 l %r1,BASED(.Ldo_IRQ) 437 l %r1,BASED(.Ldo_IRQ)
437 lr %r2,%r11 # pass pointer to pt_regs 438 lr %r2,%r11 # pass pointer to pt_regs
439 lhi %r3,IO_INTERRUPT
440 tm __PT_INT_CODE+8(%r11),0x80 # adapter interrupt ?
441 jz io_call
442 lhi %r3,THIN_INTERRUPT
443io_call:
438 basr %r14,%r1 # call do_IRQ 444 basr %r14,%r1 # call do_IRQ
439 tm __LC_MACHINE_FLAGS+2,0x10 # MACHINE_FLAG_LPAR 445 tm __LC_MACHINE_FLAGS+2,0x10 # MACHINE_FLAG_LPAR
440 jz io_return 446 jz io_return
@@ -584,9 +590,10 @@ ext_skip:
584 mvc __PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR 590 mvc __PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR
585 mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS 591 mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
586 TRACE_IRQS_OFF 592 TRACE_IRQS_OFF
593 l %r1,BASED(.Ldo_IRQ)
587 lr %r2,%r11 # pass pointer to pt_regs 594 lr %r2,%r11 # pass pointer to pt_regs
588 l %r1,BASED(.Ldo_extint) 595 lhi %r3,EXT_INTERRUPT
589 basr %r14,%r1 # call do_extint 596 basr %r14,%r1 # call do_IRQ
590 j io_return 597 j io_return
591 598
592/* 599/*
@@ -879,13 +886,13 @@ cleanup_idle:
879 stm %r9,%r10,__LC_SYSTEM_TIMER 886 stm %r9,%r10,__LC_SYSTEM_TIMER
880 mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2) 887 mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
881 # prepare return psw 888 # prepare return psw
882 n %r8,BASED(cleanup_idle_wait) # clear wait state bit 889 n %r8,BASED(cleanup_idle_wait) # clear irq & wait state bits
883 l %r9,24(%r11) # return from psw_idle 890 l %r9,24(%r11) # return from psw_idle
884 br %r14 891 br %r14
885cleanup_idle_insn: 892cleanup_idle_insn:
886 .long psw_idle_lpsw + 0x80000000 893 .long psw_idle_lpsw + 0x80000000
887cleanup_idle_wait: 894cleanup_idle_wait:
888 .long 0xfffdffff 895 .long 0xfcfdffff
889 896
890/* 897/*
891 * Integer constants 898 * Integer constants
@@ -902,7 +909,6 @@ cleanup_idle_wait:
902.Ldo_machine_check: .long s390_do_machine_check 909.Ldo_machine_check: .long s390_do_machine_check
903.Lhandle_mcck: .long s390_handle_mcck 910.Lhandle_mcck: .long s390_handle_mcck
904.Ldo_IRQ: .long do_IRQ 911.Ldo_IRQ: .long do_IRQ
905.Ldo_extint: .long do_extint
906.Ldo_signal: .long do_signal 912.Ldo_signal: .long do_signal
907.Ldo_notify_resume: .long do_notify_resume 913.Ldo_notify_resume: .long do_notify_resume
908.Ldo_per_trap: .long do_per_trap 914.Ldo_per_trap: .long do_per_trap
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 1c039d0c24c7..2b2188b97c6a 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -19,6 +19,7 @@
19#include <asm/unistd.h> 19#include <asm/unistd.h>
20#include <asm/page.h> 20#include <asm/page.h>
21#include <asm/sigp.h> 21#include <asm/sigp.h>
22#include <asm/irq.h>
22 23
23__PT_R0 = __PT_GPRS 24__PT_R0 = __PT_GPRS
24__PT_R1 = __PT_GPRS + 8 25__PT_R1 = __PT_GPRS + 8
@@ -468,6 +469,11 @@ io_skip:
468 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 469 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
469io_loop: 470io_loop:
470 lgr %r2,%r11 # pass pointer to pt_regs 471 lgr %r2,%r11 # pass pointer to pt_regs
472 lghi %r3,IO_INTERRUPT
473 tm __PT_INT_CODE+8(%r11),0x80 # adapter interrupt ?
474 jz io_call
475 lghi %r3,THIN_INTERRUPT
476io_call:
471 brasl %r14,do_IRQ 477 brasl %r14,do_IRQ
472 tm __LC_MACHINE_FLAGS+6,0x10 # MACHINE_FLAG_LPAR 478 tm __LC_MACHINE_FLAGS+6,0x10 # MACHINE_FLAG_LPAR
473 jz io_return 479 jz io_return
@@ -623,7 +629,8 @@ ext_skip:
623 TRACE_IRQS_OFF 629 TRACE_IRQS_OFF
624 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 630 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
625 lgr %r2,%r11 # pass pointer to pt_regs 631 lgr %r2,%r11 # pass pointer to pt_regs
626 brasl %r14,do_extint 632 lghi %r3,EXT_INTERRUPT
633 brasl %r14,do_IRQ
627 j io_return 634 j io_return
628 635
629/* 636/*
@@ -922,7 +929,7 @@ cleanup_idle:
922 stg %r9,__LC_SYSTEM_TIMER 929 stg %r9,__LC_SYSTEM_TIMER
923 mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2) 930 mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
924 # prepare return psw 931 # prepare return psw
925 nihh %r8,0xfffd # clear wait state bit 932 nihh %r8,0xfcfd # clear irq & wait state bits
926 lg %r9,48(%r11) # return from psw_idle 933 lg %r9,48(%r11) # return from psw_idle
927 br %r14 934 br %r14
928cleanup_idle_insn: 935cleanup_idle_insn:
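
The widened immediate matters for the PSW that cleanup_idle reconstructs: 0xfffd cleared only the wait bit (PSW bit 14 in IBM numbering, bit 0 = MSB), while 0xfcfd also clears the I/O and external interrupt masks (PSW bits 6 and 7), so execution resumes out of wait state with interrupts disabled. A stand-alone check of the bit arithmetic for the 16 bits that nihh operates on:

    #include <assert.h>
    #include <stdint.h>

    /* PSW bit n within the leftmost halfword, numbered from the MSB. */
    #define PSW_BIT(n)      (1U << (15 - (n)))

    int main(void)
    {
            uint16_t cleared = (uint16_t)~0xfcfd;

            assert(cleared == (PSW_BIT(6) | PSW_BIT(7) | PSW_BIT(14)));
            return 0;
    }
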
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 54b0995514e8..b34ba0ea96a9 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -22,6 +22,7 @@
22#include <asm/cputime.h> 22#include <asm/cputime.h>
23#include <asm/lowcore.h> 23#include <asm/lowcore.h>
24#include <asm/irq.h> 24#include <asm/irq.h>
25#include <asm/hw_irq.h>
25#include "entry.h" 26#include "entry.h"
26 27
27DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_stat, irq_stat); 28DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_stat, irq_stat);
@@ -42,9 +43,10 @@ struct irq_class {
42 * Since the external and I/O interrupt fields are already sums we would end 43 * Since the external and I/O interrupt fields are already sums we would end
43 * up with having a sum which accounts each interrupt twice. 44 * up with having a sum which accounts each interrupt twice.
43 * up with a sum that counts each interrupt twice. 44 * up with a sum that counts each interrupt twice.
44 */ 45 */
45static const struct irq_class irqclass_main_desc[NR_IRQS] = { 46static const struct irq_class irqclass_main_desc[NR_IRQS_BASE] = {
46 [EXTERNAL_INTERRUPT] = {.name = "EXT"}, 47 [EXT_INTERRUPT] = {.name = "EXT"},
47 [IO_INTERRUPT] = {.name = "I/O"} 48 [IO_INTERRUPT] = {.name = "I/O"},
49 [THIN_INTERRUPT] = {.name = "AIO"},
48}; 50};
49 51
50/* 52/*
@@ -86,6 +88,28 @@ static const struct irq_class irqclass_sub_desc[NR_ARCH_IRQS] = {
86 [CPU_RST] = {.name = "RST", .desc = "[CPU] CPU Restart"}, 88 [CPU_RST] = {.name = "RST", .desc = "[CPU] CPU Restart"},
87}; 89};
88 90
91void __init init_IRQ(void)
92{
93 irq_reserve_irqs(0, THIN_INTERRUPT);
94 init_cio_interrupts();
95 init_airq_interrupts();
96 init_ext_interrupts();
97}
98
99void do_IRQ(struct pt_regs *regs, int irq)
100{
101 struct pt_regs *old_regs;
102
103 old_regs = set_irq_regs(regs);
104 irq_enter();
105 if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
106 /* Serve timer interrupts first. */
107 clock_comparator_work();
108 generic_handle_irq(irq);
109 irq_exit();
110 set_irq_regs(old_regs);
111}
112
89/* 113/*
90 * show_interrupts is needed by /proc/interrupts. 114 * show_interrupts is needed by /proc/interrupts.
91 */ 115 */
@@ -100,27 +124,36 @@ int show_interrupts(struct seq_file *p, void *v)
100 for_each_online_cpu(cpu) 124 for_each_online_cpu(cpu)
101 seq_printf(p, "CPU%d ", cpu); 125 seq_printf(p, "CPU%d ", cpu);
102 seq_putc(p, '\n'); 126 seq_putc(p, '\n');
127 goto out;
103 } 128 }
104 if (irq < NR_IRQS) { 129 if (irq < NR_IRQS) {
130 if (irq >= NR_IRQS_BASE)
131 goto out;
105 seq_printf(p, "%s: ", irqclass_main_desc[irq].name); 132 seq_printf(p, "%s: ", irqclass_main_desc[irq].name);
106 for_each_online_cpu(cpu) 133 for_each_online_cpu(cpu)
107 seq_printf(p, "%10u ", kstat_cpu(cpu).irqs[irq]); 134 seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
108 seq_putc(p, '\n'); 135 seq_putc(p, '\n');
109 goto skip_arch_irqs; 136 goto out;
110 } 137 }
111 for (irq = 0; irq < NR_ARCH_IRQS; irq++) { 138 for (irq = 0; irq < NR_ARCH_IRQS; irq++) {
112 seq_printf(p, "%s: ", irqclass_sub_desc[irq].name); 139 seq_printf(p, "%s: ", irqclass_sub_desc[irq].name);
113 for_each_online_cpu(cpu) 140 for_each_online_cpu(cpu)
114 seq_printf(p, "%10u ", per_cpu(irq_stat, cpu).irqs[irq]); 141 seq_printf(p, "%10u ",
142 per_cpu(irq_stat, cpu).irqs[irq]);
115 if (irqclass_sub_desc[irq].desc) 143 if (irqclass_sub_desc[irq].desc)
116 seq_printf(p, " %s", irqclass_sub_desc[irq].desc); 144 seq_printf(p, " %s", irqclass_sub_desc[irq].desc);
117 seq_putc(p, '\n'); 145 seq_putc(p, '\n');
118 } 146 }
119skip_arch_irqs: 147out:
120 put_online_cpus(); 148 put_online_cpus();
121 return 0; 149 return 0;
122} 150}
123 151
152int arch_show_interrupts(struct seq_file *p, int prec)
153{
154 return 0;
155}
156
124/* 157/*
125 * Switch to the asynchronous interrupt stack for softirq execution. 158 * Switch to the asynchronous interrupt stack for softirq execution.
126 */ 159 */
@@ -159,14 +192,6 @@ asmlinkage void do_softirq(void)
159 local_irq_restore(flags); 192 local_irq_restore(flags);
160} 193}
161 194
162#ifdef CONFIG_PROC_FS
163void init_irq_proc(void)
164{
165 if (proc_mkdir("irq", NULL))
166 create_prof_cpu_mask();
167}
168#endif
169
170/* 195/*
171 * ext_int_hash[index] is the list head for all external interrupts that hash 196 * ext_int_hash[index] is the list head for all external interrupts that hash
172 * to this index. 197 * to this index.
@@ -183,14 +208,6 @@ struct ext_int_info {
183/* ext_int_hash_lock protects the handler lists for external interrupts */ 208/* ext_int_hash_lock protects the handler lists for external interrupts */
184DEFINE_SPINLOCK(ext_int_hash_lock); 209DEFINE_SPINLOCK(ext_int_hash_lock);
185 210
186static void __init init_external_interrupts(void)
187{
188 int idx;
189
190 for (idx = 0; idx < ARRAY_SIZE(ext_int_hash); idx++)
191 INIT_LIST_HEAD(&ext_int_hash[idx]);
192}
193
194static inline int ext_hash(u16 code) 211static inline int ext_hash(u16 code)
195{ 212{
196 return (code + (code >> 9)) & 0xff; 213 return (code + (code >> 9)) & 0xff;
@@ -234,20 +251,13 @@ int unregister_external_interrupt(u16 code, ext_int_handler_t handler)
234} 251}
235EXPORT_SYMBOL(unregister_external_interrupt); 252EXPORT_SYMBOL(unregister_external_interrupt);
236 253
237void __irq_entry do_extint(struct pt_regs *regs) 254static irqreturn_t do_ext_interrupt(int irq, void *dummy)
238{ 255{
256 struct pt_regs *regs = get_irq_regs();
239 struct ext_code ext_code; 257 struct ext_code ext_code;
240 struct pt_regs *old_regs;
241 struct ext_int_info *p; 258 struct ext_int_info *p;
242 int index; 259 int index;
243 260
244 old_regs = set_irq_regs(regs);
245 irq_enter();
246 if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator) {
247 /* Serve timer interrupts first. */
248 clock_comparator_work();
249 }
250 kstat_incr_irqs_this_cpu(EXTERNAL_INTERRUPT, NULL);
251 ext_code = *(struct ext_code *) &regs->int_code; 261 ext_code = *(struct ext_code *) &regs->int_code;
252 if (ext_code.code != 0x1004) 262 if (ext_code.code != 0x1004)
253 __get_cpu_var(s390_idle).nohz_delay = 1; 263 __get_cpu_var(s390_idle).nohz_delay = 1;
@@ -259,13 +269,25 @@ void __irq_entry do_extint(struct pt_regs *regs)
259 p->handler(ext_code, regs->int_parm, 269 p->handler(ext_code, regs->int_parm,
260 regs->int_parm_long); 270 regs->int_parm_long);
261 rcu_read_unlock(); 271 rcu_read_unlock();
262 irq_exit(); 272
263 set_irq_regs(old_regs); 273 return IRQ_HANDLED;
264} 274}
265 275
266void __init init_IRQ(void) 276static struct irqaction external_interrupt = {
277 .name = "EXT",
278 .handler = do_ext_interrupt,
279};
280
281void __init init_ext_interrupts(void)
267{ 282{
268 init_external_interrupts(); 283 int idx;
284
285 for (idx = 0; idx < ARRAY_SIZE(ext_int_hash); idx++)
286 INIT_LIST_HEAD(&ext_int_hash[idx]);
287
288 irq_set_chip_and_handler(EXT_INTERRUPT,
289 &dummy_irq_chip, handle_percpu_irq);
290 setup_irq(EXT_INTERRUPT, &external_interrupt);
269} 291}
270 292
271static DEFINE_SPINLOCK(sc_irq_lock); 293static DEFINE_SPINLOCK(sc_irq_lock);
@@ -313,69 +335,3 @@ void measurement_alert_subclass_unregister(void)
313 spin_unlock(&ma_subclass_lock); 335 spin_unlock(&ma_subclass_lock);
314} 336}
315EXPORT_SYMBOL(measurement_alert_subclass_unregister); 337EXPORT_SYMBOL(measurement_alert_subclass_unregister);
316
317#ifdef CONFIG_SMP
318void synchronize_irq(unsigned int irq)
319{
320 /*
321 * Not needed, the handler is protected by a lock and IRQs that occur
322 * after the handler is deleted are just NOPs.
323 */
324}
325EXPORT_SYMBOL_GPL(synchronize_irq);
326#endif
327
328#ifndef CONFIG_PCI
329
330/* Only PCI devices have dynamically-defined IRQ handlers */
331
332int request_irq(unsigned int irq, irq_handler_t handler,
333 unsigned long irqflags, const char *devname, void *dev_id)
334{
335 return -EINVAL;
336}
337EXPORT_SYMBOL_GPL(request_irq);
338
339void free_irq(unsigned int irq, void *dev_id)
340{
341 WARN_ON(1);
342}
343EXPORT_SYMBOL_GPL(free_irq);
344
345void enable_irq(unsigned int irq)
346{
347 WARN_ON(1);
348}
349EXPORT_SYMBOL_GPL(enable_irq);
350
351void disable_irq(unsigned int irq)
352{
353 WARN_ON(1);
354}
355EXPORT_SYMBOL_GPL(disable_irq);
356
357#endif /* !CONFIG_PCI */
358
359void disable_irq_nosync(unsigned int irq)
360{
361 disable_irq(irq);
362}
363EXPORT_SYMBOL_GPL(disable_irq_nosync);
364
365unsigned long probe_irq_on(void)
366{
367 return 0;
368}
369EXPORT_SYMBOL_GPL(probe_irq_on);
370
371int probe_irq_off(unsigned long val)
372{
373 return 0;
374}
375EXPORT_SYMBOL_GPL(probe_irq_off);
376
377unsigned int probe_irq_mask(unsigned long val)
378{
379 return val;
380}
381EXPORT_SYMBOL_GPL(probe_irq_mask);
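
With do_extint() gone, external interruptions now arrive through the generic irqaction set up by init_ext_interrupts() and fan out via the hashed handler lists, so the existing register_external_interrupt() interface is unchanged for its users. A hedged sketch of such a user; the 0x1234 interruption code and the names are made up for illustration:

    #include <linux/init.h>
    #include <asm/irq.h>

    /* 0x1234 is a hypothetical external-interruption code. */
    static void sample_ext_handler(struct ext_code ext_code,
                                   unsigned int param32, unsigned long param64)
    {
            /* called from do_ext_interrupt() with interrupts disabled */
    }

    static int __init sample_init(void)
    {
            return register_external_interrupt(0x1234, sample_ext_handler);
    }
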
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 3388b2b2a07d..adbbe7f1cb0d 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -105,14 +105,31 @@ static int __kprobes get_fixup_type(kprobe_opcode_t *insn)
105 fixup |= FIXUP_RETURN_REGISTER; 105 fixup |= FIXUP_RETURN_REGISTER;
106 break; 106 break;
107 case 0xeb: 107 case 0xeb:
108 if ((insn[2] & 0xff) == 0x44 || /* bxhg */ 108 switch (insn[2] & 0xff) {
109 (insn[2] & 0xff) == 0x45) /* bxleg */ 109 case 0x44: /* bxhg */
110 case 0x45: /* bxleg */
110 fixup = FIXUP_BRANCH_NOT_TAKEN; 111 fixup = FIXUP_BRANCH_NOT_TAKEN;
112 break;
113 }
111 break; 114 break;
112 case 0xe3: /* bctg */ 115 case 0xe3: /* bctg */
113 if ((insn[2] & 0xff) == 0x46) 116 if ((insn[2] & 0xff) == 0x46)
114 fixup = FIXUP_BRANCH_NOT_TAKEN; 117 fixup = FIXUP_BRANCH_NOT_TAKEN;
115 break; 118 break;
119 case 0xec:
120 switch (insn[2] & 0xff) {
121 case 0xe5: /* clgrb */
122 case 0xe6: /* cgrb */
123 case 0xf6: /* crb */
124 case 0xf7: /* clrb */
125 case 0xfc: /* cgib */
126 case 0xfd: /* cglib */
127 case 0xfe: /* cib */
128 case 0xff: /* clib */
129 fixup = FIXUP_BRANCH_NOT_TAKEN;
130 break;
131 }
132 break;
116 } 133 }
117 return fixup; 134 return fixup;
118} 135}
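
The new 0xec case covers the compare-and-branch family: for these RRS/RIS-format instructions the second opcode byte sits in the low byte of the third halfword, which is what insn[2] & 0xff extracts. A stand-alone sketch of the classification, using 16-bit halfwords as in the kprobe instruction buffer (the operand bytes in the test are hypothetical):

    #include <assert.h>
    #include <stdint.h>

    static int needs_branch_fixup_ec(const uint16_t *insn)
    {
            if ((insn[0] >> 8) != 0xec)
                    return 0;
            switch (insn[2] & 0xff) {
            case 0xe5: case 0xe6: case 0xf6: case 0xf7: /* clgrb cgrb crb clrb */
            case 0xfc: case 0xfd: case 0xfe: case 0xff: /* cgib cglib cib clib */
                    return 1;
            }
            return 0;
    }

    int main(void)
    {
            uint16_t crb[3] = { 0xec12, 0x3000, 0x00f6 }; /* hypothetical crb */

            assert(needs_branch_fixup_ec(crb));
            return 0;
    }
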
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 504175ebf8b0..c4c033819879 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -214,10 +214,7 @@ static int notrace s390_revalidate_registers(struct mci *mci)
214 : "0", "cc"); 214 : "0", "cc");
215#endif 215#endif
216 /* Revalidate clock comparator register */ 216 /* Revalidate clock comparator register */
217 if (S390_lowcore.clock_comparator == -1) 217 set_clock_comparator(S390_lowcore.clock_comparator);
218 set_clock_comparator(S390_lowcore.mcck_clock);
219 else
220 set_clock_comparator(S390_lowcore.clock_comparator);
221 /* Check if old PSW is valid */ 218 /* Check if old PSW is valid */
222 if (!mci->wp) 219 if (!mci->wp)
223 /* 220 /*
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index a6fc037671b1..500aa1029bcb 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -52,12 +52,13 @@ static struct kvm_s390_sie_block *sie_block(struct pt_regs *regs)
52 52
53static bool is_in_guest(struct pt_regs *regs) 53static bool is_in_guest(struct pt_regs *regs)
54{ 54{
55 unsigned long ip = instruction_pointer(regs);
56
57 if (user_mode(regs)) 55 if (user_mode(regs))
58 return false; 56 return false;
59 57#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
60 return ip == (unsigned long) &sie_exit; 58 return instruction_pointer(regs) == (unsigned long) &sie_exit;
59#else
60 return false;
61#endif
61} 62}
62 63
63static unsigned long guest_is_user_mode(struct pt_regs *regs) 64static unsigned long guest_is_user_mode(struct pt_regs *regs)
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 2bc3eddae34a..c5dbb335716d 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -71,6 +71,7 @@ void arch_cpu_idle(void)
71 } 71 }
72 /* Halt the cpu and keep track of cpu time accounting. */ 72 /* Halt the cpu and keep track of cpu time accounting. */
73 vtime_stop_cpu(); 73 vtime_stop_cpu();
74 local_irq_enable();
74} 75}
75 76
76void arch_cpu_idle_exit(void) 77void arch_cpu_idle_exit(void)
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index e9fadb04e3c6..9556905bd3ce 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -60,11 +60,11 @@ void update_cr_regs(struct task_struct *task)
60 60
61 __ctl_store(cr, 0, 2); 61 __ctl_store(cr, 0, 2);
62 cr_new[1] = cr[1]; 62 cr_new[1] = cr[1];
63 /* Set or clear transaction execution TXC/PIFO bits 8 and 9. */ 63 /* Set or clear transaction execution TXC bit 8. */
64 if (task->thread.per_flags & PER_FLAG_NO_TE) 64 if (task->thread.per_flags & PER_FLAG_NO_TE)
65 cr_new[0] = cr[0] & ~(3UL << 54); 65 cr_new[0] = cr[0] & ~(1UL << 55);
66 else 66 else
67 cr_new[0] = cr[0] | (3UL << 54); 67 cr_new[0] = cr[0] | (1UL << 55);
68 /* Set or clear transaction execution TDC bits 62 and 63. */ 68 /* Set or clear transaction execution TDC bits 62 and 63. */
69 cr_new[2] = cr[2] & ~3UL; 69 cr_new[2] = cr[2] & ~3UL;
70 if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) { 70 if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
@@ -1299,7 +1299,7 @@ int regs_query_register_offset(const char *name)
1299 1299
1300 if (!name || *name != 'r') 1300 if (!name || *name != 'r')
1301 return -EINVAL; 1301 return -EINVAL;
1302 if (strict_strtoul(name + 1, 10, &offset)) 1302 if (kstrtoul(name + 1, 10, &offset))
1303 return -EINVAL; 1303 return -EINVAL;
1304 if (offset >= NUM_GPRS) 1304 if (offset >= NUM_GPRS)
1305 return -EINVAL; 1305 return -EINVAL;
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 497451ec5e26..aeed8a61fa0d 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -994,6 +994,7 @@ static void __init setup_hwcaps(void)
994 strcpy(elf_platform, "z196"); 994 strcpy(elf_platform, "z196");
995 break; 995 break;
996 case 0x2827: 996 case 0x2827:
997 case 0x2828:
997 strcpy(elf_platform, "zEC12"); 998 strcpy(elf_platform, "zEC12");
998 break; 999 break;
999 } 1000 }
diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c
index c479d2f9605b..737bff38e3ee 100644
--- a/arch/s390/kernel/suspend.c
+++ b/arch/s390/kernel/suspend.c
@@ -10,6 +10,9 @@
10#include <linux/suspend.h> 10#include <linux/suspend.h>
11#include <linux/mm.h> 11#include <linux/mm.h>
12#include <asm/ctl_reg.h> 12#include <asm/ctl_reg.h>
13#include <asm/ipl.h>
14#include <asm/cio.h>
15#include <asm/pci.h>
13 16
14/* 17/*
15 * References to section boundaries 18 * References to section boundaries
@@ -211,3 +214,11 @@ void restore_processor_state(void)
211 __ctl_set_bit(0,28); 214 __ctl_set_bit(0,28);
212 local_mcck_enable(); 215 local_mcck_enable();
213} 216}
217
218/* Called at the end of swsusp_arch_resume */
219void s390_early_resume(void)
220{
221 lgr_info_log();
222 channel_subsystem_reinit();
223 zpci_rescan();
224}
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S
index c487be4cfc81..6b09fdffbd2f 100644
--- a/arch/s390/kernel/swsusp_asm64.S
+++ b/arch/s390/kernel/swsusp_asm64.S
@@ -281,11 +281,8 @@ restore_registers:
281 lghi %r2,0 281 lghi %r2,0
282 brasl %r14,arch_set_page_states 282 brasl %r14,arch_set_page_states
283 283
284 /* Log potential guest relocation */ 284 /* Call arch specific early resume code */
285 brasl %r14,lgr_info_log 285 brasl %r14,s390_early_resume
286
287 /* Reinitialize the channel subsystem */
288 brasl %r14,channel_subsystem_reinit
289 286
290 /* Return 0 */ 287 /* Return 0 */
291 lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15) 288 lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 876546b9cfa1..064c3082ab33 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -92,7 +92,6 @@ void clock_comparator_work(void)
92 struct clock_event_device *cd; 92 struct clock_event_device *cd;
93 93
94 S390_lowcore.clock_comparator = -1ULL; 94 S390_lowcore.clock_comparator = -1ULL;
95 set_clock_comparator(S390_lowcore.clock_comparator);
96 cd = &__get_cpu_var(comparators); 95 cd = &__get_cpu_var(comparators);
97 cd->event_handler(cd); 96 cd->event_handler(cd);
98} 97}
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index d7776281cb60..05d75c413137 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -63,7 +63,7 @@ static int __init vdso_setup(char *s)
63 else if (strncmp(s, "off", 4) == 0) 63 else if (strncmp(s, "off", 4) == 0)
64 vdso_enabled = 0; 64 vdso_enabled = 0;
65 else { 65 else {
66 rc = strict_strtoul(s, 0, &val); 66 rc = kstrtoul(s, 0, &val);
67 vdso_enabled = rc ? 0 : !!val; 67 vdso_enabled = rc ? 0 : !!val;
68 } 68 }
69 return !rc; 69 return !rc;
@@ -113,11 +113,11 @@ int vdso_alloc_per_cpu(struct _lowcore *lowcore)
113 113
114 clear_table((unsigned long *) segment_table, _SEGMENT_ENTRY_EMPTY, 114 clear_table((unsigned long *) segment_table, _SEGMENT_ENTRY_EMPTY,
115 PAGE_SIZE << SEGMENT_ORDER); 115 PAGE_SIZE << SEGMENT_ORDER);
116 clear_table((unsigned long *) page_table, _PAGE_TYPE_EMPTY, 116 clear_table((unsigned long *) page_table, _PAGE_INVALID,
117 256*sizeof(unsigned long)); 117 256*sizeof(unsigned long));
118 118
119 *(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table; 119 *(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table;
120 *(unsigned long *) page_table = _PAGE_RO + page_frame; 120 *(unsigned long *) page_table = _PAGE_PROTECT + page_frame;
121 121
122 psal = (u32 *) (page_table + 256*sizeof(unsigned long)); 122 psal = (u32 *) (page_table + 256*sizeof(unsigned long));
123 aste = psal + 32; 123 aste = psal + 32;
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 9b9c1b78ec67..abcfab55f99b 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -19,6 +19,7 @@
19#include <asm/irq_regs.h> 19#include <asm/irq_regs.h>
20#include <asm/cputime.h> 20#include <asm/cputime.h>
21#include <asm/vtimer.h> 21#include <asm/vtimer.h>
22#include <asm/vtime.h>
22#include <asm/irq.h> 23#include <asm/irq.h>
23#include "entry.h" 24#include "entry.h"
24 25
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index 3074475c8ae0..3a74d8af0d69 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -119,12 +119,21 @@ static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
119 * The layout is as follows: 119 * The layout is as follows:
120 * - gpr 2 contains the subchannel id (passed as addr) 120 * - gpr 2 contains the subchannel id (passed as addr)
121 * - gpr 3 contains the virtqueue index (passed as datamatch) 121 * - gpr 3 contains the virtqueue index (passed as datamatch)
122 * - gpr 4 contains the index on the bus (optionally)
122 */ 123 */
123 ret = kvm_io_bus_write(vcpu->kvm, KVM_VIRTIO_CCW_NOTIFY_BUS, 124 ret = kvm_io_bus_write_cookie(vcpu->kvm, KVM_VIRTIO_CCW_NOTIFY_BUS,
124 vcpu->run->s.regs.gprs[2], 125 vcpu->run->s.regs.gprs[2],
125 8, &vcpu->run->s.regs.gprs[3]); 126 8, &vcpu->run->s.regs.gprs[3],
127 vcpu->run->s.regs.gprs[4]);
126 srcu_read_unlock(&vcpu->kvm->srcu, idx); 128 srcu_read_unlock(&vcpu->kvm->srcu, idx);
127 /* kvm_io_bus_write returns -EOPNOTSUPP if it found no match. */ 129
130 /*
131 * Return cookie in gpr 2, but don't overwrite the register if the
132 * diagnose will be handled by userspace.
133 */
134 if (ret != -EOPNOTSUPP)
135 vcpu->run->s.regs.gprs[2] = ret;
136 /* kvm_io_bus_write_cookie returns -EOPNOTSUPP if it found no match. */
128 return ret < 0 ? ret : 0; 137 return ret < 0 ? ret : 0;
129} 138}
130 139
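
The cookie turns repeated notifications into a fast lookup: kvm_io_bus_write_cookie() can try the device recorded in the cookie first instead of searching the whole bus. A sketch of the guest half of the convention, pieced together from the register layout in the comment above (subcode 3 is KVM_S390_VIRTIO_CCW_NOTIFY; the function name is illustrative, and the asm is s390-only):

    static inline long kvm_notify_sketch(unsigned int schid,
                                         unsigned long queue, long cookie)
    {
            register unsigned long nr asm("1") = 3;  /* CCW_NOTIFY subcode */
            register unsigned long sch asm("2") = schid;
            register unsigned long idx asm("3") = queue;
            register long ck asm("4") = cookie;
            register long rc asm("2");

            asm volatile("diag 2,4,0x500"
                         : "=d" (rc)
                         : "d" (nr), "d" (sch), "d" (idx), "d" (ck)
                         : "memory", "cc");
            return rc;      /* new cookie, or a negative error */
    }
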
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index 302e0e52b009..99d789e8a018 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -42,9 +42,11 @@ static inline void __user *__gptr_to_uptr(struct kvm_vcpu *vcpu,
42({ \ 42({ \
43 __typeof__(gptr) __uptr = __gptr_to_uptr(vcpu, gptr, 1);\ 43 __typeof__(gptr) __uptr = __gptr_to_uptr(vcpu, gptr, 1);\
44 int __mask = sizeof(__typeof__(*(gptr))) - 1; \ 44 int __mask = sizeof(__typeof__(*(gptr))) - 1; \
45 int __ret = PTR_RET((void __force *)__uptr); \ 45 int __ret; \
46 \ 46 \
47 if (!__ret) { \ 47 if (IS_ERR((void __force *)__uptr)) { \
48 __ret = PTR_ERR((void __force *)__uptr); \
49 } else { \
48 BUG_ON((unsigned long)__uptr & __mask); \ 50 BUG_ON((unsigned long)__uptr & __mask); \
49 __ret = get_user(x, __uptr); \ 51 __ret = get_user(x, __uptr); \
50 } \ 52 } \
@@ -55,9 +57,11 @@ static inline void __user *__gptr_to_uptr(struct kvm_vcpu *vcpu,
55({ \ 57({ \
56 __typeof__(gptr) __uptr = __gptr_to_uptr(vcpu, gptr, 1);\ 58 __typeof__(gptr) __uptr = __gptr_to_uptr(vcpu, gptr, 1);\
57 int __mask = sizeof(__typeof__(*(gptr))) - 1; \ 59 int __mask = sizeof(__typeof__(*(gptr))) - 1; \
58 int __ret = PTR_RET((void __force *)__uptr); \ 60 int __ret; \
59 \ 61 \
60 if (!__ret) { \ 62 if (IS_ERR((void __force *)__uptr)) { \
63 __ret = PTR_ERR((void __force *)__uptr); \
64 } else { \
61 BUG_ON((unsigned long)__uptr & __mask); \ 65 BUG_ON((unsigned long)__uptr & __mask); \
62 __ret = put_user(x, __uptr); \ 66 __ret = put_user(x, __uptr); \
63 } \ 67 } \
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index ba694d2ba51e..776dafe918db 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -28,6 +28,7 @@
28#include <asm/pgtable.h> 28#include <asm/pgtable.h>
29#include <asm/nmi.h> 29#include <asm/nmi.h>
30#include <asm/switch_to.h> 30#include <asm/switch_to.h>
31#include <asm/facility.h>
31#include <asm/sclp.h> 32#include <asm/sclp.h>
32#include "kvm-s390.h" 33#include "kvm-s390.h"
33#include "gaccess.h" 34#include "gaccess.h"
@@ -84,9 +85,15 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
84 { NULL } 85 { NULL }
85}; 86};
86 87
87static unsigned long long *facilities; 88unsigned long *vfacilities;
88static struct gmap_notifier gmap_notifier; 89static struct gmap_notifier gmap_notifier;
89 90
91/* test availability of vfacility */
92static inline int test_vfacility(unsigned long nr)
93{
94 return __test_facility(nr, (void *) vfacilities);
95}
96
90/* Section: not file related */ 97/* Section: not file related */
91int kvm_arch_hardware_enable(void *garbage) 98int kvm_arch_hardware_enable(void *garbage)
92{ 99{
@@ -387,7 +394,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
387 vcpu->arch.sie_block->ecb = 6; 394 vcpu->arch.sie_block->ecb = 6;
388 vcpu->arch.sie_block->ecb2 = 8; 395 vcpu->arch.sie_block->ecb2 = 8;
389 vcpu->arch.sie_block->eca = 0xC1002001U; 396 vcpu->arch.sie_block->eca = 0xC1002001U;
390 vcpu->arch.sie_block->fac = (int) (long) facilities; 397 vcpu->arch.sie_block->fac = (int) (long) vfacilities;
391 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); 398 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
392 tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet, 399 tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
393 (unsigned long) vcpu); 400 (unsigned long) vcpu);
@@ -702,14 +709,25 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
702 return rc; 709 return rc;
703 710
704 vcpu->arch.sie_block->icptcode = 0; 711 vcpu->arch.sie_block->icptcode = 0;
705 preempt_disable();
706 kvm_guest_enter();
707 preempt_enable();
708 VCPU_EVENT(vcpu, 6, "entering sie flags %x", 712 VCPU_EVENT(vcpu, 6, "entering sie flags %x",
709 atomic_read(&vcpu->arch.sie_block->cpuflags)); 713 atomic_read(&vcpu->arch.sie_block->cpuflags));
710 trace_kvm_s390_sie_enter(vcpu, 714 trace_kvm_s390_sie_enter(vcpu,
711 atomic_read(&vcpu->arch.sie_block->cpuflags)); 715 atomic_read(&vcpu->arch.sie_block->cpuflags));
716
717 /*
718 * As PF_VCPU will be used in fault handler, between guest_enter
719 * and guest_exit should be no uaccess.
720 */
721 preempt_disable();
722 kvm_guest_enter();
723 preempt_enable();
712 rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs); 724 rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
725 kvm_guest_exit();
726
727 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
728 vcpu->arch.sie_block->icptcode);
729 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
730
713 if (rc > 0) 731 if (rc > 0)
714 rc = 0; 732 rc = 0;
715 if (rc < 0) { 733 if (rc < 0) {
@@ -721,10 +739,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
721 rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 739 rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
722 } 740 }
723 } 741 }
724 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
725 vcpu->arch.sie_block->icptcode);
726 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
727 kvm_guest_exit();
728 742
729 memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16); 743 memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
730 return rc; 744 return rc;
@@ -1056,6 +1070,10 @@ int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
1056 return 0; 1070 return 0;
1057} 1071}
1058 1072
1073void kvm_arch_memslots_updated(struct kvm *kvm)
1074{
1075}
1076
1059/* Section: memory related */ 1077/* Section: memory related */
1060int kvm_arch_prepare_memory_region(struct kvm *kvm, 1078int kvm_arch_prepare_memory_region(struct kvm *kvm,
1061 struct kvm_memory_slot *memslot, 1079 struct kvm_memory_slot *memslot,
@@ -1122,20 +1140,20 @@ static int __init kvm_s390_init(void)
1122 * to hold the maximum amount of facilities. On the other hand, we 1140 * to hold the maximum amount of facilities. On the other hand, we
1123 * only set facilities that are known to work in KVM. 1141 * only set facilities that are known to work in KVM.
1124 */ 1142 */
1125 facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA); 1143 vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
1126 if (!facilities) { 1144 if (!vfacilities) {
1127 kvm_exit(); 1145 kvm_exit();
1128 return -ENOMEM; 1146 return -ENOMEM;
1129 } 1147 }
1130 memcpy(facilities, S390_lowcore.stfle_fac_list, 16); 1148 memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
1131 facilities[0] &= 0xff82fff3f47c0000ULL; 1149 vfacilities[0] &= 0xff82fff3f47c0000UL;
1132 facilities[1] &= 0x001c000000000000ULL; 1150 vfacilities[1] &= 0x001c000000000000UL;
1133 return 0; 1151 return 0;
1134} 1152}
1135 1153
1136static void __exit kvm_s390_exit(void) 1154static void __exit kvm_s390_exit(void)
1137{ 1155{
1138 free_page((unsigned long) facilities); 1156 free_page((unsigned long) vfacilities);
1139 kvm_exit(); 1157 kvm_exit();
1140} 1158}
1141 1159
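
The vfacilities words use the architecture's facility-bit numbering, which starts at the most significant bit of byte 0 — that is why the hex masks read "backwards". A stand-alone re-implementation of the lookup, mirroring what __test_facility() does, plus a check that facility 14 (non-quiescing key setting, which the PFMF change in priv.c below tests on the host) stays set in word 0 of the mask:

    #include <assert.h>
    #include <stdint.h>

    static int test_fac(const uint8_t *list, unsigned int nr)
    {
            return (list[nr >> 3] & (0x80 >> (nr & 7))) != 0;
    }

    int main(void)
    {
            uint64_t word0 = 0xff82fff3f47c0000ULL;
            uint8_t list[8];
            int i;

            for (i = 0; i < 8; i++)         /* big-endian bytes, as on s390 */
                    list[i] = word0 >> (56 - 8 * i);
            assert(test_fac(list, 0));      /* facility 0 survives the mask */
            assert(!test_fac(list, 9));     /* facility 9 is filtered out */
            assert(test_fac(list, 14));     /* non-quiescing key setting */
            return 0;
    }
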
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 028ca9fd2158..dc99f1ca4267 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -24,6 +24,9 @@
24 24
25typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu); 25typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu);
26 26
27/* declare vfacilities extern */
28extern unsigned long *vfacilities;
29
 27/* negative values are error codes, positive values for internal conditions */ 30/* negative values are error codes, positive values for internal conditions */
28#define SIE_INTERCEPT_RERUNVCPU (1<<0) 31#define SIE_INTERCEPT_RERUNVCPU (1<<0)
29#define SIE_INTERCEPT_UCONTROL (1<<1) 32#define SIE_INTERCEPT_UCONTROL (1<<1)
@@ -112,6 +115,13 @@ static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu)
112 return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2; 115 return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
113} 116}
114 117
118/* Set the condition code in the guest program status word */
119static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc)
120{
121 vcpu->arch.sie_block->gpsw.mask &= ~(3UL << 44);
122 vcpu->arch.sie_block->gpsw.mask |= cc << 44;
123}
124
115int kvm_s390_handle_wait(struct kvm_vcpu *vcpu); 125int kvm_s390_handle_wait(struct kvm_vcpu *vcpu);
116enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer); 126enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer);
117void kvm_s390_tasklet(unsigned long parm); 127void kvm_s390_tasklet(unsigned long parm);
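
kvm_s390_set_psw_cc() centralizes a mask dance that several priv.c handlers below previously open-coded: the condition code occupies PSW bits 18-19 in IBM numbering (bit 0 = MSB), which is bits 45-44 counted from the LSB — hence 3UL << 44. A stand-alone check of that correspondence:

    #include <assert.h>

    int main(void)
    {
            /* PSW bit n (bit 0 = MSB) is 1UL << (63 - n) in a 64-bit word. */
            assert((3ULL << 44) == ((1ULL << (63 - 18)) | (1ULL << (63 - 19))));
            return 0;
    }
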
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 0da3e6eb6be6..59200ee275e5 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -16,6 +16,7 @@
16#include <linux/errno.h> 16#include <linux/errno.h>
17#include <linux/compat.h> 17#include <linux/compat.h>
18#include <asm/asm-offsets.h> 18#include <asm/asm-offsets.h>
19#include <asm/facility.h>
19#include <asm/current.h> 20#include <asm/current.h>
20#include <asm/debug.h> 21#include <asm/debug.h>
21#include <asm/ebcdic.h> 22#include <asm/ebcdic.h>
@@ -163,8 +164,7 @@ static int handle_tpi(struct kvm_vcpu *vcpu)
163 kfree(inti); 164 kfree(inti);
164no_interrupt: 165no_interrupt:
165 /* Set condition code and we're done. */ 166 /* Set condition code and we're done. */
166 vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44); 167 kvm_s390_set_psw_cc(vcpu, cc);
167 vcpu->arch.sie_block->gpsw.mask |= (cc & 3ul) << 44;
168 return 0; 168 return 0;
169} 169}
170 170
@@ -219,15 +219,13 @@ static int handle_io_inst(struct kvm_vcpu *vcpu)
 219 * Set condition code 3 to stop the guest from issuing channel 219 * Set condition code 3 to stop the guest from issuing channel
220 * I/O instructions. 220 * I/O instructions.
221 */ 221 */
222 vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44); 222 kvm_s390_set_psw_cc(vcpu, 3);
223 vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44;
224 return 0; 223 return 0;
225 } 224 }
226} 225}
227 226
228static int handle_stfl(struct kvm_vcpu *vcpu) 227static int handle_stfl(struct kvm_vcpu *vcpu)
229{ 228{
230 unsigned int facility_list;
231 int rc; 229 int rc;
232 230
233 vcpu->stat.instruction_stfl++; 231 vcpu->stat.instruction_stfl++;
@@ -235,15 +233,13 @@ static int handle_stfl(struct kvm_vcpu *vcpu)
235 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 233 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
236 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 234 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
237 235
238 /* only pass the facility bits, which we can handle */
239 facility_list = S390_lowcore.stfl_fac_list & 0xff82fff3;
240
241 rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list), 236 rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list),
242 &facility_list, sizeof(facility_list)); 237 vfacilities, 4);
243 if (rc) 238 if (rc)
244 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); 239 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
245 VCPU_EVENT(vcpu, 5, "store facility list value %x", facility_list); 240 VCPU_EVENT(vcpu, 5, "store facility list value %x",
246 trace_kvm_s390_handle_stfl(vcpu, facility_list); 241 *(unsigned int *) vfacilities);
242 trace_kvm_s390_handle_stfl(vcpu, *(unsigned int *) vfacilities);
247 return 0; 243 return 0;
248} 244}
249 245
@@ -386,7 +382,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
386 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 382 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
387 383
388 if (fc > 3) { 384 if (fc > 3) {
389 vcpu->arch.sie_block->gpsw.mask |= 3ul << 44; /* cc 3 */ 385 kvm_s390_set_psw_cc(vcpu, 3);
390 return 0; 386 return 0;
391 } 387 }
392 388
@@ -396,7 +392,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
396 392
397 if (fc == 0) { 393 if (fc == 0) {
398 vcpu->run->s.regs.gprs[0] = 3 << 28; 394 vcpu->run->s.regs.gprs[0] = 3 << 28;
399 vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44); /* cc 0 */ 395 kvm_s390_set_psw_cc(vcpu, 0);
400 return 0; 396 return 0;
401 } 397 }
402 398
@@ -430,12 +426,11 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
430 } 426 }
431 trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2); 427 trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
432 free_page(mem); 428 free_page(mem);
433 vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44); 429 kvm_s390_set_psw_cc(vcpu, 0);
434 vcpu->run->s.regs.gprs[0] = 0; 430 vcpu->run->s.regs.gprs[0] = 0;
435 return 0; 431 return 0;
436out_no_data: 432out_no_data:
437 /* condition code 3 */ 433 kvm_s390_set_psw_cc(vcpu, 3);
438 vcpu->arch.sie_block->gpsw.mask |= 3ul << 44;
439out_exception: 434out_exception:
440 free_page(mem); 435 free_page(mem);
441 return rc; 436 return rc;
@@ -493,12 +488,12 @@ static int handle_epsw(struct kvm_vcpu *vcpu)
493 kvm_s390_get_regs_rre(vcpu, &reg1, &reg2); 488 kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
494 489
495 /* This basically extracts the mask half of the psw. */ 490 /* This basically extracts the mask half of the psw. */
496 vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000; 491 vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
497 vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32; 492 vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
498 if (reg2) { 493 if (reg2) {
499 vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000; 494 vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
500 vcpu->run->s.regs.gprs[reg2] |= 495 vcpu->run->s.regs.gprs[reg2] |=
501 vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffff; 496 vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
502 } 497 }
503 return 0; 498 return 0;
504} 499}
@@ -532,8 +527,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
532 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 527 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
533 528
534 /* Only provide non-quiescing support if the host supports it */ 529 /* Only provide non-quiescing support if the host supports it */
535 if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ && 530 if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ && !test_facility(14))
536 S390_lowcore.stfl_fac_list & 0x00020000)
537 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 531 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
538 532
539 /* No support for conditional-SSKE */ 533 /* No support for conditional-SSKE */
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index c61b9fad43cc..57c87d7d7ede 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -44,7 +44,6 @@ static void __udelay_disabled(unsigned long long usecs)
44 do { 44 do {
45 set_clock_comparator(end); 45 set_clock_comparator(end);
46 vtime_stop_cpu(); 46 vtime_stop_cpu();
47 local_irq_disable();
48 } while (get_tod_clock() < end); 47 } while (get_tod_clock() < end);
49 lockdep_on(); 48 lockdep_on();
50 __ctl_load(cr0, 0, 0); 49 __ctl_load(cr0, 0, 0);
@@ -64,7 +63,6 @@ static void __udelay_enabled(unsigned long long usecs)
64 set_clock_comparator(end); 63 set_clock_comparator(end);
65 } 64 }
66 vtime_stop_cpu(); 65 vtime_stop_cpu();
67 local_irq_disable();
68 if (clock_saved) 66 if (clock_saved)
69 local_tick_enable(clock_saved); 67 local_tick_enable(clock_saved);
70 } while (get_tod_clock() < end); 68 } while (get_tod_clock() < end);
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index 50ea137a2d3c..1694d738b175 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -86,28 +86,28 @@ static unsigned long follow_table(struct mm_struct *mm,
86 switch (mm->context.asce_bits & _ASCE_TYPE_MASK) { 86 switch (mm->context.asce_bits & _ASCE_TYPE_MASK) {
87 case _ASCE_TYPE_REGION1: 87 case _ASCE_TYPE_REGION1:
88 table = table + ((address >> 53) & 0x7ff); 88 table = table + ((address >> 53) & 0x7ff);
89 if (unlikely(*table & _REGION_ENTRY_INV)) 89 if (unlikely(*table & _REGION_ENTRY_INVALID))
90 return -0x39UL; 90 return -0x39UL;
91 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 91 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
92 /* fallthrough */ 92 /* fallthrough */
93 case _ASCE_TYPE_REGION2: 93 case _ASCE_TYPE_REGION2:
94 table = table + ((address >> 42) & 0x7ff); 94 table = table + ((address >> 42) & 0x7ff);
95 if (unlikely(*table & _REGION_ENTRY_INV)) 95 if (unlikely(*table & _REGION_ENTRY_INVALID))
96 return -0x3aUL; 96 return -0x3aUL;
97 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 97 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
98 /* fallthrough */ 98 /* fallthrough */
99 case _ASCE_TYPE_REGION3: 99 case _ASCE_TYPE_REGION3:
100 table = table + ((address >> 31) & 0x7ff); 100 table = table + ((address >> 31) & 0x7ff);
101 if (unlikely(*table & _REGION_ENTRY_INV)) 101 if (unlikely(*table & _REGION_ENTRY_INVALID))
102 return -0x3bUL; 102 return -0x3bUL;
103 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 103 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
104 /* fallthrough */ 104 /* fallthrough */
105 case _ASCE_TYPE_SEGMENT: 105 case _ASCE_TYPE_SEGMENT:
106 table = table + ((address >> 20) & 0x7ff); 106 table = table + ((address >> 20) & 0x7ff);
107 if (unlikely(*table & _SEGMENT_ENTRY_INV)) 107 if (unlikely(*table & _SEGMENT_ENTRY_INVALID))
108 return -0x10UL; 108 return -0x10UL;
109 if (unlikely(*table & _SEGMENT_ENTRY_LARGE)) { 109 if (unlikely(*table & _SEGMENT_ENTRY_LARGE)) {
110 if (write && (*table & _SEGMENT_ENTRY_RO)) 110 if (write && (*table & _SEGMENT_ENTRY_PROTECT))
111 return -0x04UL; 111 return -0x04UL;
112 return (*table & _SEGMENT_ENTRY_ORIGIN_LARGE) + 112 return (*table & _SEGMENT_ENTRY_ORIGIN_LARGE) +
113 (address & ~_SEGMENT_ENTRY_ORIGIN_LARGE); 113 (address & ~_SEGMENT_ENTRY_ORIGIN_LARGE);
@@ -117,7 +117,7 @@ static unsigned long follow_table(struct mm_struct *mm,
117 table = table + ((address >> 12) & 0xff); 117 table = table + ((address >> 12) & 0xff);
118 if (unlikely(*table & _PAGE_INVALID)) 118 if (unlikely(*table & _PAGE_INVALID))
119 return -0x11UL; 119 return -0x11UL;
120 if (write && (*table & _PAGE_RO)) 120 if (write && (*table & _PAGE_PROTECT))
121 return -0x04UL; 121 return -0x04UL;
122 return (*table & PAGE_MASK) + (address & ~PAGE_MASK); 122 return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
123} 123}
@@ -130,13 +130,13 @@ static unsigned long follow_table(struct mm_struct *mm,
130 unsigned long *table = (unsigned long *)__pa(mm->pgd); 130 unsigned long *table = (unsigned long *)__pa(mm->pgd);
131 131
132 table = table + ((address >> 20) & 0x7ff); 132 table = table + ((address >> 20) & 0x7ff);
133 if (unlikely(*table & _SEGMENT_ENTRY_INV)) 133 if (unlikely(*table & _SEGMENT_ENTRY_INVALID))
134 return -0x10UL; 134 return -0x10UL;
135 table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN); 135 table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
136 table = table + ((address >> 12) & 0xff); 136 table = table + ((address >> 12) & 0xff);
137 if (unlikely(*table & _PAGE_INVALID)) 137 if (unlikely(*table & _PAGE_INVALID))
138 return -0x11UL; 138 return -0x11UL;
139 if (write && (*table & _PAGE_RO)) 139 if (write && (*table & _PAGE_PROTECT))
140 return -0x04UL; 140 return -0x04UL;
141 return (*table & PAGE_MASK) + (address & ~PAGE_MASK); 141 return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
142} 142}
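
The shift/mask pairs in follow_table() decompose the address exactly: four 11-bit table indexes, an 8-bit page-table index and a 12-bit page offset account for all 64 bits (11+11+11+11+8+12 = 64). A stand-alone illustration of the split:

    #include <stdio.h>

    int main(void)
    {
            unsigned long addr = 0x0000020000345678UL;

            printf("rg1=%lx rg2=%lx rg3=%lx sgt=%lx pgt=%lx off=%lx\n",
                   (addr >> 53) & 0x7ff, (addr >> 42) & 0x7ff,
                   (addr >> 31) & 0x7ff, (addr >> 20) & 0x7ff,
                   (addr >> 12) & 0xff, addr & 0xfff);
            return 0;
    }
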
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index 3ad65b04ac15..46d517c3c763 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -53,7 +53,7 @@ static void print_prot(struct seq_file *m, unsigned int pr, int level)
53 seq_printf(m, "I\n"); 53 seq_printf(m, "I\n");
54 return; 54 return;
55 } 55 }
56 seq_printf(m, "%s", pr & _PAGE_RO ? "RO " : "RW "); 56 seq_printf(m, "%s", pr & _PAGE_PROTECT ? "RO " : "RW ");
57 seq_printf(m, "%s", pr & _PAGE_CO ? "CO " : " "); 57 seq_printf(m, "%s", pr & _PAGE_CO ? "CO " : " ");
58 seq_putc(m, '\n'); 58 seq_putc(m, '\n');
59} 59}
@@ -105,12 +105,12 @@ static void note_page(struct seq_file *m, struct pg_state *st,
105} 105}
106 106
107/* 107/*
108 * The actual page table walker functions. In order to keep the implementation 108 * The actual page table walker functions. In order to keep the
109 * of print_prot() short, we only check and pass _PAGE_INVALID and _PAGE_RO 109 * implementation of print_prot() short, we only check and pass
110 * flags to note_page() if a region, segment or page table entry is invalid or 110 * _PAGE_INVALID and _PAGE_PROTECT flags to note_page() if a region,
111 * read-only. 111 * segment or page table entry is invalid or read-only.
112 * After all it's just a hint that the current level being walked contains an 112 * After all it's just a hint that the current level being walked
113 * invalid or read-only entry. 113 * contains an invalid or read-only entry.
114 */ 114 */
115static void walk_pte_level(struct seq_file *m, struct pg_state *st, 115static void walk_pte_level(struct seq_file *m, struct pg_state *st,
116 pmd_t *pmd, unsigned long addr) 116 pmd_t *pmd, unsigned long addr)
@@ -122,14 +122,14 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st,
122 for (i = 0; i < PTRS_PER_PTE && addr < max_addr; i++) { 122 for (i = 0; i < PTRS_PER_PTE && addr < max_addr; i++) {
123 st->current_address = addr; 123 st->current_address = addr;
124 pte = pte_offset_kernel(pmd, addr); 124 pte = pte_offset_kernel(pmd, addr);
125 prot = pte_val(*pte) & (_PAGE_RO | _PAGE_INVALID); 125 prot = pte_val(*pte) & (_PAGE_PROTECT | _PAGE_INVALID);
126 note_page(m, st, prot, 4); 126 note_page(m, st, prot, 4);
127 addr += PAGE_SIZE; 127 addr += PAGE_SIZE;
128 } 128 }
129} 129}
130 130
131#ifdef CONFIG_64BIT 131#ifdef CONFIG_64BIT
132#define _PMD_PROT_MASK (_SEGMENT_ENTRY_RO | _SEGMENT_ENTRY_CO) 132#define _PMD_PROT_MASK (_SEGMENT_ENTRY_PROTECT | _SEGMENT_ENTRY_CO)
133#else 133#else
134#define _PMD_PROT_MASK 0 134#define _PMD_PROT_MASK 0
135#endif 135#endif
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index 1f5315d1215c..5d758db27bdc 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -24,7 +24,7 @@ static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
24 pte_t *ptep, pte; 24 pte_t *ptep, pte;
25 struct page *page; 25 struct page *page;
26 26
27 mask = (write ? _PAGE_RO : 0) | _PAGE_INVALID | _PAGE_SPECIAL; 27 mask = (write ? _PAGE_PROTECT : 0) | _PAGE_INVALID | _PAGE_SPECIAL;
28 28
29 ptep = ((pte_t *) pmd_deref(pmd)) + pte_index(addr); 29 ptep = ((pte_t *) pmd_deref(pmd)) + pte_index(addr);
30 do { 30 do {
@@ -55,8 +55,8 @@ static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
55 struct page *head, *page, *tail; 55 struct page *head, *page, *tail;
56 int refs; 56 int refs;
57 57
58 result = write ? 0 : _SEGMENT_ENTRY_RO; 58 result = write ? 0 : _SEGMENT_ENTRY_PROTECT;
59 mask = result | _SEGMENT_ENTRY_INV; 59 mask = result | _SEGMENT_ENTRY_INVALID;
60 if ((pmd_val(pmd) & mask) != result) 60 if ((pmd_val(pmd) & mask) != result)
61 return 0; 61 return 0;
62 VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT)); 62 VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT));
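
gup_pte_range() folds all disqualifying conditions into one mask so the hot loop tests each pte with a single AND: _PAGE_INVALID and _PAGE_SPECIAL always force the slow path, _PAGE_PROTECT only for writes. A stand-alone sketch with stand-in flag values (not the kernel's bit assignments):

    #include <assert.h>

    #define PROTECT 0x200
    #define INVALID 0x400
    #define SPECIAL 0x004

    static int fast_path_ok(unsigned long pte, int write)
    {
            unsigned long mask = (write ? PROTECT : 0) | INVALID | SPECIAL;

            return (pte & mask) == 0;
    }

    int main(void)
    {
            assert(fast_path_ok(0x1000, 1));           /* plain writable page */
            assert(fast_path_ok(0x1000 | PROTECT, 0)); /* read of protected page */
            assert(!fast_path_ok(0x1000 | PROTECT, 1)); /* write needs PROTECT clear */
            assert(!fast_path_ok(0x1000 | INVALID, 0)); /* invalid always bails */
            return 0;
    }
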
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 121089d57802..248445f92604 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -8,21 +8,127 @@
8#include <linux/mm.h> 8#include <linux/mm.h>
9#include <linux/hugetlb.h> 9#include <linux/hugetlb.h>
10 10
11static inline pmd_t __pte_to_pmd(pte_t pte)
12{
13 int none, young, prot;
14 pmd_t pmd;
15
16 /*
17 * Convert encoding pte bits pmd bits
18 * .IR...wrdytp ..R...I...y.
19 * empty .10...000000 -> ..0...1...0.
20 * prot-none, clean, old .11...000001 -> ..0...1...1.
21 * prot-none, clean, young .11...000101 -> ..1...1...1.
22 * prot-none, dirty, old .10...001001 -> ..0...1...1.
23 * prot-none, dirty, young .10...001101 -> ..1...1...1.
24 * read-only, clean, old .11...010001 -> ..1...1...0.
25 * read-only, clean, young .01...010101 -> ..1...0...1.
26 * read-only, dirty, old .11...011001 -> ..1...1...0.
27 * read-only, dirty, young .01...011101 -> ..1...0...1.
28 * read-write, clean, old .11...110001 -> ..0...1...0.
29 * read-write, clean, young .01...110101 -> ..0...0...1.
30 * read-write, dirty, old .10...111001 -> ..0...1...0.
31 * read-write, dirty, young .00...111101 -> ..0...0...1.
32 * Huge ptes are dirty by definition, a clean pte is made dirty
33 * by the conversion.
34 */
35 if (pte_present(pte)) {
36 pmd_val(pmd) = pte_val(pte) & PAGE_MASK;
37 if (pte_val(pte) & _PAGE_INVALID)
38 pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
39 none = (pte_val(pte) & _PAGE_PRESENT) &&
40 !(pte_val(pte) & _PAGE_READ) &&
41 !(pte_val(pte) & _PAGE_WRITE);
42 prot = (pte_val(pte) & _PAGE_PROTECT) &&
43 !(pte_val(pte) & _PAGE_WRITE);
44 young = pte_val(pte) & _PAGE_YOUNG;
45 if (none || young)
46 pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
47 if (prot || (none && young))
48 pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
49 } else
50 pmd_val(pmd) = _SEGMENT_ENTRY_INVALID;
51 return pmd;
52}
53
54static inline pte_t __pmd_to_pte(pmd_t pmd)
55{
56 pte_t pte;
57
58 /*
59 * Convert encoding pmd bits pte bits
60 * ..R...I...y. .IR...wrdytp
61 * empty ..0...1...0. -> .10...000000
62 * prot-none, old ..0...1...1. -> .10...001001
63 * prot-none, young ..1...1...1. -> .10...001101
64 * read-only, old ..1...1...0. -> .11...011001
65 * read-only, young ..1...0...1. -> .01...011101
66 * read-write, old ..0...1...0. -> .10...111001
67 * read-write, young ..0...0...1. -> .00...111101
68 * Huge ptes are dirty by definition
69 */
70 if (pmd_present(pmd)) {
71 pte_val(pte) = _PAGE_PRESENT | _PAGE_LARGE | _PAGE_DIRTY |
72 (pmd_val(pmd) & PAGE_MASK);
73 if (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID)
74 pte_val(pte) |= _PAGE_INVALID;
75 if (pmd_prot_none(pmd)) {
76 if (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT)
77 pte_val(pte) |= _PAGE_YOUNG;
78 } else {
79 pte_val(pte) |= _PAGE_READ;
80 if (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT)
81 pte_val(pte) |= _PAGE_PROTECT;
82 else
83 pte_val(pte) |= _PAGE_WRITE;
84 if (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG)
85 pte_val(pte) |= _PAGE_YOUNG;
86 }
87 } else
88 pte_val(pte) = _PAGE_INVALID;
89 return pte;
90}
11 91
12void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, 92void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
13 pte_t *pteptr, pte_t pteval) 93 pte_t *ptep, pte_t pte)
14{ 94{
15 pmd_t *pmdp = (pmd_t *) pteptr; 95 pmd_t pmd;
16 unsigned long mask;
17 96
97 pmd = __pte_to_pmd(pte);
18 if (!MACHINE_HAS_HPAGE) { 98 if (!MACHINE_HAS_HPAGE) {
19 pteptr = (pte_t *) pte_page(pteval)[1].index; 99 pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
20 mask = pte_val(pteval) & 100 pmd_val(pmd) |= pte_page(pte)[1].index;
21 (_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO); 101 } else
22 pte_val(pteval) = (_SEGMENT_ENTRY + __pa(pteptr)) | mask; 102 pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO;
103 *(pmd_t *) ptep = pmd;
104}
105
106pte_t huge_ptep_get(pte_t *ptep)
107{
108 unsigned long origin;
109 pmd_t pmd;
110
111 pmd = *(pmd_t *) ptep;
112 if (!MACHINE_HAS_HPAGE && pmd_present(pmd)) {
113 origin = pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN;
114 pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
115 pmd_val(pmd) |= *(unsigned long *) origin;
23 } 116 }
117 return __pmd_to_pte(pmd);
118}
24 119
25 pmd_val(*pmdp) = pte_val(pteval); 120pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
121 unsigned long addr, pte_t *ptep)
122{
123 pmd_t *pmdp = (pmd_t *) ptep;
124 pte_t pte = huge_ptep_get(ptep);
125
126 if (MACHINE_HAS_IDTE)
127 __pmd_idte(addr, pmdp);
128 else
129 __pmd_csp(pmdp);
130 pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
131 return pte;
26} 132}
27 133
28int arch_prepare_hugepage(struct page *page) 134int arch_prepare_hugepage(struct page *page)
@@ -58,7 +164,7 @@ void arch_release_hugepage(struct page *page)
58 ptep = (pte_t *) page[1].index; 164 ptep = (pte_t *) page[1].index;
59 if (!ptep) 165 if (!ptep)
60 return; 166 return;
61 clear_table((unsigned long *) ptep, _PAGE_TYPE_EMPTY, 167 clear_table((unsigned long *) ptep, _PAGE_INVALID,
62 PTRS_PER_PTE * sizeof(pte_t)); 168 PTRS_PER_PTE * sizeof(pte_t));
63 page_table_free(&init_mm, (unsigned long *) ptep); 169 page_table_free(&init_mm, (unsigned long *) ptep);
64 page[1].index = 0; 170 page[1].index = 0;
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index ce36ea80e4f9..ad446b0c55b6 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -69,6 +69,7 @@ static void __init setup_zero_pages(void)
69 order = 2; 69 order = 2;
70 break; 70 break;
71 case 0x2827: /* zEC12 */ 71 case 0x2827: /* zEC12 */
72 case 0x2828: /* zEC12 */
72 default: 73 default:
73 order = 5; 74 order = 5;
74 break; 75 break;
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 80adfbf75065..990397420e6b 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -118,7 +118,7 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
118 pte = pte_offset_kernel(pmd, address); 118 pte = pte_offset_kernel(pmd, address);
119 if (!enable) { 119 if (!enable) {
120 __ptep_ipte(address, pte); 120 __ptep_ipte(address, pte);
121 pte_val(*pte) = _PAGE_TYPE_EMPTY; 121 pte_val(*pte) = _PAGE_INVALID;
122 continue; 122 continue;
123 } 123 }
124 pte_val(*pte) = __pa(address); 124 pte_val(*pte) = __pa(address);
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index a8154a1a2c94..bf7c0dc64a76 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -161,7 +161,7 @@ static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
161 struct gmap_rmap *rmap; 161 struct gmap_rmap *rmap;
162 struct page *page; 162 struct page *page;
163 163
164 if (*table & _SEGMENT_ENTRY_INV) 164 if (*table & _SEGMENT_ENTRY_INVALID)
165 return 0; 165 return 0;
166 page = pfn_to_page(*table >> PAGE_SHIFT); 166 page = pfn_to_page(*table >> PAGE_SHIFT);
167 mp = (struct gmap_pgtable *) page->index; 167 mp = (struct gmap_pgtable *) page->index;
@@ -172,7 +172,7 @@ static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
172 kfree(rmap); 172 kfree(rmap);
173 break; 173 break;
174 } 174 }
175 *table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr; 175 *table = mp->vmaddr | _SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_PROTECT;
176 return 1; 176 return 1;
177} 177}
178 178
@@ -258,7 +258,7 @@ static int gmap_alloc_table(struct gmap *gmap,
258 return -ENOMEM; 258 return -ENOMEM;
259 new = (unsigned long *) page_to_phys(page); 259 new = (unsigned long *) page_to_phys(page);
260 crst_table_init(new, init); 260 crst_table_init(new, init);
261 if (*table & _REGION_ENTRY_INV) { 261 if (*table & _REGION_ENTRY_INVALID) {
262 list_add(&page->lru, &gmap->crst_list); 262 list_add(&page->lru, &gmap->crst_list);
263 *table = (unsigned long) new | _REGION_ENTRY_LENGTH | 263 *table = (unsigned long) new | _REGION_ENTRY_LENGTH |
264 (*table & _REGION_ENTRY_TYPE_MASK); 264 (*table & _REGION_ENTRY_TYPE_MASK);
@@ -292,22 +292,22 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
292 for (off = 0; off < len; off += PMD_SIZE) { 292 for (off = 0; off < len; off += PMD_SIZE) {
293 /* Walk the guest addr space page table */ 293 /* Walk the guest addr space page table */
294 table = gmap->table + (((to + off) >> 53) & 0x7ff); 294 table = gmap->table + (((to + off) >> 53) & 0x7ff);
295 if (*table & _REGION_ENTRY_INV) 295 if (*table & _REGION_ENTRY_INVALID)
296 goto out; 296 goto out;
297 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 297 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
298 table = table + (((to + off) >> 42) & 0x7ff); 298 table = table + (((to + off) >> 42) & 0x7ff);
299 if (*table & _REGION_ENTRY_INV) 299 if (*table & _REGION_ENTRY_INVALID)
300 goto out; 300 goto out;
301 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 301 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
302 table = table + (((to + off) >> 31) & 0x7ff); 302 table = table + (((to + off) >> 31) & 0x7ff);
303 if (*table & _REGION_ENTRY_INV) 303 if (*table & _REGION_ENTRY_INVALID)
304 goto out; 304 goto out;
305 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 305 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
306 table = table + (((to + off) >> 20) & 0x7ff); 306 table = table + (((to + off) >> 20) & 0x7ff);
307 307
308 /* Clear segment table entry in guest address space. */ 308 /* Clear segment table entry in guest address space. */
309 flush |= gmap_unlink_segment(gmap, table); 309 flush |= gmap_unlink_segment(gmap, table);
310 *table = _SEGMENT_ENTRY_INV; 310 *table = _SEGMENT_ENTRY_INVALID;
311 } 311 }
312out: 312out:
313 spin_unlock(&gmap->mm->page_table_lock); 313 spin_unlock(&gmap->mm->page_table_lock);
@@ -335,7 +335,7 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
335 335
336 if ((from | to | len) & (PMD_SIZE - 1)) 336 if ((from | to | len) & (PMD_SIZE - 1))
337 return -EINVAL; 337 return -EINVAL;
338 if (len == 0 || from + len > PGDIR_SIZE || 338 if (len == 0 || from + len > TASK_MAX_SIZE ||
339 from + len < from || to + len < to) 339 from + len < from || to + len < to)
340 return -EINVAL; 340 return -EINVAL;
341 341
@@ -345,17 +345,17 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
345 for (off = 0; off < len; off += PMD_SIZE) { 345 for (off = 0; off < len; off += PMD_SIZE) {
346 /* Walk the gmap address space page table */ 346 /* Walk the gmap address space page table */
347 table = gmap->table + (((to + off) >> 53) & 0x7ff); 347 table = gmap->table + (((to + off) >> 53) & 0x7ff);
348 if ((*table & _REGION_ENTRY_INV) && 348 if ((*table & _REGION_ENTRY_INVALID) &&
349 gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY)) 349 gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY))
350 goto out_unmap; 350 goto out_unmap;
351 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 351 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
352 table = table + (((to + off) >> 42) & 0x7ff); 352 table = table + (((to + off) >> 42) & 0x7ff);
353 if ((*table & _REGION_ENTRY_INV) && 353 if ((*table & _REGION_ENTRY_INVALID) &&
354 gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY)) 354 gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY))
355 goto out_unmap; 355 goto out_unmap;
356 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 356 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
357 table = table + (((to + off) >> 31) & 0x7ff); 357 table = table + (((to + off) >> 31) & 0x7ff);
358 if ((*table & _REGION_ENTRY_INV) && 358 if ((*table & _REGION_ENTRY_INVALID) &&
359 gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY)) 359 gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY))
360 goto out_unmap; 360 goto out_unmap;
361 table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN); 361 table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN);
@@ -363,7 +363,8 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
363 363
364 /* Store 'from' address in an invalid segment table entry. */ 364 /* Store 'from' address in an invalid segment table entry. */
365 flush |= gmap_unlink_segment(gmap, table); 365 flush |= gmap_unlink_segment(gmap, table);
366 *table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off); 366 *table = (from + off) | (_SEGMENT_ENTRY_INVALID |
367 _SEGMENT_ENTRY_PROTECT);
367 } 368 }
368 spin_unlock(&gmap->mm->page_table_lock); 369 spin_unlock(&gmap->mm->page_table_lock);
369 up_read(&gmap->mm->mmap_sem); 370 up_read(&gmap->mm->mmap_sem);
@@ -384,15 +385,15 @@ static unsigned long *gmap_table_walk(unsigned long address, struct gmap *gmap)
384 unsigned long *table; 385 unsigned long *table;
385 386
386 table = gmap->table + ((address >> 53) & 0x7ff); 387 table = gmap->table + ((address >> 53) & 0x7ff);
387 if (unlikely(*table & _REGION_ENTRY_INV)) 388 if (unlikely(*table & _REGION_ENTRY_INVALID))
388 return ERR_PTR(-EFAULT); 389 return ERR_PTR(-EFAULT);
389 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 390 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
390 table = table + ((address >> 42) & 0x7ff); 391 table = table + ((address >> 42) & 0x7ff);
391 if (unlikely(*table & _REGION_ENTRY_INV)) 392 if (unlikely(*table & _REGION_ENTRY_INVALID))
392 return ERR_PTR(-EFAULT); 393 return ERR_PTR(-EFAULT);
393 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 394 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
394 table = table + ((address >> 31) & 0x7ff); 395 table = table + ((address >> 31) & 0x7ff);
395 if (unlikely(*table & _REGION_ENTRY_INV)) 396 if (unlikely(*table & _REGION_ENTRY_INVALID))
396 return ERR_PTR(-EFAULT); 397 return ERR_PTR(-EFAULT);
397 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 398 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
398 table = table + ((address >> 20) & 0x7ff); 399 table = table + ((address >> 20) & 0x7ff);
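
The walk in gmap_table_walk() above indexes four table levels with 11-bit (2048-entry) indices taken at bit offsets 53, 42, 31 and 20 of the guest address. A quick user-space check of that arithmetic, using the shifts and the 0x7ff mask exactly as they appear in the walk:

#include <stdio.h>

int main(void)
{
	unsigned long addr = 0x0000123456789000UL;	/* sample guest address */

	printf("r1=%lu r2=%lu r3=%lu sx=%lu\n",
	       (addr >> 53) & 0x7ff,	/* region 1 index */
	       (addr >> 42) & 0x7ff,	/* region 2 index */
	       (addr >> 31) & 0x7ff,	/* region 3 index */
	       (addr >> 20) & 0x7ff);	/* segment index */
	return 0;
}
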
@@ -422,11 +423,11 @@ unsigned long __gmap_translate(unsigned long address, struct gmap *gmap)
422 return PTR_ERR(segment_ptr); 423 return PTR_ERR(segment_ptr);
423 /* Convert the gmap address to an mm address. */ 424 /* Convert the gmap address to an mm address. */
424 segment = *segment_ptr; 425 segment = *segment_ptr;
425 if (!(segment & _SEGMENT_ENTRY_INV)) { 426 if (!(segment & _SEGMENT_ENTRY_INVALID)) {
426 page = pfn_to_page(segment >> PAGE_SHIFT); 427 page = pfn_to_page(segment >> PAGE_SHIFT);
427 mp = (struct gmap_pgtable *) page->index; 428 mp = (struct gmap_pgtable *) page->index;
428 return mp->vmaddr | (address & ~PMD_MASK); 429 return mp->vmaddr | (address & ~PMD_MASK);
429 } else if (segment & _SEGMENT_ENTRY_RO) { 430 } else if (segment & _SEGMENT_ENTRY_PROTECT) {
430 vmaddr = segment & _SEGMENT_ENTRY_ORIGIN; 431 vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
431 return vmaddr | (address & ~PMD_MASK); 432 return vmaddr | (address & ~PMD_MASK);
432 } 433 }
@@ -517,8 +518,8 @@ static void gmap_disconnect_pgtable(struct mm_struct *mm, unsigned long *table)
517 page = pfn_to_page(__pa(table) >> PAGE_SHIFT); 518 page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
518 mp = (struct gmap_pgtable *) page->index; 519 mp = (struct gmap_pgtable *) page->index;
519 list_for_each_entry_safe(rmap, next, &mp->mapper, list) { 520 list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
520 *rmap->entry = 521 *rmap->entry = mp->vmaddr | (_SEGMENT_ENTRY_INVALID |
521 _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr; 522 _SEGMENT_ENTRY_PROTECT);
522 list_del(&rmap->list); 523 list_del(&rmap->list);
523 kfree(rmap); 524 kfree(rmap);
524 flush = 1; 525 flush = 1;
@@ -545,13 +546,13 @@ unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
545 /* Convert the gmap address to an mm address. */ 546 /* Convert the gmap address to an mm address. */
546 while (1) { 547 while (1) {
547 segment = *segment_ptr; 548 segment = *segment_ptr;
548 if (!(segment & _SEGMENT_ENTRY_INV)) { 549 if (!(segment & _SEGMENT_ENTRY_INVALID)) {
549 /* Page table is present */ 550 /* Page table is present */
550 page = pfn_to_page(segment >> PAGE_SHIFT); 551 page = pfn_to_page(segment >> PAGE_SHIFT);
551 mp = (struct gmap_pgtable *) page->index; 552 mp = (struct gmap_pgtable *) page->index;
552 return mp->vmaddr | (address & ~PMD_MASK); 553 return mp->vmaddr | (address & ~PMD_MASK);
553 } 554 }
554 if (!(segment & _SEGMENT_ENTRY_RO)) 555 if (!(segment & _SEGMENT_ENTRY_PROTECT))
555 /* Nothing mapped in the gmap address space. */ 556 /* Nothing mapped in the gmap address space. */
556 break; 557 break;
557 rc = gmap_connect_pgtable(address, segment, segment_ptr, gmap); 558 rc = gmap_connect_pgtable(address, segment, segment_ptr, gmap);
@@ -586,25 +587,25 @@ void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
586 while (address < to) { 587 while (address < to) {
587 /* Walk the gmap address space page table */ 588 /* Walk the gmap address space page table */
588 table = gmap->table + ((address >> 53) & 0x7ff); 589 table = gmap->table + ((address >> 53) & 0x7ff);
589 if (unlikely(*table & _REGION_ENTRY_INV)) { 590 if (unlikely(*table & _REGION_ENTRY_INVALID)) {
590 address = (address + PMD_SIZE) & PMD_MASK; 591 address = (address + PMD_SIZE) & PMD_MASK;
591 continue; 592 continue;
592 } 593 }
593 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 594 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
594 table = table + ((address >> 42) & 0x7ff); 595 table = table + ((address >> 42) & 0x7ff);
595 if (unlikely(*table & _REGION_ENTRY_INV)) { 596 if (unlikely(*table & _REGION_ENTRY_INVALID)) {
596 address = (address + PMD_SIZE) & PMD_MASK; 597 address = (address + PMD_SIZE) & PMD_MASK;
597 continue; 598 continue;
598 } 599 }
599 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 600 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
600 table = table + ((address >> 31) & 0x7ff); 601 table = table + ((address >> 31) & 0x7ff);
601 if (unlikely(*table & _REGION_ENTRY_INV)) { 602 if (unlikely(*table & _REGION_ENTRY_INVALID)) {
602 address = (address + PMD_SIZE) & PMD_MASK; 603 address = (address + PMD_SIZE) & PMD_MASK;
603 continue; 604 continue;
604 } 605 }
605 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); 606 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
606 table = table + ((address >> 20) & 0x7ff); 607 table = table + ((address >> 20) & 0x7ff);
607 if (unlikely(*table & _SEGMENT_ENTRY_INV)) { 608 if (unlikely(*table & _SEGMENT_ENTRY_INVALID)) {
608 address = (address + PMD_SIZE) & PMD_MASK; 609 address = (address + PMD_SIZE) & PMD_MASK;
609 continue; 610 continue;
610 } 611 }
@@ -687,7 +688,7 @@ int gmap_ipte_notify(struct gmap *gmap, unsigned long start, unsigned long len)
687 continue; 688 continue;
688 /* Set notification bit in the pgste of the pte */ 689 /* Set notification bit in the pgste of the pte */
689 entry = *ptep; 690 entry = *ptep;
690 if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_RO)) == 0) { 691 if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_PROTECT)) == 0) {
691 pgste = pgste_get_lock(ptep); 692 pgste = pgste_get_lock(ptep);
692 pgste_val(pgste) |= PGSTE_IN_BIT; 693 pgste_val(pgste) |= PGSTE_IN_BIT;
693 pgste_set_unlock(ptep, pgste); 694 pgste_set_unlock(ptep, pgste);
@@ -731,6 +732,11 @@ void gmap_do_ipte_notify(struct mm_struct *mm, unsigned long addr, pte_t *pte)
731 spin_unlock(&gmap_notifier_lock); 732 spin_unlock(&gmap_notifier_lock);
732} 733}
733 734
735static inline int page_table_with_pgste(struct page *page)
736{
737 return atomic_read(&page->_mapcount) == 0;
738}
739
734static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm, 740static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
735 unsigned long vmaddr) 741 unsigned long vmaddr)
736{ 742{
@@ -750,10 +756,11 @@ static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
750 mp->vmaddr = vmaddr & PMD_MASK; 756 mp->vmaddr = vmaddr & PMD_MASK;
751 INIT_LIST_HEAD(&mp->mapper); 757 INIT_LIST_HEAD(&mp->mapper);
752 page->index = (unsigned long) mp; 758 page->index = (unsigned long) mp;
753 atomic_set(&page->_mapcount, 3); 759 atomic_set(&page->_mapcount, 0);
754 table = (unsigned long *) page_to_phys(page); 760 table = (unsigned long *) page_to_phys(page);
755 clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2); 761 clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
756 clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2); 762 clear_table(table + PTRS_PER_PTE, PGSTE_HR_BIT | PGSTE_HC_BIT,
763 PAGE_SIZE/2);
757 return table; 764 return table;
758} 765}
759 766
@@ -791,26 +798,21 @@ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
791 pgste_val(new) |= (key & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48; 798 pgste_val(new) |= (key & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
792 pgste_val(new) |= (key & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56; 799 pgste_val(new) |= (key & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
793 if (!(pte_val(*ptep) & _PAGE_INVALID)) { 800 if (!(pte_val(*ptep) & _PAGE_INVALID)) {
794 unsigned long address, bits; 801 unsigned long address, bits, skey;
795 unsigned char skey;
796 802
797 address = pte_val(*ptep) & PAGE_MASK; 803 address = pte_val(*ptep) & PAGE_MASK;
798 skey = page_get_storage_key(address); 804 skey = (unsigned long) page_get_storage_key(address);
799 bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED); 805 bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
806 skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
800 /* Set storage key ACC and FP */ 807 /* Set storage key ACC and FP */
801 page_set_storage_key(address, 808 page_set_storage_key(address, skey, !nq);
802 (key & (_PAGE_ACC_BITS | _PAGE_FP_BIT)),
803 !nq);
804
805 /* Merge host changed & referenced into pgste */ 809 /* Merge host changed & referenced into pgste */
806 pgste_val(new) |= bits << 52; 810 pgste_val(new) |= bits << 52;
807 /* Transfer skey changed & referenced bit to kvm user bits */
808 pgste_val(new) |= bits << 45; /* PGSTE_UR_BIT & PGSTE_UC_BIT */
809 } 811 }
810 /* changing the guest storage key is considered a change of the page */ 812 /* changing the guest storage key is considered a change of the page */
811 if ((pgste_val(new) ^ pgste_val(old)) & 813 if ((pgste_val(new) ^ pgste_val(old)) &
812 (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT)) 814 (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
813 pgste_val(new) |= PGSTE_UC_BIT; 815 pgste_val(new) |= PGSTE_HC_BIT;
814 816
815 pgste_set_unlock(ptep, new); 817 pgste_set_unlock(ptep, new);
816 pte_unmap_unlock(*ptep, ptl); 818 pte_unmap_unlock(*ptep, ptl);
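
The reworked set_guest_storage_key() above repositions the architected storage-key bits into the pgste with fixed shifts: guest change/reference go in at <<48, ACC and fetch protection at <<56, and the host's change/reference (read back from the real storage key) merge in at <<52. A sketch of just that bit arithmetic; the _PAGE_* values below are assumptions mirroring the storage key byte layout, the real definitions live in asm/pgtable.h:

#include <stdio.h>

#define _PAGE_ACC_BITS   0xf0UL	/* assumed: access-control bits */
#define _PAGE_FP_BIT     0x08UL	/* assumed: fetch-protection bit */
#define _PAGE_REFERENCED 0x04UL	/* assumed: reference bit */
#define _PAGE_CHANGED    0x02UL	/* assumed: change bit */

static unsigned long pgste_merge_key(unsigned long pgste, unsigned long key,
				     unsigned long host_skey)
{
	pgste |= (key & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
	pgste |= (key & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
	/* merge host changed & referenced into the pgste */
	pgste |= (host_skey & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 52;
	return pgste;
}

int main(void)
{
	printf("pgste=%#lx\n", pgste_merge_key(0, 0xf6, 0x06));
	return 0;
}
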
@@ -821,6 +823,11 @@ EXPORT_SYMBOL(set_guest_storage_key);
821 823
822#else /* CONFIG_PGSTE */ 824#else /* CONFIG_PGSTE */
823 825
826static inline int page_table_with_pgste(struct page *page)
827{
828 return 0;
829}
830
824static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm, 831static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
825 unsigned long vmaddr) 832 unsigned long vmaddr)
826{ 833{
@@ -878,7 +885,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
878 pgtable_page_ctor(page); 885 pgtable_page_ctor(page);
879 atomic_set(&page->_mapcount, 1); 886 atomic_set(&page->_mapcount, 1);
880 table = (unsigned long *) page_to_phys(page); 887 table = (unsigned long *) page_to_phys(page);
881 clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE); 888 clear_table(table, _PAGE_INVALID, PAGE_SIZE);
882 spin_lock_bh(&mm->context.list_lock); 889 spin_lock_bh(&mm->context.list_lock);
883 list_add(&page->lru, &mm->context.pgtable_list); 890 list_add(&page->lru, &mm->context.pgtable_list);
884 } else { 891 } else {
@@ -897,12 +904,12 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
897 struct page *page; 904 struct page *page;
898 unsigned int bit, mask; 905 unsigned int bit, mask;
899 906
900 if (mm_has_pgste(mm)) { 907 page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
908 if (page_table_with_pgste(page)) {
901 gmap_disconnect_pgtable(mm, table); 909 gmap_disconnect_pgtable(mm, table);
902 return page_table_free_pgste(table); 910 return page_table_free_pgste(table);
903 } 911 }
904 /* Free 1K/2K page table fragment of a 4K page */ 912 /* Free 1K/2K page table fragment of a 4K page */
905 page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
906 bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t))); 913 bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
907 spin_lock_bh(&mm->context.list_lock); 914 spin_lock_bh(&mm->context.list_lock);
908 if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK) 915 if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
@@ -940,14 +947,14 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
940 unsigned int bit, mask; 947 unsigned int bit, mask;
941 948
942 mm = tlb->mm; 949 mm = tlb->mm;
943 if (mm_has_pgste(mm)) { 950 page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
951 if (page_table_with_pgste(page)) {
944 gmap_disconnect_pgtable(mm, table); 952 gmap_disconnect_pgtable(mm, table);
945 table = (unsigned long *) (__pa(table) | FRAG_MASK); 953 table = (unsigned long *) (__pa(table) | FRAG_MASK);
946 tlb_remove_table(tlb, table); 954 tlb_remove_table(tlb, table);
947 return; 955 return;
948 } 956 }
949 bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t))); 957 bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
950 page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
951 spin_lock_bh(&mm->context.list_lock); 958 spin_lock_bh(&mm->context.list_lock);
952 if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK) 959 if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
953 list_del(&page->lru); 960 list_del(&page->lru);
@@ -1007,7 +1014,6 @@ void tlb_table_flush(struct mmu_gather *tlb)
1007 struct mmu_table_batch **batch = &tlb->batch; 1014 struct mmu_table_batch **batch = &tlb->batch;
1008 1015
1009 if (*batch) { 1016 if (*batch) {
1010 __tlb_flush_mm(tlb->mm);
1011 call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu); 1017 call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
1012 *batch = NULL; 1018 *batch = NULL;
1013 } 1019 }
@@ -1017,11 +1023,12 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
1017{ 1023{
1018 struct mmu_table_batch **batch = &tlb->batch; 1024 struct mmu_table_batch **batch = &tlb->batch;
1019 1025
1026 tlb->mm->context.flush_mm = 1;
1020 if (*batch == NULL) { 1027 if (*batch == NULL) {
1021 *batch = (struct mmu_table_batch *) 1028 *batch = (struct mmu_table_batch *)
1022 __get_free_page(GFP_NOWAIT | __GFP_NOWARN); 1029 __get_free_page(GFP_NOWAIT | __GFP_NOWARN);
1023 if (*batch == NULL) { 1030 if (*batch == NULL) {
1024 __tlb_flush_mm(tlb->mm); 1031 __tlb_flush_mm_lazy(tlb->mm);
1025 tlb_remove_table_one(table); 1032 tlb_remove_table_one(table);
1026 return; 1033 return;
1027 } 1034 }
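
The two hunks above move the expensive mm flush out of tlb_table_flush(): tlb_remove_table() now just records that a flush is due (context.flush_mm = 1) and __tlb_flush_mm_lazy() performs it once, only if the flag is still set. A minimal model of that deferred-flush pattern; the names are illustrative, not the kernel API:

#include <stdio.h>

struct demo_mm {
	int flush_mm;		/* stands in for mm->context.flush_mm */
};

static void demo_flush_mm_lazy(struct demo_mm *mm)
{
	if (!mm->flush_mm)
		return;			/* nothing pending */
	printf("global TLB flush\n");	/* the expensive operation */
	mm->flush_mm = 0;
}

static void demo_remove_table(struct demo_mm *mm)
{
	mm->flush_mm = 1;		/* mark the flush due, do it later */
}

int main(void)
{
	struct demo_mm mm = { 0 };

	demo_remove_table(&mm);
	demo_remove_table(&mm);
	demo_flush_mm_lazy(&mm);	/* one flush covers both removals */
	demo_flush_mm_lazy(&mm);	/* no-op: already flushed */
	return 0;
}
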
@@ -1029,40 +1036,124 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
1029 } 1036 }
1030 (*batch)->tables[(*batch)->nr++] = table; 1037 (*batch)->tables[(*batch)->nr++] = table;
1031 if ((*batch)->nr == MAX_TABLE_BATCH) 1038 if ((*batch)->nr == MAX_TABLE_BATCH)
1032 tlb_table_flush(tlb); 1039 tlb_flush_mmu(tlb);
1033} 1040}
1034 1041
1035#ifdef CONFIG_TRANSPARENT_HUGEPAGE 1042#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1036void thp_split_vma(struct vm_area_struct *vma) 1043static inline void thp_split_vma(struct vm_area_struct *vma)
1037{ 1044{
1038 unsigned long addr; 1045 unsigned long addr;
1039 struct page *page;
1040 1046
1041 for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) { 1047 for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE)
1042 page = follow_page(vma, addr, FOLL_SPLIT); 1048 follow_page(vma, addr, FOLL_SPLIT);
1043 }
1044} 1049}
1045 1050
1046void thp_split_mm(struct mm_struct *mm) 1051static inline void thp_split_mm(struct mm_struct *mm)
1047{ 1052{
1048 struct vm_area_struct *vma = mm->mmap; 1053 struct vm_area_struct *vma;
1049 1054
1050 while (vma != NULL) { 1055 for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
1051 thp_split_vma(vma); 1056 thp_split_vma(vma);
1052 vma->vm_flags &= ~VM_HUGEPAGE; 1057 vma->vm_flags &= ~VM_HUGEPAGE;
1053 vma->vm_flags |= VM_NOHUGEPAGE; 1058 vma->vm_flags |= VM_NOHUGEPAGE;
1054 vma = vma->vm_next;
1055 } 1059 }
1060 mm->def_flags |= VM_NOHUGEPAGE;
1061}
1062#else
1063static inline void thp_split_mm(struct mm_struct *mm)
1064{
1056} 1065}
1057#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 1066#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1058 1067
1068static unsigned long page_table_realloc_pmd(struct mmu_gather *tlb,
1069 struct mm_struct *mm, pud_t *pud,
1070 unsigned long addr, unsigned long end)
1071{
1072 unsigned long next, *table, *new;
1073 struct page *page;
1074 pmd_t *pmd;
1075
1076 pmd = pmd_offset(pud, addr);
1077 do {
1078 next = pmd_addr_end(addr, end);
1079again:
1080 if (pmd_none_or_clear_bad(pmd))
1081 continue;
1082 table = (unsigned long *) pmd_deref(*pmd);
1083 page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
1084 if (page_table_with_pgste(page))
1085 continue;
1086 /* Allocate new page table with pgstes */
1087 new = page_table_alloc_pgste(mm, addr);
1088 if (!new) {
1089 mm->context.has_pgste = 0;
1090 continue;
1091 }
1092 spin_lock(&mm->page_table_lock);
1093 if (likely((unsigned long *) pmd_deref(*pmd) == table)) {
1094 /* Nuke pmd entry pointing to the "short" page table */
1095 pmdp_flush_lazy(mm, addr, pmd);
1096 pmd_clear(pmd);
1097 /* Copy ptes from old table to new table */
1098 memcpy(new, table, PAGE_SIZE/2);
1099 clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
1100 /* Establish new table */
1101 pmd_populate(mm, pmd, (pte_t *) new);
1102 /* Free old table with rcu, there might be a walker! */
1103 page_table_free_rcu(tlb, table);
1104 new = NULL;
1105 }
1106 spin_unlock(&mm->page_table_lock);
1107 if (new) {
1108 page_table_free_pgste(new);
1109 goto again;
1110 }
1111 } while (pmd++, addr = next, addr != end);
1112
1113 return addr;
1114}
1115
1116static unsigned long page_table_realloc_pud(struct mmu_gather *tlb,
1117 struct mm_struct *mm, pgd_t *pgd,
1118 unsigned long addr, unsigned long end)
1119{
1120 unsigned long next;
1121 pud_t *pud;
1122
1123 pud = pud_offset(pgd, addr);
1124 do {
1125 next = pud_addr_end(addr, end);
1126 if (pud_none_or_clear_bad(pud))
1127 continue;
1128 next = page_table_realloc_pmd(tlb, mm, pud, addr, next);
1129 } while (pud++, addr = next, addr != end);
1130
1131 return addr;
1132}
1133
1134static void page_table_realloc(struct mmu_gather *tlb, struct mm_struct *mm,
1135 unsigned long addr, unsigned long end)
1136{
1137 unsigned long next;
1138 pgd_t *pgd;
1139
1140 pgd = pgd_offset(mm, addr);
1141 do {
1142 next = pgd_addr_end(addr, end);
1143 if (pgd_none_or_clear_bad(pgd))
1144 continue;
1145 next = page_table_realloc_pud(tlb, mm, pgd, addr, next);
1146 } while (pgd++, addr = next, addr != end);
1147}
1148
1059/* 1149/*
1060 * switch on pgstes for its userspace process (for kvm) 1150 * switch on pgstes for its userspace process (for kvm)
1061 */ 1151 */
1062int s390_enable_sie(void) 1152int s390_enable_sie(void)
1063{ 1153{
1064 struct task_struct *tsk = current; 1154 struct task_struct *tsk = current;
1065 struct mm_struct *mm, *old_mm; 1155 struct mm_struct *mm = tsk->mm;
1156 struct mmu_gather tlb;
1066 1157
1067 /* Do we have switched amode? If no, we cannot do sie */ 1158 /* Do we have switched amode? If no, we cannot do sie */
1068 if (s390_user_mode == HOME_SPACE_MODE) 1159 if (s390_user_mode == HOME_SPACE_MODE)
@@ -1072,57 +1163,16 @@ int s390_enable_sie(void)
1072 if (mm_has_pgste(tsk->mm)) 1163 if (mm_has_pgste(tsk->mm))
1073 return 0; 1164 return 0;
1074 1165
1075 /* lets check if we are allowed to replace the mm */ 1166 down_write(&mm->mmap_sem);
1076 task_lock(tsk);
1077 if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
1078#ifdef CONFIG_AIO
1079 !hlist_empty(&tsk->mm->ioctx_list) ||
1080#endif
1081 tsk->mm != tsk->active_mm) {
1082 task_unlock(tsk);
1083 return -EINVAL;
1084 }
1085 task_unlock(tsk);
1086
1087 /* we copy the mm and let dup_mm create the page tables with_pgstes */
1088 tsk->mm->context.alloc_pgste = 1;
1089 /* make sure that both mms have a correct rss state */
1090 sync_mm_rss(tsk->mm);
1091 mm = dup_mm(tsk);
1092 tsk->mm->context.alloc_pgste = 0;
1093 if (!mm)
1094 return -ENOMEM;
1095
1096#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1097 /* split thp mappings and disable thp for future mappings */ 1167 /* split thp mappings and disable thp for future mappings */
1098 thp_split_mm(mm); 1168 thp_split_mm(mm);
1099 mm->def_flags |= VM_NOHUGEPAGE; 1169 /* Reallocate the page tables with pgstes */
1100#endif 1170 mm->context.has_pgste = 1;
1101 1171 tlb_gather_mmu(&tlb, mm, 0, TASK_SIZE);
1102 /* Now lets check again if something happened */ 1172 page_table_realloc(&tlb, mm, 0, TASK_SIZE);
1103 task_lock(tsk); 1173 tlb_finish_mmu(&tlb, 0, TASK_SIZE);
1104 if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 || 1174 up_write(&mm->mmap_sem);
1105#ifdef CONFIG_AIO 1175 return mm->context.has_pgste ? 0 : -ENOMEM;
1106 !hlist_empty(&tsk->mm->ioctx_list) ||
1107#endif
1108 tsk->mm != tsk->active_mm) {
1109 mmput(mm);
1110 task_unlock(tsk);
1111 return -EINVAL;
1112 }
1113
1114 /* ok, we are alone. No ptrace, no threads, etc. */
1115 old_mm = tsk->mm;
1116 tsk->mm = tsk->active_mm = mm;
1117 preempt_disable();
1118 update_mm(mm, tsk);
1119 atomic_inc(&mm->context.attach_count);
1120 atomic_dec(&old_mm->context.attach_count);
1121 cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
1122 preempt_enable();
1123 task_unlock(tsk);
1124 mmput(old_mm);
1125 return 0;
1126} 1176}
1127EXPORT_SYMBOL_GPL(s390_enable_sie); 1177EXPORT_SYMBOL_GPL(s390_enable_sie);
1128 1178
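
The page_table_realloc() helpers used by the new s390_enable_sie() walk pgd, pud and pmd levels with the usual *_addr_end() idiom: step to the next table boundary, clamped to the range end, with a -1 comparison that stays correct if end wraps to 0. A sketch of that boundary helper, assuming 1 MB segments (the s390 PMD_SIZE):

#include <stdio.h>

#define DEMO_PMD_SIZE (1UL << 20)		/* assumed 1 MB segment size */
#define DEMO_PMD_MASK (~(DEMO_PMD_SIZE - 1))

static unsigned long demo_pmd_addr_end(unsigned long addr, unsigned long end)
{
	unsigned long next = (addr + DEMO_PMD_SIZE) & DEMO_PMD_MASK;

	return (next - 1 < end - 1) ? next : end;	/* clamp, wrap-safe */
}

int main(void)
{
	printf("%#lx\n", demo_pmd_addr_end(0x180000, 0x500000));	/* 0x200000 */
	printf("%#lx\n", demo_pmd_addr_end(0x4c0000, 0x500000));	/* 0x500000 */
	return 0;
}
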
@@ -1198,9 +1248,9 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
1198 list_del(lh); 1248 list_del(lh);
1199 } 1249 }
1200 ptep = (pte_t *) pgtable; 1250 ptep = (pte_t *) pgtable;
1201 pte_val(*ptep) = _PAGE_TYPE_EMPTY; 1251 pte_val(*ptep) = _PAGE_INVALID;
1202 ptep++; 1252 ptep++;
1203 pte_val(*ptep) = _PAGE_TYPE_EMPTY; 1253 pte_val(*ptep) = _PAGE_INVALID;
1204 return pgtable; 1254 return pgtable;
1205} 1255}
1206#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 1256#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 8b268fcc4612..bcfb70b60be6 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -69,7 +69,7 @@ static pte_t __ref *vmem_pte_alloc(unsigned long address)
69 pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t)); 69 pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
70 if (!pte) 70 if (!pte)
71 return NULL; 71 return NULL;
72 clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY, 72 clear_table((unsigned long *) pte, _PAGE_INVALID,
73 PTRS_PER_PTE * sizeof(pte_t)); 73 PTRS_PER_PTE * sizeof(pte_t));
74 return pte; 74 return pte;
75} 75}
@@ -101,7 +101,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
101 !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) { 101 !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) {
102 pud_val(*pu_dir) = __pa(address) | 102 pud_val(*pu_dir) = __pa(address) |
103 _REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE | 103 _REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE |
104 (ro ? _REGION_ENTRY_RO : 0); 104 (ro ? _REGION_ENTRY_PROTECT : 0);
105 address += PUD_SIZE; 105 address += PUD_SIZE;
106 continue; 106 continue;
107 } 107 }
@@ -118,7 +118,8 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
118 !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) { 118 !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) {
119 pmd_val(*pm_dir) = __pa(address) | 119 pmd_val(*pm_dir) = __pa(address) |
120 _SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE | 120 _SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE |
121 (ro ? _SEGMENT_ENTRY_RO : 0); 121 _SEGMENT_ENTRY_YOUNG |
122 (ro ? _SEGMENT_ENTRY_PROTECT : 0);
122 address += PMD_SIZE; 123 address += PMD_SIZE;
123 continue; 124 continue;
124 } 125 }
@@ -131,7 +132,8 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
131 } 132 }
132 133
133 pt_dir = pte_offset_kernel(pm_dir, address); 134 pt_dir = pte_offset_kernel(pm_dir, address);
134 pte_val(*pt_dir) = __pa(address) | (ro ? _PAGE_RO : 0); 135 pte_val(*pt_dir) = __pa(address) |
136 pgprot_val(ro ? PAGE_KERNEL_RO : PAGE_KERNEL);
135 address += PAGE_SIZE; 137 address += PAGE_SIZE;
136 } 138 }
137 ret = 0; 139 ret = 0;
@@ -154,7 +156,7 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
154 pte_t *pt_dir; 156 pte_t *pt_dir;
155 pte_t pte; 157 pte_t pte;
156 158
157 pte_val(pte) = _PAGE_TYPE_EMPTY; 159 pte_val(pte) = _PAGE_INVALID;
158 while (address < end) { 160 while (address < end) {
159 pg_dir = pgd_offset_k(address); 161 pg_dir = pgd_offset_k(address);
160 if (pgd_none(*pg_dir)) { 162 if (pgd_none(*pg_dir)) {
@@ -255,7 +257,8 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
255 new_page =__pa(vmem_alloc_pages(0)); 257 new_page =__pa(vmem_alloc_pages(0));
256 if (!new_page) 258 if (!new_page)
257 goto out; 259 goto out;
258 pte_val(*pt_dir) = __pa(new_page); 260 pte_val(*pt_dir) =
261 __pa(new_page) | pgprot_val(PAGE_KERNEL);
259 } 262 }
260 address += PAGE_SIZE; 263 address += PAGE_SIZE;
261 } 264 }
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index ffeb17ce7f31..04e1b6a85362 100644
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -346,16 +346,15 @@ static const struct file_operations timer_enabled_fops = {
346}; 346};
347 347
348 348
349static int oprofile_create_hwsampling_files(struct super_block *sb, 349static int oprofile_create_hwsampling_files(struct dentry *root)
350 struct dentry *root)
351{ 350{
352 struct dentry *dir; 351 struct dentry *dir;
353 352
354 dir = oprofilefs_mkdir(sb, root, "timer"); 353 dir = oprofilefs_mkdir(root, "timer");
355 if (!dir) 354 if (!dir)
356 return -EINVAL; 355 return -EINVAL;
357 356
358 oprofilefs_create_file(sb, dir, "enabled", &timer_enabled_fops); 357 oprofilefs_create_file(dir, "enabled", &timer_enabled_fops);
359 358
360 if (!hwsampler_available) 359 if (!hwsampler_available)
361 return 0; 360 return 0;
@@ -376,17 +375,17 @@ static int oprofile_create_hwsampling_files(struct super_block *sb,
376 * and can only be set to 0. 375 * and can only be set to 0.
377 */ 376 */
378 377
379 dir = oprofilefs_mkdir(sb, root, "0"); 378 dir = oprofilefs_mkdir(root, "0");
380 if (!dir) 379 if (!dir)
381 return -EINVAL; 380 return -EINVAL;
382 381
383 oprofilefs_create_file(sb, dir, "enabled", &hwsampler_fops); 382 oprofilefs_create_file(dir, "enabled", &hwsampler_fops);
384 oprofilefs_create_file(sb, dir, "event", &zero_fops); 383 oprofilefs_create_file(dir, "event", &zero_fops);
385 oprofilefs_create_file(sb, dir, "count", &hw_interval_fops); 384 oprofilefs_create_file(dir, "count", &hw_interval_fops);
386 oprofilefs_create_file(sb, dir, "unit_mask", &zero_fops); 385 oprofilefs_create_file(dir, "unit_mask", &zero_fops);
387 oprofilefs_create_file(sb, dir, "kernel", &kernel_fops); 386 oprofilefs_create_file(dir, "kernel", &kernel_fops);
388 oprofilefs_create_file(sb, dir, "user", &user_fops); 387 oprofilefs_create_file(dir, "user", &user_fops);
389 oprofilefs_create_ulong(sb, dir, "hw_sdbt_blocks", 388 oprofilefs_create_ulong(dir, "hw_sdbt_blocks",
390 &oprofile_sdbt_blocks); 389 &oprofile_sdbt_blocks);
391 390
392 } else { 391 } else {
@@ -396,19 +395,19 @@ static int oprofile_create_hwsampling_files(struct super_block *sb,
396 * space tools. The /dev/oprofile/hwsampling fs is 395 * space tools. The /dev/oprofile/hwsampling fs is
397 * provided in that case. 396 * provided in that case.
398 */ 397 */
399 dir = oprofilefs_mkdir(sb, root, "hwsampling"); 398 dir = oprofilefs_mkdir(root, "hwsampling");
400 if (!dir) 399 if (!dir)
401 return -EINVAL; 400 return -EINVAL;
402 401
403 oprofilefs_create_file(sb, dir, "hwsampler", 402 oprofilefs_create_file(dir, "hwsampler",
404 &hwsampler_fops); 403 &hwsampler_fops);
405 oprofilefs_create_file(sb, dir, "hw_interval", 404 oprofilefs_create_file(dir, "hw_interval",
406 &hw_interval_fops); 405 &hw_interval_fops);
407 oprofilefs_create_ro_ulong(sb, dir, "hw_min_interval", 406 oprofilefs_create_ro_ulong(dir, "hw_min_interval",
408 &oprofile_min_interval); 407 &oprofile_min_interval);
409 oprofilefs_create_ro_ulong(sb, dir, "hw_max_interval", 408 oprofilefs_create_ro_ulong(dir, "hw_max_interval",
410 &oprofile_max_interval); 409 &oprofile_max_interval);
411 oprofilefs_create_ulong(sb, dir, "hw_sdbt_blocks", 410 oprofilefs_create_ulong(dir, "hw_sdbt_blocks",
412 &oprofile_sdbt_blocks); 411 &oprofile_sdbt_blocks);
413 } 412 }
414 return 0; 413 return 0;
@@ -440,7 +439,7 @@ static int oprofile_hwsampler_init(struct oprofile_operations *ops)
440 switch (id.machine) { 439 switch (id.machine) {
441 case 0x2097: case 0x2098: ops->cpu_type = "s390/z10"; break; 440 case 0x2097: case 0x2098: ops->cpu_type = "s390/z10"; break;
442 case 0x2817: case 0x2818: ops->cpu_type = "s390/z196"; break; 441 case 0x2817: case 0x2818: ops->cpu_type = "s390/z196"; break;
443 case 0x2827: ops->cpu_type = "s390/zEC12"; break; 442 case 0x2827: case 0x2828: ops->cpu_type = "s390/zEC12"; break;
444 default: return -ENODEV; 443 default: return -ENODEV;
445 } 444 }
446 } 445 }
diff --git a/arch/s390/pci/Makefile b/arch/s390/pci/Makefile
index 086a2e37935d..a9e1dc4ae442 100644
--- a/arch/s390/pci/Makefile
+++ b/arch/s390/pci/Makefile
@@ -2,5 +2,5 @@
2# Makefile for the s390 PCI subsystem. 2# Makefile for the s390 PCI subsystem.
3# 3#
4 4
5obj-$(CONFIG_PCI) += pci.o pci_dma.o pci_clp.o pci_msi.o pci_sysfs.o \ 5obj-$(CONFIG_PCI) += pci.o pci_dma.o pci_clp.o pci_sysfs.o \
6 pci_event.o pci_debug.o pci_insn.o 6 pci_event.o pci_debug.o pci_insn.o
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index e2956ad39a4f..f17a8343e360 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -42,45 +42,26 @@
42#define SIC_IRQ_MODE_SINGLE 1 42#define SIC_IRQ_MODE_SINGLE 1
43 43
44#define ZPCI_NR_DMA_SPACES 1 44#define ZPCI_NR_DMA_SPACES 1
45#define ZPCI_MSI_VEC_BITS 6
46#define ZPCI_NR_DEVICES CONFIG_PCI_NR_FUNCTIONS 45#define ZPCI_NR_DEVICES CONFIG_PCI_NR_FUNCTIONS
47 46
48/* list of all detected zpci devices */ 47/* list of all detected zpci devices */
49LIST_HEAD(zpci_list); 48static LIST_HEAD(zpci_list);
50EXPORT_SYMBOL_GPL(zpci_list); 49static DEFINE_SPINLOCK(zpci_list_lock);
51DEFINE_MUTEX(zpci_list_lock);
52EXPORT_SYMBOL_GPL(zpci_list_lock);
53 50
54static struct pci_hp_callback_ops *hotplug_ops; 51static void zpci_enable_irq(struct irq_data *data);
52static void zpci_disable_irq(struct irq_data *data);
55 53
56static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES); 54static struct irq_chip zpci_irq_chip = {
57static DEFINE_SPINLOCK(zpci_domain_lock); 55 .name = "zPCI",
58 56 .irq_unmask = zpci_enable_irq,
59struct callback { 57 .irq_mask = zpci_disable_irq,
60 irq_handler_t handler;
61 void *data;
62}; 58};
63 59
64struct zdev_irq_map { 60static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
65 unsigned long aibv; /* AI bit vector */ 61static DEFINE_SPINLOCK(zpci_domain_lock);
66 int msi_vecs; /* consecutive MSI-vectors used */
67 int __unused;
68 struct callback cb[ZPCI_NR_MSI_VECS]; /* callback handler array */
69 spinlock_t lock; /* protect callbacks against de-reg */
70};
71
72struct intr_bucket {
73 /* amap of adapters, one bit per dev, corresponds to one irq nr */
74 unsigned long *alloc;
75 /* AI summary bit, global page for all devices */
76 unsigned long *aisb;
77 /* pointer to aibv and callback data in zdev */
78 struct zdev_irq_map *imap[ZPCI_NR_DEVICES];
79 /* protects the whole bucket struct */
80 spinlock_t lock;
81};
82 62
83static struct intr_bucket *bucket; 63static struct airq_iv *zpci_aisb_iv;
64static struct airq_iv *zpci_aibv[ZPCI_NR_DEVICES];
84 65
85/* Adapter interrupt definitions */ 66/* Adapter interrupt definitions */
86static void zpci_irq_handler(struct airq_struct *airq); 67static void zpci_irq_handler(struct airq_struct *airq);
@@ -96,27 +77,8 @@ static DECLARE_BITMAP(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES);
96struct zpci_iomap_entry *zpci_iomap_start; 77struct zpci_iomap_entry *zpci_iomap_start;
97EXPORT_SYMBOL_GPL(zpci_iomap_start); 78EXPORT_SYMBOL_GPL(zpci_iomap_start);
98 79
99/* highest irq summary bit */
100static int __read_mostly aisb_max;
101
102static struct kmem_cache *zdev_irq_cache;
103static struct kmem_cache *zdev_fmb_cache; 80static struct kmem_cache *zdev_fmb_cache;
104 81
105static inline int irq_to_msi_nr(unsigned int irq)
106{
107 return irq & ZPCI_MSI_MASK;
108}
109
110static inline int irq_to_dev_nr(unsigned int irq)
111{
112 return irq >> ZPCI_MSI_VEC_BITS;
113}
114
115static inline struct zdev_irq_map *get_imap(unsigned int irq)
116{
117 return bucket->imap[irq_to_dev_nr(irq)];
118}
119
120struct zpci_dev *get_zdev(struct pci_dev *pdev) 82struct zpci_dev *get_zdev(struct pci_dev *pdev)
121{ 83{
122 return (struct zpci_dev *) pdev->sysdata; 84 return (struct zpci_dev *) pdev->sysdata;
@@ -126,22 +88,17 @@ struct zpci_dev *get_zdev_by_fid(u32 fid)
126{ 88{
127 struct zpci_dev *tmp, *zdev = NULL; 89 struct zpci_dev *tmp, *zdev = NULL;
128 90
129 mutex_lock(&zpci_list_lock); 91 spin_lock(&zpci_list_lock);
130 list_for_each_entry(tmp, &zpci_list, entry) { 92 list_for_each_entry(tmp, &zpci_list, entry) {
131 if (tmp->fid == fid) { 93 if (tmp->fid == fid) {
132 zdev = tmp; 94 zdev = tmp;
133 break; 95 break;
134 } 96 }
135 } 97 }
136 mutex_unlock(&zpci_list_lock); 98 spin_unlock(&zpci_list_lock);
137 return zdev; 99 return zdev;
138} 100}
139 101
140bool zpci_fid_present(u32 fid)
141{
142 return (get_zdev_by_fid(fid) != NULL) ? true : false;
143}
144
145static struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus) 102static struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus)
146{ 103{
147 return (bus && bus->sysdata) ? (struct zpci_dev *) bus->sysdata : NULL; 104 return (bus && bus->sysdata) ? (struct zpci_dev *) bus->sysdata : NULL;
@@ -160,8 +117,7 @@ int pci_proc_domain(struct pci_bus *bus)
160EXPORT_SYMBOL_GPL(pci_proc_domain); 117EXPORT_SYMBOL_GPL(pci_proc_domain);
161 118
162/* Modify PCI: Register adapter interruptions */ 119/* Modify PCI: Register adapter interruptions */
163static int zpci_register_airq(struct zpci_dev *zdev, unsigned int aisb, 120static int zpci_set_airq(struct zpci_dev *zdev)
164 u64 aibv)
165{ 121{
166 u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT); 122 u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT);
167 struct zpci_fib *fib; 123 struct zpci_fib *fib;
@@ -172,14 +128,14 @@ static int zpci_register_airq(struct zpci_dev *zdev, unsigned int aisb,
172 return -ENOMEM; 128 return -ENOMEM;
173 129
174 fib->isc = PCI_ISC; 130 fib->isc = PCI_ISC;
175 fib->noi = zdev->irq_map->msi_vecs;
176 fib->sum = 1; /* enable summary notifications */ 131 fib->sum = 1; /* enable summary notifications */
177 fib->aibv = aibv; 132 fib->noi = airq_iv_end(zdev->aibv);
178 fib->aibvo = 0; /* every function has its own page */ 133 fib->aibv = (unsigned long) zdev->aibv->vector;
179 fib->aisb = (u64) bucket->aisb + aisb / 8; 134 fib->aibvo = 0; /* each zdev has its own interrupt vector */
180 fib->aisbo = aisb & ZPCI_MSI_MASK; 135 fib->aisb = (unsigned long) zpci_aisb_iv->vector + (zdev->aisb/64)*8;
136 fib->aisbo = zdev->aisb & 63;
181 137
182 rc = s390pci_mod_fc(req, fib); 138 rc = zpci_mod_fc(req, fib);
183 pr_debug("%s mpcifc returned noi: %d\n", __func__, fib->noi); 139 pr_debug("%s mpcifc returned noi: %d\n", __func__, fib->noi);
184 140
185 free_page((unsigned long) fib); 141 free_page((unsigned long) fib);
@@ -209,7 +165,7 @@ static int mod_pci(struct zpci_dev *zdev, int fn, u8 dmaas, struct mod_pci_args
209 fib->iota = args->iota; 165 fib->iota = args->iota;
210 fib->fmb_addr = args->fmb_addr; 166 fib->fmb_addr = args->fmb_addr;
211 167
212 rc = s390pci_mod_fc(req, fib); 168 rc = zpci_mod_fc(req, fib);
213 free_page((unsigned long) fib); 169 free_page((unsigned long) fib);
214 return rc; 170 return rc;
215} 171}
@@ -234,7 +190,7 @@ int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
234} 190}
235 191
236/* Modify PCI: Unregister adapter interruptions */ 192/* Modify PCI: Unregister adapter interruptions */
237static int zpci_unregister_airq(struct zpci_dev *zdev) 193static int zpci_clear_airq(struct zpci_dev *zdev)
238{ 194{
239 struct mod_pci_args args = { 0, 0, 0, 0 }; 195 struct mod_pci_args args = { 0, 0, 0, 0 };
240 196
@@ -283,7 +239,7 @@ static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
283 u64 data; 239 u64 data;
284 int rc; 240 int rc;
285 241
286 rc = s390pci_load(&data, req, offset); 242 rc = zpci_load(&data, req, offset);
287 if (!rc) { 243 if (!rc) {
288 data = data << ((8 - len) * 8); 244 data = data << ((8 - len) * 8);
289 data = le64_to_cpu(data); 245 data = le64_to_cpu(data);
@@ -301,25 +257,46 @@ static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
301 257
302 data = cpu_to_le64(data); 258 data = cpu_to_le64(data);
303 data = data >> ((8 - len) * 8); 259 data = data >> ((8 - len) * 8);
304 rc = s390pci_store(data, req, offset); 260 rc = zpci_store(data, req, offset);
305 return rc; 261 return rc;
306} 262}
307 263
308void enable_irq(unsigned int irq) 264static int zpci_msi_set_mask_bits(struct msi_desc *msi, u32 mask, u32 flag)
265{
266 int offset, pos;
267 u32 mask_bits;
268
269 if (msi->msi_attrib.is_msix) {
270 offset = msi->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
271 PCI_MSIX_ENTRY_VECTOR_CTRL;
272 msi->masked = readl(msi->mask_base + offset);
273 writel(flag, msi->mask_base + offset);
274 } else if (msi->msi_attrib.maskbit) {
275 pos = (long) msi->mask_base;
276 pci_read_config_dword(msi->dev, pos, &mask_bits);
277 mask_bits &= ~(mask);
278 mask_bits |= flag & mask;
279 pci_write_config_dword(msi->dev, pos, mask_bits);
280 } else
281 return 0;
282
283 msi->msi_attrib.maskbit = !!flag;
284 return 1;
285}
286
287static void zpci_enable_irq(struct irq_data *data)
309{ 288{
310 struct msi_desc *msi = irq_get_msi_desc(irq); 289 struct msi_desc *msi = irq_get_msi_desc(data->irq);
311 290
312 zpci_msi_set_mask_bits(msi, 1, 0); 291 zpci_msi_set_mask_bits(msi, 1, 0);
313} 292}
314EXPORT_SYMBOL_GPL(enable_irq);
315 293
316void disable_irq(unsigned int irq) 294static void zpci_disable_irq(struct irq_data *data)
317{ 295{
318 struct msi_desc *msi = irq_get_msi_desc(irq); 296 struct msi_desc *msi = irq_get_msi_desc(data->irq);
319 297
320 zpci_msi_set_mask_bits(msi, 1, 1); 298 zpci_msi_set_mask_bits(msi, 1, 1);
321} 299}
322EXPORT_SYMBOL_GPL(disable_irq);
323 300
324void pcibios_fixup_bus(struct pci_bus *bus) 301void pcibios_fixup_bus(struct pci_bus *bus)
325{ 302{
@@ -404,152 +381,147 @@ static struct pci_ops pci_root_ops = {
404 .write = pci_write, 381 .write = pci_write,
405}; 382};
406 383
407/* store the last handled bit to implement fair scheduling of devices */
408static DEFINE_PER_CPU(unsigned long, next_sbit);
409
410static void zpci_irq_handler(struct airq_struct *airq) 384static void zpci_irq_handler(struct airq_struct *airq)
411{ 385{
412 unsigned long sbit, mbit, last = 0, start = __get_cpu_var(next_sbit); 386 unsigned long si, ai;
413 int rescan = 0, max = aisb_max; 387 struct airq_iv *aibv;
414 struct zdev_irq_map *imap; 388 int irqs_on = 0;
415 389
416 inc_irq_stat(IRQIO_PCI); 390 inc_irq_stat(IRQIO_PCI);
417 sbit = start; 391 for (si = 0;;) {
418 392 /* Scan adapter summary indicator bit vector */
419scan: 393 si = airq_iv_scan(zpci_aisb_iv, si, airq_iv_end(zpci_aisb_iv));
420 /* find summary_bit */ 394 if (si == -1UL) {
421 for_each_set_bit_left_cont(sbit, bucket->aisb, max) { 395 if (irqs_on++)
422 clear_bit(63 - (sbit & 63), bucket->aisb + (sbit >> 6)); 396 /* End of second scan with interrupts on. */
423 last = sbit; 397 break;
398 /* First scan complete, reenable interrupts. */
399 zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
400 si = 0;
401 continue;
402 }
424 403
425 /* find vector bit */ 404 /* Scan the adapter interrupt vector for this device. */
426 imap = bucket->imap[sbit]; 405 aibv = zpci_aibv[si];
427 for_each_set_bit_left(mbit, &imap->aibv, imap->msi_vecs) { 406 for (ai = 0;;) {
407 ai = airq_iv_scan(aibv, ai, airq_iv_end(aibv));
408 if (ai == -1UL)
409 break;
428 inc_irq_stat(IRQIO_MSI); 410 inc_irq_stat(IRQIO_MSI);
429 clear_bit(63 - mbit, &imap->aibv); 411 airq_iv_lock(aibv, ai);
430 412 generic_handle_irq(airq_iv_get_data(aibv, ai));
431 spin_lock(&imap->lock); 413 airq_iv_unlock(aibv, ai);
432 if (imap->cb[mbit].handler)
433 imap->cb[mbit].handler(mbit,
434 imap->cb[mbit].data);
435 spin_unlock(&imap->lock);
436 } 414 }
437 } 415 }
438
439 if (rescan)
440 goto out;
441
442 /* scan the skipped bits */
443 if (start > 0) {
444 sbit = 0;
445 max = start;
446 start = 0;
447 goto scan;
448 }
449
450 /* enable interrupts again */
451 set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
452
453 /* check again to not lose initiative */
454 rmb();
455 max = aisb_max;
456 sbit = find_first_bit_left(bucket->aisb, max);
457 if (sbit != max) {
458 rescan++;
459 goto scan;
460 }
461out:
462 /* store next device bit to scan */
463 __get_cpu_var(next_sbit) = (++last >= aisb_max) ? 0 : last;
464} 416}
465 417
466/* msi_vecs - number of requested interrupts, 0 place function to error state */ 418int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
467static int zpci_setup_msi(struct pci_dev *pdev, int msi_vecs)
468{ 419{
469 struct zpci_dev *zdev = get_zdev(pdev); 420 struct zpci_dev *zdev = get_zdev(pdev);
470 unsigned int aisb, msi_nr; 421 unsigned int hwirq, irq, msi_vecs;
422 unsigned long aisb;
471 struct msi_desc *msi; 423 struct msi_desc *msi;
424 struct msi_msg msg;
472 int rc; 425 int rc;
473 426
474 /* store the number of used MSI vectors */ 427 pr_debug("%s: requesting %d MSI-X interrupts...", __func__, nvec);
475 zdev->irq_map->msi_vecs = min(msi_vecs, ZPCI_NR_MSI_VECS); 428 if (type != PCI_CAP_ID_MSIX && type != PCI_CAP_ID_MSI)
476 429 return -EINVAL;
477 spin_lock(&bucket->lock); 430 msi_vecs = min(nvec, ZPCI_MSI_VEC_MAX);
478 aisb = find_first_zero_bit(bucket->alloc, PAGE_SIZE); 431 msi_vecs = min_t(unsigned int, msi_vecs, CONFIG_PCI_NR_MSI);
479 /* alloc map exhausted? */
480 if (aisb == PAGE_SIZE) {
481 spin_unlock(&bucket->lock);
482 return -EIO;
483 }
484 set_bit(aisb, bucket->alloc);
485 spin_unlock(&bucket->lock);
486 432
433 /* Allocate adapter summary indicator bit */
434 rc = -EIO;
435 aisb = airq_iv_alloc_bit(zpci_aisb_iv);
436 if (aisb == -1UL)
437 goto out;
487 zdev->aisb = aisb; 438 zdev->aisb = aisb;
488 if (aisb + 1 > aisb_max)
489 aisb_max = aisb + 1;
490 439
491 /* wire up IRQ shortcut pointer */ 440 /* Create adapter interrupt vector */
492 bucket->imap[zdev->aisb] = zdev->irq_map; 441 rc = -ENOMEM;
493 pr_debug("%s: imap[%u] linked to %p\n", __func__, zdev->aisb, zdev->irq_map); 442 zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA | AIRQ_IV_BITLOCK);
443 if (!zdev->aibv)
444 goto out_si;
494 445
495 /* TODO: irq number 0 wont be found if we return less than requested MSIs. 446 /* Wire up shortcut pointer */
496 * ignore it for now and fix in common code. 447 zpci_aibv[aisb] = zdev->aibv;
497 */
498 msi_nr = aisb << ZPCI_MSI_VEC_BITS;
499 448
449 /* Request MSI interrupts */
450 hwirq = 0;
500 list_for_each_entry(msi, &pdev->msi_list, list) { 451 list_for_each_entry(msi, &pdev->msi_list, list) {
501 rc = zpci_setup_msi_irq(zdev, msi, msi_nr, 452 rc = -EIO;
502 aisb << ZPCI_MSI_VEC_BITS); 453 irq = irq_alloc_desc(0); /* Alloc irq on node 0 */
454 if (irq == NO_IRQ)
455 goto out_msi;
456 rc = irq_set_msi_desc(irq, msi);
503 if (rc) 457 if (rc)
504 return rc; 458 goto out_msi;
505 msi_nr++; 459 irq_set_chip_and_handler(irq, &zpci_irq_chip,
460 handle_simple_irq);
461 msg.data = hwirq;
462 msg.address_lo = zdev->msi_addr & 0xffffffff;
463 msg.address_hi = zdev->msi_addr >> 32;
464 write_msi_msg(irq, &msg);
465 airq_iv_set_data(zdev->aibv, hwirq, irq);
466 hwirq++;
506 } 467 }
507 468
508 rc = zpci_register_airq(zdev, aisb, (u64) &zdev->irq_map->aibv); 469 /* Enable adapter interrupts */
509 if (rc) { 470 rc = zpci_set_airq(zdev);
510 clear_bit(aisb, bucket->alloc); 471 if (rc)
511 dev_err(&pdev->dev, "register MSI failed with: %d\n", rc); 472 goto out_msi;
512 return rc; 473
474 return (msi_vecs == nvec) ? 0 : msi_vecs;
475
476out_msi:
477 list_for_each_entry(msi, &pdev->msi_list, list) {
478 if (hwirq-- == 0)
479 break;
480 irq_set_msi_desc(msi->irq, NULL);
481 irq_free_desc(msi->irq);
482 msi->msg.address_lo = 0;
483 msi->msg.address_hi = 0;
484 msi->msg.data = 0;
485 msi->irq = 0;
513 } 486 }
514 return (zdev->irq_map->msi_vecs == msi_vecs) ? 487 zpci_aibv[aisb] = NULL;
515 0 : zdev->irq_map->msi_vecs; 488 airq_iv_release(zdev->aibv);
489out_si:
490 airq_iv_free_bit(zpci_aisb_iv, aisb);
491out:
492 dev_err(&pdev->dev, "register MSI failed with: %d\n", rc);
493 return rc;
516} 494}
517 495
518static void zpci_teardown_msi(struct pci_dev *pdev) 496void arch_teardown_msi_irqs(struct pci_dev *pdev)
519{ 497{
520 struct zpci_dev *zdev = get_zdev(pdev); 498 struct zpci_dev *zdev = get_zdev(pdev);
521 struct msi_desc *msi; 499 struct msi_desc *msi;
522 int aisb, rc; 500 int rc;
523 501
524 rc = zpci_unregister_airq(zdev); 502 pr_info("%s: on pdev: %p\n", __func__, pdev);
503
504 /* Disable adapter interrupts */
505 rc = zpci_clear_airq(zdev);
525 if (rc) { 506 if (rc) {
526 dev_err(&pdev->dev, "deregister MSI failed with: %d\n", rc); 507 dev_err(&pdev->dev, "deregister MSI failed with: %d\n", rc);
527 return; 508 return;
528 } 509 }
529 510
530 msi = list_first_entry(&pdev->msi_list, struct msi_desc, list); 511 /* Release MSI interrupts */
531 aisb = irq_to_dev_nr(msi->irq); 512 list_for_each_entry(msi, &pdev->msi_list, list) {
532 513 zpci_msi_set_mask_bits(msi, 1, 1);
533 list_for_each_entry(msi, &pdev->msi_list, list) 514 irq_set_msi_desc(msi->irq, NULL);
534 zpci_teardown_msi_irq(zdev, msi); 515 irq_free_desc(msi->irq);
535 516 msi->msg.address_lo = 0;
536 clear_bit(aisb, bucket->alloc); 517 msi->msg.address_hi = 0;
537 if (aisb + 1 == aisb_max) 518 msi->msg.data = 0;
538 aisb_max--; 519 msi->irq = 0;
539} 520 }
540
541int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
542{
543 pr_debug("%s: requesting %d MSI-X interrupts...", __func__, nvec);
544 if (type != PCI_CAP_ID_MSIX && type != PCI_CAP_ID_MSI)
545 return -EINVAL;
546 return zpci_setup_msi(pdev, nvec);
547}
548 521
549void arch_teardown_msi_irqs(struct pci_dev *pdev) 522 zpci_aibv[zdev->aisb] = NULL;
550{ 523 airq_iv_release(zdev->aibv);
551 pr_info("%s: on pdev: %p\n", __func__, pdev); 524 airq_iv_free_bit(zpci_aisb_iv, zdev->aisb);
552 zpci_teardown_msi(pdev);
553} 525}
554 526
555static void zpci_map_resources(struct zpci_dev *zdev) 527static void zpci_map_resources(struct zpci_dev *zdev)
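
The reworked zpci_irq_handler() earlier in this hunk dispatches in two levels: a summary bit vector with one bit per function (zpci_aisb_iv) says which device has work, and that device's own vector (zpci_aibv[si]) says which MSI fired; the handler also rescans once after re-enabling the interrupt so a bit set in that window is not lost. A user-space model of the two-level dispatch, with plain arrays standing in for the airq_iv structures and the re-enable rescan omitted:

#include <stdio.h>

#define NDEV 4	/* summary bits, one per device */
#define NVEC 8	/* vector bits, one per MSI */

static int summary[NDEV];
static int vector[NDEV][NVEC];

static void demo_irq_handler(void)
{
	for (int si = 0; si < NDEV; si++) {
		if (!summary[si])
			continue;
		summary[si] = 0;	/* like clearing the aisb bit */
		for (int ai = 0; ai < NVEC; ai++) {
			if (!vector[si][ai])
				continue;
			vector[si][ai] = 0;
			printf("dispatch dev %d vec %d\n", si, ai);
		}
	}
}

int main(void)
{
	summary[2] = 1;		/* device 2 signalled... */
	vector[2][5] = 1;	/* ...on its MSI vector 5 */
	demo_irq_handler();
	return 0;
}
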
@@ -564,8 +536,6 @@ static void zpci_map_resources(struct zpci_dev *zdev)
564 continue; 536 continue;
565 pdev->resource[i].start = (resource_size_t) pci_iomap(pdev, i, 0); 537 pdev->resource[i].start = (resource_size_t) pci_iomap(pdev, i, 0);
566 pdev->resource[i].end = pdev->resource[i].start + len - 1; 538 pdev->resource[i].end = pdev->resource[i].start + len - 1;
567 pr_debug("BAR%i: -> start: %Lx end: %Lx\n",
568 i, pdev->resource[i].start, pdev->resource[i].end);
569 } 539 }
570} 540}
571 541
@@ -589,162 +559,47 @@ struct zpci_dev *zpci_alloc_device(void)
589 559
590 /* Alloc memory for our private pci device data */ 560 /* Alloc memory for our private pci device data */
591 zdev = kzalloc(sizeof(*zdev), GFP_KERNEL); 561 zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
592 if (!zdev) 562 return zdev ? : ERR_PTR(-ENOMEM);
593 return ERR_PTR(-ENOMEM);
594
595 /* Alloc aibv & callback space */
596 zdev->irq_map = kmem_cache_zalloc(zdev_irq_cache, GFP_KERNEL);
597 if (!zdev->irq_map)
598 goto error;
599 WARN_ON((u64) zdev->irq_map & 0xff);
600 return zdev;
601
602error:
603 kfree(zdev);
604 return ERR_PTR(-ENOMEM);
605} 563}
606 564
607void zpci_free_device(struct zpci_dev *zdev) 565void zpci_free_device(struct zpci_dev *zdev)
608{ 566{
609 kmem_cache_free(zdev_irq_cache, zdev->irq_map);
610 kfree(zdev); 567 kfree(zdev);
611} 568}
612 569
613/*
614 * Too late for any s390 specific setup, since interrupts must be set up
615 * already which requires DMA setup too and the pci scan will access the
616 * config space, which only works if the function handle is enabled.
617 */
618int pcibios_enable_device(struct pci_dev *pdev, int mask)
619{
620 struct resource *res;
621 u16 cmd;
622 int i;
623
624 pci_read_config_word(pdev, PCI_COMMAND, &cmd);
625
626 for (i = 0; i < PCI_BAR_COUNT; i++) {
627 res = &pdev->resource[i];
628
629 if (res->flags & IORESOURCE_IO)
630 return -EINVAL;
631
632 if (res->flags & IORESOURCE_MEM)
633 cmd |= PCI_COMMAND_MEMORY;
634 }
635 pci_write_config_word(pdev, PCI_COMMAND, cmd);
636 return 0;
637}
638
639int pcibios_add_platform_entries(struct pci_dev *pdev) 570int pcibios_add_platform_entries(struct pci_dev *pdev)
640{ 571{
641 return zpci_sysfs_add_device(&pdev->dev); 572 return zpci_sysfs_add_device(&pdev->dev);
642} 573}
643 574
644int zpci_request_irq(unsigned int irq, irq_handler_t handler, void *data)
645{
646 int msi_nr = irq_to_msi_nr(irq);
647 struct zdev_irq_map *imap;
648 struct msi_desc *msi;
649
650 msi = irq_get_msi_desc(irq);
651 if (!msi)
652 return -EIO;
653
654 imap = get_imap(irq);
655 spin_lock_init(&imap->lock);
656
657 pr_debug("%s: register handler for IRQ:MSI %d:%d\n", __func__, irq >> 6, msi_nr);
658 imap->cb[msi_nr].handler = handler;
659 imap->cb[msi_nr].data = data;
660
661 /*
662 * The generic MSI code returns with the interrupt disabled on the
663 * card, using the MSI mask bits. Firmware doesn't appear to unmask
664 * at that level, so we do it here by hand.
665 */
666 zpci_msi_set_mask_bits(msi, 1, 0);
667 return 0;
668}
669
670void zpci_free_irq(unsigned int irq)
671{
672 struct zdev_irq_map *imap = get_imap(irq);
673 int msi_nr = irq_to_msi_nr(irq);
674 unsigned long flags;
675
676 pr_debug("%s: for irq: %d\n", __func__, irq);
677
678 spin_lock_irqsave(&imap->lock, flags);
679 imap->cb[msi_nr].handler = NULL;
680 imap->cb[msi_nr].data = NULL;
681 spin_unlock_irqrestore(&imap->lock, flags);
682}
683
684int request_irq(unsigned int irq, irq_handler_t handler,
685 unsigned long irqflags, const char *devname, void *dev_id)
686{
687 pr_debug("%s: irq: %d handler: %p flags: %lx dev: %s\n",
688 __func__, irq, handler, irqflags, devname);
689
690 return zpci_request_irq(irq, handler, dev_id);
691}
692EXPORT_SYMBOL_GPL(request_irq);
693
694void free_irq(unsigned int irq, void *dev_id)
695{
696 zpci_free_irq(irq);
697}
698EXPORT_SYMBOL_GPL(free_irq);
699
700static int __init zpci_irq_init(void) 575static int __init zpci_irq_init(void)
701{ 576{
702 int cpu, rc; 577 int rc;
703
704 bucket = kzalloc(sizeof(*bucket), GFP_KERNEL);
705 if (!bucket)
706 return -ENOMEM;
707
708 bucket->aisb = (unsigned long *) get_zeroed_page(GFP_KERNEL);
709 if (!bucket->aisb) {
710 rc = -ENOMEM;
711 goto out_aisb;
712 }
713
714 bucket->alloc = (unsigned long *) get_zeroed_page(GFP_KERNEL);
715 if (!bucket->alloc) {
716 rc = -ENOMEM;
717 goto out_alloc;
718 }
719 578
720 rc = register_adapter_interrupt(&zpci_airq); 579 rc = register_adapter_interrupt(&zpci_airq);
721 if (rc) 580 if (rc)
722 goto out_ai; 581 goto out;
723 /* Set summary to 1 to be called every time for the ISC. */ 582 /* Set summary to 1 to be called every time for the ISC. */
724 *zpci_airq.lsi_ptr = 1; 583 *zpci_airq.lsi_ptr = 1;
725 584
726 for_each_online_cpu(cpu) 585 rc = -ENOMEM;
727 per_cpu(next_sbit, cpu) = 0; 586 zpci_aisb_iv = airq_iv_create(ZPCI_NR_DEVICES, AIRQ_IV_ALLOC);
587 if (!zpci_aisb_iv)
588 goto out_airq;
728 589
729 spin_lock_init(&bucket->lock); 590 zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
730 set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
731 return 0; 591 return 0;
732 592
733out_ai: 593out_airq:
734 free_page((unsigned long) bucket->alloc); 594 unregister_adapter_interrupt(&zpci_airq);
735out_alloc: 595out:
736 free_page((unsigned long) bucket->aisb);
737out_aisb:
738 kfree(bucket);
739 return rc; 596 return rc;
740} 597}
741 598
742static void zpci_irq_exit(void) 599static void zpci_irq_exit(void)
743{ 600{
744 free_page((unsigned long) bucket->alloc); 601 airq_iv_release(zpci_aisb_iv);
745 free_page((unsigned long) bucket->aisb);
746 unregister_adapter_interrupt(&zpci_airq); 602 unregister_adapter_interrupt(&zpci_airq);
747 kfree(bucket);
748} 603}
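
The rewritten init/exit path replaces the hand-rolled bucket, AISB page and allocation page with the generic adapter-interrupt bit vector API. A minimal sketch of the lifecycle, assuming the airq_iv interfaces from <asm/airq.h>:

	#include <asm/airq.h>

	static struct airq_iv *iv;

	static int example_iv_init(void)
	{
		/* one summary bit per possible PCI function, with an allocation map */
		iv = airq_iv_create(ZPCI_NR_DEVICES, AIRQ_IV_ALLOC);
		if (!iv)
			return -ENOMEM;
		return 0;
	}

	static void example_iv_exit(void)
	{
		airq_iv_release(iv);	/* must mirror airq_iv_create() */
	}
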
749 604
750static struct resource *zpci_alloc_bus_resource(unsigned long start, unsigned long size, 605static struct resource *zpci_alloc_bus_resource(unsigned long start, unsigned long size,
@@ -801,16 +656,49 @@ static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
801int pcibios_add_device(struct pci_dev *pdev) 656int pcibios_add_device(struct pci_dev *pdev)
802{ 657{
803 struct zpci_dev *zdev = get_zdev(pdev); 658 struct zpci_dev *zdev = get_zdev(pdev);
659 struct resource *res;
660 int i;
661
662 zdev->pdev = pdev;
663 zpci_map_resources(zdev);
664
665 for (i = 0; i < PCI_BAR_COUNT; i++) {
666 res = &pdev->resource[i];
667 if (res->parent || !res->flags)
668 continue;
669 pci_claim_resource(pdev, i);
670 }
671
672 return 0;
673}
674
675int pcibios_enable_device(struct pci_dev *pdev, int mask)
676{
677 struct zpci_dev *zdev = get_zdev(pdev);
678 struct resource *res;
679 u16 cmd;
680 int i;
804 681
805 zdev->pdev = pdev; 682 zdev->pdev = pdev;
806 zpci_debug_init_device(zdev); 683 zpci_debug_init_device(zdev);
807 zpci_fmb_enable_device(zdev); 684 zpci_fmb_enable_device(zdev);
808 zpci_map_resources(zdev); 685 zpci_map_resources(zdev);
809 686
687 pci_read_config_word(pdev, PCI_COMMAND, &cmd);
688 for (i = 0; i < PCI_BAR_COUNT; i++) {
689 res = &pdev->resource[i];
690
691 if (res->flags & IORESOURCE_IO)
692 return -EINVAL;
693
694 if (res->flags & IORESOURCE_MEM)
695 cmd |= PCI_COMMAND_MEMORY;
696 }
697 pci_write_config_word(pdev, PCI_COMMAND, cmd);
810 return 0; 698 return 0;
811} 699}
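
pcibios_enable_device() encodes an architectural rule: s390 has no PCI I/O address space, so a BAR flagged IORESOURCE_IO cannot be enabled at all, while memory BARs only need PCI_COMMAND_MEMORY set. A condensed sketch of that per-BAR policy:

	static int example_bar_policy(struct resource *res, u16 *cmd)
	{
		if (res->flags & IORESOURCE_IO)
			return -EINVAL;		/* no PIO on s390 */
		if (res->flags & IORESOURCE_MEM)
			*cmd |= PCI_COMMAND_MEMORY;
		return 0;
	}
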
812 700
813void pcibios_release_device(struct pci_dev *pdev) 701void pcibios_disable_device(struct pci_dev *pdev)
814{ 702{
815 struct zpci_dev *zdev = get_zdev(pdev); 703 struct zpci_dev *zdev = get_zdev(pdev);
816 704
@@ -898,6 +786,8 @@ int zpci_enable_device(struct zpci_dev *zdev)
898 rc = zpci_dma_init_device(zdev); 786 rc = zpci_dma_init_device(zdev);
899 if (rc) 787 if (rc)
900 goto out_dma; 788 goto out_dma;
789
790 zdev->state = ZPCI_FN_STATE_ONLINE;
901 return 0; 791 return 0;
902 792
903out_dma: 793out_dma:
@@ -926,18 +816,16 @@ int zpci_create_device(struct zpci_dev *zdev)
926 rc = zpci_enable_device(zdev); 816 rc = zpci_enable_device(zdev);
927 if (rc) 817 if (rc)
928 goto out_free; 818 goto out_free;
929
930 zdev->state = ZPCI_FN_STATE_ONLINE;
931 } 819 }
932 rc = zpci_scan_bus(zdev); 820 rc = zpci_scan_bus(zdev);
933 if (rc) 821 if (rc)
934 goto out_disable; 822 goto out_disable;
935 823
936 mutex_lock(&zpci_list_lock); 824 spin_lock(&zpci_list_lock);
937 list_add_tail(&zdev->entry, &zpci_list); 825 list_add_tail(&zdev->entry, &zpci_list);
938 if (hotplug_ops) 826 spin_unlock(&zpci_list_lock);
939 hotplug_ops->create_slot(zdev); 827
940 mutex_unlock(&zpci_list_lock); 828 zpci_init_slot(zdev);
941 829
942 return 0; 830 return 0;
943 831
@@ -967,15 +855,10 @@ static inline int barsize(u8 size)
967 855
968static int zpci_mem_init(void) 856static int zpci_mem_init(void)
969{ 857{
970 zdev_irq_cache = kmem_cache_create("PCI_IRQ_cache", sizeof(struct zdev_irq_map),
971 L1_CACHE_BYTES, SLAB_HWCACHE_ALIGN, NULL);
972 if (!zdev_irq_cache)
973 goto error_zdev;
974
975 zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb), 858 zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
976 16, 0, NULL); 859 16, 0, NULL);
977 if (!zdev_fmb_cache) 860 if (!zdev_fmb_cache)
978 goto error_fmb; 861 goto error_zdev;
979 862
980 /* TODO: use realloc */ 863 /* TODO: use realloc */
981 zpci_iomap_start = kzalloc(ZPCI_IOMAP_MAX_ENTRIES * sizeof(*zpci_iomap_start), 864 zpci_iomap_start = kzalloc(ZPCI_IOMAP_MAX_ENTRIES * sizeof(*zpci_iomap_start),
@@ -986,8 +869,6 @@ static int zpci_mem_init(void)
986 869
987error_iomap: 870error_iomap:
988 kmem_cache_destroy(zdev_fmb_cache); 871 kmem_cache_destroy(zdev_fmb_cache);
989error_fmb:
990 kmem_cache_destroy(zdev_irq_cache);
991error_zdev: 872error_zdev:
992 return -ENOMEM; 873 return -ENOMEM;
993} 874}
@@ -995,28 +876,10 @@ error_zdev:
995static void zpci_mem_exit(void) 876static void zpci_mem_exit(void)
996{ 877{
997 kfree(zpci_iomap_start); 878 kfree(zpci_iomap_start);
998 kmem_cache_destroy(zdev_irq_cache);
999 kmem_cache_destroy(zdev_fmb_cache); 879 kmem_cache_destroy(zdev_fmb_cache);
1000} 880}
1001 881
1002void zpci_register_hp_ops(struct pci_hp_callback_ops *ops) 882static unsigned int s390_pci_probe;
1003{
1004 mutex_lock(&zpci_list_lock);
1005 hotplug_ops = ops;
1006 mutex_unlock(&zpci_list_lock);
1007}
1008EXPORT_SYMBOL_GPL(zpci_register_hp_ops);
1009
1010void zpci_deregister_hp_ops(void)
1011{
1012 mutex_lock(&zpci_list_lock);
1013 hotplug_ops = NULL;
1014 mutex_unlock(&zpci_list_lock);
1015}
1016EXPORT_SYMBOL_GPL(zpci_deregister_hp_ops);
1017
1018unsigned int s390_pci_probe;
1019EXPORT_SYMBOL_GPL(s390_pci_probe);
1020 883
1021char * __init pcibios_setup(char *str) 884char * __init pcibios_setup(char *str)
1022{ 885{
@@ -1044,16 +907,12 @@ static int __init pci_base_init(void)
1044 907
1045 rc = zpci_debug_init(); 908 rc = zpci_debug_init();
1046 if (rc) 909 if (rc)
1047 return rc; 910 goto out;
1048 911
1049 rc = zpci_mem_init(); 912 rc = zpci_mem_init();
1050 if (rc) 913 if (rc)
1051 goto out_mem; 914 goto out_mem;
1052 915
1053 rc = zpci_msihash_init();
1054 if (rc)
1055 goto out_hash;
1056
1057 rc = zpci_irq_init(); 916 rc = zpci_irq_init();
1058 if (rc) 917 if (rc)
1059 goto out_irq; 918 goto out_irq;
@@ -1062,7 +921,7 @@ static int __init pci_base_init(void)
1062 if (rc) 921 if (rc)
1063 goto out_dma; 922 goto out_dma;
1064 923
1065 rc = clp_find_pci_devices(); 924 rc = clp_scan_pci_devices();
1066 if (rc) 925 if (rc)
1067 goto out_find; 926 goto out_find;
1068 927
@@ -1073,11 +932,15 @@ out_find:
1073out_dma: 932out_dma:
1074 zpci_irq_exit(); 933 zpci_irq_exit();
1075out_irq: 934out_irq:
1076 zpci_msihash_exit();
1077out_hash:
1078 zpci_mem_exit(); 935 zpci_mem_exit();
1079out_mem: 936out_mem:
1080 zpci_debug_exit(); 937 zpci_debug_exit();
938out:
1081 return rc; 939 return rc;
1082} 940}
1083subsys_initcall(pci_base_init); 941subsys_initcall_sync(pci_base_init);
942
943void zpci_rescan(void)
944{
945 clp_rescan_pci_devices_simple();
946}
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index 2e9539625d93..475563c3d1e4 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -36,9 +36,9 @@ static inline u8 clp_instr(void *data)
36 return cc; 36 return cc;
37} 37}
38 38
39static void *clp_alloc_block(void) 39static void *clp_alloc_block(gfp_t gfp_mask)
40{ 40{
41 return (void *) __get_free_pages(GFP_KERNEL, get_order(CLP_BLK_SIZE)); 41 return (void *) __get_free_pages(gfp_mask, get_order(CLP_BLK_SIZE));
42} 42}
43 43
44static void clp_free_block(void *ptr) 44static void clp_free_block(void *ptr)
@@ -70,7 +70,7 @@ static int clp_query_pci_fngrp(struct zpci_dev *zdev, u8 pfgid)
70 struct clp_req_rsp_query_pci_grp *rrb; 70 struct clp_req_rsp_query_pci_grp *rrb;
71 int rc; 71 int rc;
72 72
73 rrb = clp_alloc_block(); 73 rrb = clp_alloc_block(GFP_KERNEL);
74 if (!rrb) 74 if (!rrb)
75 return -ENOMEM; 75 return -ENOMEM;
76 76
@@ -113,7 +113,7 @@ static int clp_query_pci_fn(struct zpci_dev *zdev, u32 fh)
113 struct clp_req_rsp_query_pci *rrb; 113 struct clp_req_rsp_query_pci *rrb;
114 int rc; 114 int rc;
115 115
116 rrb = clp_alloc_block(); 116 rrb = clp_alloc_block(GFP_KERNEL);
117 if (!rrb) 117 if (!rrb)
118 return -ENOMEM; 118 return -ENOMEM;
119 119
@@ -179,9 +179,9 @@ error:
179static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command) 179static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command)
180{ 180{
181 struct clp_req_rsp_set_pci *rrb; 181 struct clp_req_rsp_set_pci *rrb;
182 int rc, retries = 1000; 182 int rc, retries = 100;
183 183
184 rrb = clp_alloc_block(); 184 rrb = clp_alloc_block(GFP_KERNEL);
185 if (!rrb) 185 if (!rrb)
186 return -ENOMEM; 186 return -ENOMEM;
187 187
@@ -199,7 +199,7 @@ static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command)
199 retries--; 199 retries--;
200 if (retries < 0) 200 if (retries < 0)
201 break; 201 break;
202 msleep(1); 202 msleep(20);
203 } 203 }
204 } while (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY); 204 } while (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY);
205 205
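
The retry tuning trades polling frequency for sleep length: 100 retries at 20 ms keep a comparable worst-case wait to the old 1000 x 1 ms loop while issuing an order of magnitude fewer CLP calls. A sketch of the loop shape, with clp_issue() standing in as a hypothetical helper for the firmware call:

	int retries = 100;
	int rc;

	do {
		rc = clp_issue(rrb);	/* hypothetical stand-in for the CLP call */
		if (rc || rrb->response.hdr.rsp != CLP_RC_SETPCIFN_BUSY)
			break;
		msleep(20);
	} while (retries-- > 0);
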
@@ -245,49 +245,12 @@ int clp_disable_fh(struct zpci_dev *zdev)
245 return rc; 245 return rc;
246} 246}
247 247
248static void clp_check_pcifn_entry(struct clp_fh_list_entry *entry) 248static int clp_list_pci(struct clp_req_rsp_list_pci *rrb,
249 void (*cb)(struct clp_fh_list_entry *entry))
249{ 250{
250 int present, rc;
251
252 if (!entry->vendor_id)
253 return;
254
255 /* TODO: be a little bit more scalable */
256 present = zpci_fid_present(entry->fid);
257
258 if (present)
259 pr_debug("%s: device %x already present\n", __func__, entry->fid);
260
261 /* skip already used functions */
262 if (present && entry->config_state)
263 return;
264
265 /* aev 306: function moved to stand-by state */
266 if (present && !entry->config_state) {
267 /*
268 * The handle is already disabled, which means iota/irq freeing via
269 * the firmware interfaces is no longer possible. Resources must be
270 * freed manually (DMA memory, debug, sysfs)...
271 */
272 zpci_stop_device(get_zdev_by_fid(entry->fid));
273 return;
274 }
275
276 rc = clp_add_pci_device(entry->fid, entry->fh, entry->config_state);
277 if (rc)
278 pr_err("Failed to add fid: 0x%x\n", entry->fid);
279}
280
281int clp_find_pci_devices(void)
282{
283 struct clp_req_rsp_list_pci *rrb;
284 u64 resume_token = 0; 251 u64 resume_token = 0;
285 int entries, i, rc; 252 int entries, i, rc;
286 253
287 rrb = clp_alloc_block();
288 if (!rrb)
289 return -ENOMEM;
290
291 do { 254 do {
292 memset(rrb, 0, sizeof(*rrb)); 255 memset(rrb, 0, sizeof(*rrb));
293 rrb->request.hdr.len = sizeof(rrb->request); 256 rrb->request.hdr.len = sizeof(rrb->request);
@@ -316,12 +279,101 @@ int clp_find_pci_devices(void)
316 resume_token = rrb->response.resume_token; 279 resume_token = rrb->response.resume_token;
317 280
318 for (i = 0; i < entries; i++) 281 for (i = 0; i < entries; i++)
319 clp_check_pcifn_entry(&rrb->response.fh_list[i]); 282 cb(&rrb->response.fh_list[i]);
320 } while (resume_token); 283 } while (resume_token);
321 284
322 pr_debug("Maximum number of supported PCI functions: %u\n", 285 pr_debug("Maximum number of supported PCI functions: %u\n",
323 rrb->response.max_fn); 286 rrb->response.max_fn);
324out: 287out:
288 return rc;
289}
290
291static void __clp_add(struct clp_fh_list_entry *entry)
292{
293 if (!entry->vendor_id)
294 return;
295
296 clp_add_pci_device(entry->fid, entry->fh, entry->config_state);
297}
298
299static void __clp_rescan(struct clp_fh_list_entry *entry)
300{
301 struct zpci_dev *zdev;
302
303 if (!entry->vendor_id)
304 return;
305
306 zdev = get_zdev_by_fid(entry->fid);
307 if (!zdev) {
308 clp_add_pci_device(entry->fid, entry->fh, entry->config_state);
309 return;
310 }
311
312 if (!entry->config_state) {
313 /*
314 * The handle is already disabled, which means iota/irq freeing via
315 * the firmware interfaces is no longer possible. Resources must be
316 * freed manually (DMA memory, debug, sysfs)...
317 */
318 zpci_stop_device(zdev);
319 }
320}
321
322static void __clp_update(struct clp_fh_list_entry *entry)
323{
324 struct zpci_dev *zdev;
325
326 if (!entry->vendor_id)
327 return;
328
329 zdev = get_zdev_by_fid(entry->fid);
330 if (!zdev)
331 return;
332
333 zdev->fh = entry->fh;
334}
335
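
clp_list_pci() now takes the per-entry action as a callback, so the scan, rescan and handle-update paths below share one list walker. A hypothetical consumer that merely counts populated entries would plug in the same way:

	static unsigned int clp_nr_fn;		/* hypothetical counter */

	static void __clp_count(struct clp_fh_list_entry *entry)
	{
		if (entry->vendor_id)
			clp_nr_fn++;
	}

	/* ... rc = clp_list_pci(rrb, __clp_count); ... */
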
336int clp_scan_pci_devices(void)
337{
338 struct clp_req_rsp_list_pci *rrb;
339 int rc;
340
341 rrb = clp_alloc_block(GFP_KERNEL);
342 if (!rrb)
343 return -ENOMEM;
344
345 rc = clp_list_pci(rrb, __clp_add);
346
347 clp_free_block(rrb);
348 return rc;
349}
350
351int clp_rescan_pci_devices(void)
352{
353 struct clp_req_rsp_list_pci *rrb;
354 int rc;
355
356 rrb = clp_alloc_block(GFP_KERNEL);
357 if (!rrb)
358 return -ENOMEM;
359
360 rc = clp_list_pci(rrb, __clp_rescan);
361
362 clp_free_block(rrb);
363 return rc;
364}
365
366int clp_rescan_pci_devices_simple(void)
367{
368 struct clp_req_rsp_list_pci *rrb;
369 int rc;
370
371 rrb = clp_alloc_block(GFP_NOWAIT);
372 if (!rrb)
373 return -ENOMEM;
374
375 rc = clp_list_pci(rrb, __clp_update);
376
325 clp_free_block(rrb); 377 clp_free_block(rrb);
326 return rc; 378 return rc;
327} 379}
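
clp_alloc_block() takes the GFP flags as a parameter because the availability-event path may run where sleeping is not allowed: the ordinary scans keep GFP_KERNEL, while the simple rescan uses GFP_NOWAIT and just returns -ENOMEM if memory is tight. A sketch of the two call sites:

	/* ordinary scan: process context, sleeping allowed */
	rrb = clp_alloc_block(GFP_KERNEL);

	/* event-driven rescan: must not sleep, may fail with -ENOMEM instead */
	rrb = clp_alloc_block(GFP_NOWAIT);
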
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index a2343c1f6e04..7e5573acb063 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -10,6 +10,7 @@
10#include <linux/export.h> 10#include <linux/export.h>
11#include <linux/iommu-helper.h> 11#include <linux/iommu-helper.h>
12#include <linux/dma-mapping.h> 12#include <linux/dma-mapping.h>
13#include <linux/vmalloc.h>
13#include <linux/pci.h> 14#include <linux/pci.h>
14#include <asm/pci_dma.h> 15#include <asm/pci_dma.h>
15 16
@@ -170,8 +171,8 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
170 */ 171 */
171 goto no_refresh; 172 goto no_refresh;
172 173
173 rc = s390pci_refresh_trans((u64) zdev->fh << 32, start_dma_addr, 174 rc = zpci_refresh_trans((u64) zdev->fh << 32, start_dma_addr,
174 nr_pages * PAGE_SIZE); 175 nr_pages * PAGE_SIZE);
175 176
176no_refresh: 177no_refresh:
177 spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags); 178 spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
@@ -407,7 +408,6 @@ static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
407 408
408int zpci_dma_init_device(struct zpci_dev *zdev) 409int zpci_dma_init_device(struct zpci_dev *zdev)
409{ 410{
410 unsigned int bitmap_order;
411 int rc; 411 int rc;
412 412
413 spin_lock_init(&zdev->iommu_bitmap_lock); 413 spin_lock_init(&zdev->iommu_bitmap_lock);
@@ -421,12 +421,7 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
421 421
422 zdev->iommu_size = (unsigned long) high_memory - PAGE_OFFSET; 422 zdev->iommu_size = (unsigned long) high_memory - PAGE_OFFSET;
423 zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT; 423 zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
424 bitmap_order = get_order(zdev->iommu_pages / 8); 424 zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
425 pr_info("iommu_size: 0x%lx iommu_pages: 0x%lx bitmap_order: %i\n",
426 zdev->iommu_size, zdev->iommu_pages, bitmap_order);
427
428 zdev->iommu_bitmap = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
429 bitmap_order);
430 if (!zdev->iommu_bitmap) { 425 if (!zdev->iommu_bitmap) {
431 rc = -ENOMEM; 426 rc = -ENOMEM;
432 goto out_reg; 427 goto out_reg;
@@ -451,8 +446,7 @@ void zpci_dma_exit_device(struct zpci_dev *zdev)
451{ 446{
452 zpci_unregister_ioat(zdev, 0); 447 zpci_unregister_ioat(zdev, 0);
453 dma_cleanup_tables(zdev); 448 dma_cleanup_tables(zdev);
454 free_pages((unsigned long) zdev->iommu_bitmap, 449 vfree(zdev->iommu_bitmap);
455 get_order(zdev->iommu_pages / 8));
456 zdev->iommu_bitmap = NULL; 450 zdev->iommu_bitmap = NULL;
457 zdev->next_bit = 0; 451 zdev->next_bit = 0;
458} 452}
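
The IOMMU bitmap tracks one bit per IOVA page, so its size scales with iommu_size: a 4 GiB aperture with 4 KiB pages needs 1M bits, i.e. 128 KiB, which is too large to depend on finding physically contiguous pages, hence the switch to vzalloc(). Sizing sketch:

	unsigned long iommu_pages = zdev->iommu_size >> PAGE_SHIFT;

	zdev->iommu_bitmap = vzalloc(iommu_pages / 8);	/* one bit per IOVA page */
	if (!zdev->iommu_bitmap)
		return -ENOMEM;
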
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
index ec62e3a0dc09..0aecaf954845 100644
--- a/arch/s390/pci/pci_event.c
+++ b/arch/s390/pci/pci_event.c
@@ -69,7 +69,7 @@ static void zpci_event_log_avail(struct zpci_ccdf_avail *ccdf)
69 clp_add_pci_device(ccdf->fid, ccdf->fh, 0); 69 clp_add_pci_device(ccdf->fid, ccdf->fh, 0);
70 break; 70 break;
71 case 0x0306: 71 case 0x0306:
72 clp_find_pci_devices(); 72 clp_rescan_pci_devices();
73 break; 73 break;
74 default: 74 default:
75 break; 75 break;
diff --git a/arch/s390/pci/pci_insn.c b/arch/s390/pci/pci_insn.c
index 22eeb9d7ffeb..85267c058af8 100644
--- a/arch/s390/pci/pci_insn.c
+++ b/arch/s390/pci/pci_insn.c
@@ -27,7 +27,7 @@ static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status)
27 return cc; 27 return cc;
28} 28}
29 29
30int s390pci_mod_fc(u64 req, struct zpci_fib *fib) 30int zpci_mod_fc(u64 req, struct zpci_fib *fib)
31{ 31{
32 u8 cc, status; 32 u8 cc, status;
33 33
@@ -61,7 +61,7 @@ static inline u8 __rpcit(u64 fn, u64 addr, u64 range, u8 *status)
61 return cc; 61 return cc;
62} 62}
63 63
64int s390pci_refresh_trans(u64 fn, u64 addr, u64 range) 64int zpci_refresh_trans(u64 fn, u64 addr, u64 range)
65{ 65{
66 u8 cc, status; 66 u8 cc, status;
67 67
@@ -78,7 +78,7 @@ int s390pci_refresh_trans(u64 fn, u64 addr, u64 range)
78} 78}
79 79
80/* Set Interruption Controls */ 80/* Set Interruption Controls */
81void set_irq_ctrl(u16 ctl, char *unused, u8 isc) 81void zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc)
82{ 82{
83 asm volatile ( 83 asm volatile (
84 " .insn rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n" 84 " .insn rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n"
@@ -109,7 +109,7 @@ static inline int __pcilg(u64 *data, u64 req, u64 offset, u8 *status)
109 return cc; 109 return cc;
110} 110}
111 111
112int s390pci_load(u64 *data, u64 req, u64 offset) 112int zpci_load(u64 *data, u64 req, u64 offset)
113{ 113{
114 u8 status; 114 u8 status;
115 int cc; 115 int cc;
@@ -125,7 +125,7 @@ int s390pci_load(u64 *data, u64 req, u64 offset)
125 __func__, cc, status, req, offset); 125 __func__, cc, status, req, offset);
126 return (cc > 0) ? -EIO : cc; 126 return (cc > 0) ? -EIO : cc;
127} 127}
128EXPORT_SYMBOL_GPL(s390pci_load); 128EXPORT_SYMBOL_GPL(zpci_load);
129 129
130/* PCI Store */ 130/* PCI Store */
131static inline int __pcistg(u64 data, u64 req, u64 offset, u8 *status) 131static inline int __pcistg(u64 data, u64 req, u64 offset, u8 *status)
@@ -147,7 +147,7 @@ static inline int __pcistg(u64 data, u64 req, u64 offset, u8 *status)
147 return cc; 147 return cc;
148} 148}
149 149
150int s390pci_store(u64 data, u64 req, u64 offset) 150int zpci_store(u64 data, u64 req, u64 offset)
151{ 151{
152 u8 status; 152 u8 status;
153 int cc; 153 int cc;
@@ -163,7 +163,7 @@ int s390pci_store(u64 data, u64 req, u64 offset)
163 __func__, cc, status, req, offset); 163 __func__, cc, status, req, offset);
164 return (cc > 0) ? -EIO : cc; 164 return (cc > 0) ? -EIO : cc;
165} 165}
166EXPORT_SYMBOL_GPL(s390pci_store); 166EXPORT_SYMBOL_GPL(zpci_store);
167 167
168/* PCI Store Block */ 168/* PCI Store Block */
169static inline int __pcistb(const u64 *data, u64 req, u64 offset, u8 *status) 169static inline int __pcistb(const u64 *data, u64 req, u64 offset, u8 *status)
@@ -183,7 +183,7 @@ static inline int __pcistb(const u64 *data, u64 req, u64 offset, u8 *status)
183 return cc; 183 return cc;
184} 184}
185 185
186int s390pci_store_block(const u64 *data, u64 req, u64 offset) 186int zpci_store_block(const u64 *data, u64 req, u64 offset)
187{ 187{
188 u8 status; 188 u8 status;
189 int cc; 189 int cc;
@@ -199,4 +199,4 @@ int s390pci_store_block(const u64 *data, u64 req, u64 offset)
199 __func__, cc, status, req, offset); 199 __func__, cc, status, req, offset);
200 return (cc > 0) ? -EIO : cc; 200 return (cc > 0) ? -EIO : cc;
201} 201}
202EXPORT_SYMBOL_GPL(s390pci_store_block); 202EXPORT_SYMBOL_GPL(zpci_store_block);
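
The renamed accessors keep their calling convention: the request token encodes the function handle plus address-space and length, and any condition code above zero is folded into -EIO. A hedged usage sketch (req and offset are placeholders):

	u64 data;
	int rc;

	rc = zpci_load(&data, req, offset);		/* read */
	if (!rc)
		rc = zpci_store(data, req, offset);	/* write back */
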
diff --git a/arch/s390/pci/pci_msi.c b/arch/s390/pci/pci_msi.c
deleted file mode 100644
index b097aed05a9b..000000000000
--- a/arch/s390/pci/pci_msi.c
+++ /dev/null
@@ -1,142 +0,0 @@
1/*
2 * Copyright IBM Corp. 2012
3 *
4 * Author(s):
5 * Jan Glauber <jang@linux.vnet.ibm.com>
6 */
7
8#define COMPONENT "zPCI"
9#define pr_fmt(fmt) COMPONENT ": " fmt
10
11#include <linux/kernel.h>
12#include <linux/err.h>
13#include <linux/rculist.h>
14#include <linux/hash.h>
15#include <linux/pci.h>
16#include <linux/msi.h>
17#include <asm/hw_irq.h>
18
19/* mapping of irq numbers to msi_desc */
20static struct hlist_head *msi_hash;
21static const unsigned int msi_hash_bits = 8;
22#define MSI_HASH_BUCKETS (1U << msi_hash_bits)
23#define msi_hashfn(nr) hash_long(nr, msi_hash_bits)
24
25static DEFINE_SPINLOCK(msi_map_lock);
26
27struct msi_desc *__irq_get_msi_desc(unsigned int irq)
28{
29 struct msi_map *map;
30
31 hlist_for_each_entry_rcu(map,
32 &msi_hash[msi_hashfn(irq)], msi_chain)
33 if (map->irq == irq)
34 return map->msi;
35 return NULL;
36}
37
38int zpci_msi_set_mask_bits(struct msi_desc *msi, u32 mask, u32 flag)
39{
40 if (msi->msi_attrib.is_msix) {
41 int offset = msi->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
42 PCI_MSIX_ENTRY_VECTOR_CTRL;
43 msi->masked = readl(msi->mask_base + offset);
44 writel(flag, msi->mask_base + offset);
45 } else {
46 if (msi->msi_attrib.maskbit) {
47 int pos;
48 u32 mask_bits;
49
50 pos = (long) msi->mask_base;
51 pci_read_config_dword(msi->dev, pos, &mask_bits);
52 mask_bits &= ~(mask);
53 mask_bits |= flag & mask;
54 pci_write_config_dword(msi->dev, pos, mask_bits);
55 } else {
56 return 0;
57 }
58 }
59
60 msi->msi_attrib.maskbit = !!flag;
61 return 1;
62}
63
64int zpci_setup_msi_irq(struct zpci_dev *zdev, struct msi_desc *msi,
65 unsigned int nr, int offset)
66{
67 struct msi_map *map;
68 struct msi_msg msg;
69 int rc;
70
71 map = kmalloc(sizeof(*map), GFP_KERNEL);
72 if (map == NULL)
73 return -ENOMEM;
74
75 map->irq = nr;
76 map->msi = msi;
77 zdev->msi_map[nr & ZPCI_MSI_MASK] = map;
78 INIT_HLIST_NODE(&map->msi_chain);
79
80 pr_debug("%s hashing irq: %u to bucket nr: %llu\n",
81 __func__, nr, msi_hashfn(nr));
82 hlist_add_head_rcu(&map->msi_chain, &msi_hash[msi_hashfn(nr)]);
83
84 spin_lock(&msi_map_lock);
85 rc = irq_set_msi_desc(nr, msi);
86 if (rc) {
87 spin_unlock(&msi_map_lock);
88 hlist_del_rcu(&map->msi_chain);
89 kfree(map);
90 zdev->msi_map[nr & ZPCI_MSI_MASK] = NULL;
91 return rc;
92 }
93 spin_unlock(&msi_map_lock);
94
95 msg.data = nr - offset;
96 msg.address_lo = zdev->msi_addr & 0xffffffff;
97 msg.address_hi = zdev->msi_addr >> 32;
98 write_msi_msg(nr, &msg);
99 return 0;
100}
101
102void zpci_teardown_msi_irq(struct zpci_dev *zdev, struct msi_desc *msi)
103{
104 int irq = msi->irq & ZPCI_MSI_MASK;
105 struct msi_map *map;
106
107 msi->msg.address_lo = 0;
108 msi->msg.address_hi = 0;
109 msi->msg.data = 0;
110 msi->irq = 0;
111 zpci_msi_set_mask_bits(msi, 1, 1);
112
113 spin_lock(&msi_map_lock);
114 map = zdev->msi_map[irq];
115 hlist_del_rcu(&map->msi_chain);
116 kfree(map);
117 zdev->msi_map[irq] = NULL;
118 spin_unlock(&msi_map_lock);
119}
120
121/*
122 * The msi hash table has 256 entries, which is good for 4..20
123 * devices (a typical device allocates 10 + CPUs MSIs). Maybe make
124 * the hash table size adjustable later.
125 */
126int __init zpci_msihash_init(void)
127{
128 unsigned int i;
129
130 msi_hash = kmalloc(MSI_HASH_BUCKETS * sizeof(*msi_hash), GFP_KERNEL);
131 if (!msi_hash)
132 return -ENOMEM;
133
134 for (i = 0; i < MSI_HASH_BUCKETS; i++)
135 INIT_HLIST_HEAD(&msi_hash[i]);
136 return 0;
137}
138
139void __init zpci_msihash_exit(void)
140{
141 kfree(msi_hash);
142}
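
For reference, the deleted table resolved an IRQ number to one of 1 << 8 = 256 buckets with hash_long(); the MSI bookkeeping in the rewritten pci.c no longer needs a hash at all:

	#include <linux/hash.h>

	#define MSI_HASH_BITS 8
	unsigned int bucket = hash_long(irq, MSI_HASH_BITS);	/* 0..255 */
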
diff --git a/arch/s390/pci/pci_sysfs.c b/arch/s390/pci/pci_sysfs.c
index e99a2557f186..cf8a12ff733b 100644
--- a/arch/s390/pci/pci_sysfs.c
+++ b/arch/s390/pci/pci_sysfs.c
@@ -48,11 +48,38 @@ static ssize_t show_pfgid(struct device *dev, struct device_attribute *attr,
48} 48}
49static DEVICE_ATTR(pfgid, S_IRUGO, show_pfgid, NULL); 49static DEVICE_ATTR(pfgid, S_IRUGO, show_pfgid, NULL);
50 50
51static void recover_callback(struct device *dev)
52{
53 struct pci_dev *pdev = to_pci_dev(dev);
54 struct zpci_dev *zdev = get_zdev(pdev);
55 int ret;
56
57 pci_stop_and_remove_bus_device(pdev);
58 ret = zpci_disable_device(zdev);
59 if (ret)
60 return;
61
62 ret = zpci_enable_device(zdev);
63 if (ret)
64 return;
65
66 pci_rescan_bus(zdev->bus);
67}
68
69static ssize_t store_recover(struct device *dev, struct device_attribute *attr,
70 const char *buf, size_t count)
71{
72 int rc = device_schedule_callback(dev, recover_callback);
73 return rc ? rc : count;
74}
75static DEVICE_ATTR(recover, S_IWUSR, NULL, store_recover);
76
51static struct device_attribute *zpci_dev_attrs[] = { 77static struct device_attribute *zpci_dev_attrs[] = {
52 &dev_attr_function_id, 78 &dev_attr_function_id,
53 &dev_attr_function_handle, 79 &dev_attr_function_handle,
54 &dev_attr_pchid, 80 &dev_attr_pchid,
55 &dev_attr_pfgid, 81 &dev_attr_pfgid,
82 &dev_attr_recover,
56 NULL, 83 NULL,
57}; 84};
58 85
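
The new recover attribute is write-only and defers the actual work through device_schedule_callback(), so the remove/re-enable sequence runs outside the sysfs write path, where removing the device's own attribute would deadlock. A minimal attribute following the same pattern, with hypothetical names:

	static void example_callback(struct device *dev)
	{
		/* runs later from a workqueue; safe to remove the device here */
	}

	static ssize_t store_example(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
	{
		int rc = device_schedule_callback(dev, example_callback);

		return rc ? rc : count;
	}
	static DEVICE_ATTR(example, S_IWUSR, NULL, store_example);
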
diff --git a/arch/score/Kconfig b/arch/score/Kconfig
index c8def8bc9020..5fc237581caf 100644
--- a/arch/score/Kconfig
+++ b/arch/score/Kconfig
@@ -87,6 +87,8 @@ config STACKTRACE_SUPPORT
87 87
88source "init/Kconfig" 88source "init/Kconfig"
89 89
90source "kernel/Kconfig.freezer"
91
90config MMU 92config MMU
91 def_bool y 93 def_bool y
92 94
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 1020dd85431a..1018ed3a3ca5 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -643,9 +643,9 @@ config KEXEC
643 643
644 It is an ongoing process to be certain the hardware in a machine 644 It is an ongoing process to be certain the hardware in a machine
645 is properly shut down, so do not be surprised if this code does not 645 is properly shut down, so do not be surprised if this code does not
646 initially work for you. It may help to enable device hotplugging 646 initially work for you. As of this writing the exact hardware
647 support. As of this writing the exact hardware interface is 647 interface is strongly in flux, so no good recommendation can be
648 strongly in flux, so no good recommendation can be made. 648 made.
649 649
650config CRASH_DUMP 650config CRASH_DUMP
651 bool "kernel crash dumps (EXPERIMENTAL)" 651 bool "kernel crash dumps (EXPERIMENTAL)"
diff --git a/arch/sh/boards/board-espt.c b/arch/sh/boards/board-espt.c
index 4d94dff9015c..7291e2f11a47 100644
--- a/arch/sh/boards/board-espt.c
+++ b/arch/sh/boards/board-espt.c
@@ -80,7 +80,6 @@ static struct resource sh_eth_resources[] = {
80static struct sh_eth_plat_data sh7763_eth_pdata = { 80static struct sh_eth_plat_data sh7763_eth_pdata = {
81 .phy = 0, 81 .phy = 0,
82 .edmac_endian = EDMAC_LITTLE_ENDIAN, 82 .edmac_endian = EDMAC_LITTLE_ENDIAN,
83 .register_type = SH_ETH_REG_GIGABIT,
84 .phy_interface = PHY_INTERFACE_MODE_MII, 83 .phy_interface = PHY_INTERFACE_MODE_MII,
85}; 84};
86 85
diff --git a/arch/sh/boards/board-sh7757lcr.c b/arch/sh/boards/board-sh7757lcr.c
index 4f114d1cd019..25c5a932f9fe 100644
--- a/arch/sh/boards/board-sh7757lcr.c
+++ b/arch/sh/boards/board-sh7757lcr.c
@@ -77,7 +77,6 @@ static struct resource sh_eth0_resources[] = {
77static struct sh_eth_plat_data sh7757_eth0_pdata = { 77static struct sh_eth_plat_data sh7757_eth0_pdata = {
78 .phy = 1, 78 .phy = 1,
79 .edmac_endian = EDMAC_LITTLE_ENDIAN, 79 .edmac_endian = EDMAC_LITTLE_ENDIAN,
80 .register_type = SH_ETH_REG_FAST_SH4,
81 .set_mdio_gate = sh7757_eth_set_mdio_gate, 80 .set_mdio_gate = sh7757_eth_set_mdio_gate,
82}; 81};
83 82
@@ -106,7 +105,6 @@ static struct resource sh_eth1_resources[] = {
106static struct sh_eth_plat_data sh7757_eth1_pdata = { 105static struct sh_eth_plat_data sh7757_eth1_pdata = {
107 .phy = 1, 106 .phy = 1,
108 .edmac_endian = EDMAC_LITTLE_ENDIAN, 107 .edmac_endian = EDMAC_LITTLE_ENDIAN,
109 .register_type = SH_ETH_REG_FAST_SH4,
110 .set_mdio_gate = sh7757_eth_set_mdio_gate, 108 .set_mdio_gate = sh7757_eth_set_mdio_gate,
111}; 109};
112 110
@@ -151,7 +149,6 @@ static struct resource sh_eth_giga0_resources[] = {
151static struct sh_eth_plat_data sh7757_eth_giga0_pdata = { 149static struct sh_eth_plat_data sh7757_eth_giga0_pdata = {
152 .phy = 18, 150 .phy = 18,
153 .edmac_endian = EDMAC_LITTLE_ENDIAN, 151 .edmac_endian = EDMAC_LITTLE_ENDIAN,
154 .register_type = SH_ETH_REG_GIGABIT,
155 .set_mdio_gate = sh7757_eth_giga_set_mdio_gate, 152 .set_mdio_gate = sh7757_eth_giga_set_mdio_gate,
156 .phy_interface = PHY_INTERFACE_MODE_RGMII_ID, 153 .phy_interface = PHY_INTERFACE_MODE_RGMII_ID,
157}; 154};
@@ -186,7 +183,6 @@ static struct resource sh_eth_giga1_resources[] = {
186static struct sh_eth_plat_data sh7757_eth_giga1_pdata = { 183static struct sh_eth_plat_data sh7757_eth_giga1_pdata = {
187 .phy = 19, 184 .phy = 19,
188 .edmac_endian = EDMAC_LITTLE_ENDIAN, 185 .edmac_endian = EDMAC_LITTLE_ENDIAN,
189 .register_type = SH_ETH_REG_GIGABIT,
190 .set_mdio_gate = sh7757_eth_giga_set_mdio_gate, 186 .set_mdio_gate = sh7757_eth_giga_set_mdio_gate,
191 .phy_interface = PHY_INTERFACE_MODE_RGMII_ID, 187 .phy_interface = PHY_INTERFACE_MODE_RGMII_ID,
192}; 188};
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index 61fade0ffa96..a4f630f04ea3 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -159,7 +159,6 @@ static struct resource sh_eth_resources[] = {
159static struct sh_eth_plat_data sh_eth_plat = { 159static struct sh_eth_plat_data sh_eth_plat = {
160 .phy = 0x1f, /* SMSC LAN8700 */ 160 .phy = 0x1f, /* SMSC LAN8700 */
161 .edmac_endian = EDMAC_LITTLE_ENDIAN, 161 .edmac_endian = EDMAC_LITTLE_ENDIAN,
162 .register_type = SH_ETH_REG_FAST_SH4,
163 .phy_interface = PHY_INTERFACE_MODE_MII, 162 .phy_interface = PHY_INTERFACE_MODE_MII,
164 .ether_link_active_low = 1 163 .ether_link_active_low = 1
165}; 164};
diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c
index b70180ef3e29..21e4230659a5 100644
--- a/arch/sh/boards/mach-se/7724/setup.c
+++ b/arch/sh/boards/mach-se/7724/setup.c
@@ -365,7 +365,7 @@ static struct platform_device keysc_device = {
365static struct resource sh_eth_resources[] = { 365static struct resource sh_eth_resources[] = {
366 [0] = { 366 [0] = {
367 .start = SH_ETH_ADDR, 367 .start = SH_ETH_ADDR,
368 .end = SH_ETH_ADDR + 0x1FC, 368 .end = SH_ETH_ADDR + 0x1FC - 1,
369 .flags = IORESOURCE_MEM, 369 .flags = IORESOURCE_MEM,
370 }, 370 },
371 [1] = { 371 [1] = {
@@ -377,6 +377,7 @@ static struct resource sh_eth_resources[] = {
377static struct sh_eth_plat_data sh_eth_plat = { 377static struct sh_eth_plat_data sh_eth_plat = {
378 .phy = 0x1f, /* SMSC LAN8187 */ 378 .phy = 0x1f, /* SMSC LAN8187 */
379 .edmac_endian = EDMAC_LITTLE_ENDIAN, 379 .edmac_endian = EDMAC_LITTLE_ENDIAN,
380 .phy_interface = PHY_INTERFACE_MODE_MII,
380}; 381};
381 382
382static struct platform_device sh_eth_device = { 383static struct platform_device sh_eth_device = {
diff --git a/arch/sh/boards/mach-sh7763rdp/setup.c b/arch/sh/boards/mach-sh7763rdp/setup.c
index 50ba481fa240..2c8fb04685d4 100644
--- a/arch/sh/boards/mach-sh7763rdp/setup.c
+++ b/arch/sh/boards/mach-sh7763rdp/setup.c
@@ -88,7 +88,6 @@ static struct resource sh_eth_resources[] = {
88static struct sh_eth_plat_data sh7763_eth_pdata = { 88static struct sh_eth_plat_data sh7763_eth_pdata = {
89 .phy = 1, 89 .phy = 1,
90 .edmac_endian = EDMAC_LITTLE_ENDIAN, 90 .edmac_endian = EDMAC_LITTLE_ENDIAN,
91 .register_type = SH_ETH_REG_GIGABIT,
92 .phy_interface = PHY_INTERFACE_MODE_MII, 91 .phy_interface = PHY_INTERFACE_MODE_MII,
93}; 92};
94 93
diff --git a/arch/sh/drivers/pci/pci.c b/arch/sh/drivers/pci/pci.c
index 102f5d58b037..60ed3e1c4b75 100644
--- a/arch/sh/drivers/pci/pci.c
+++ b/arch/sh/drivers/pci/pci.c
@@ -69,7 +69,6 @@ static void pcibios_scanbus(struct pci_channel *hose)
69 69
70 pci_bus_size_bridges(bus); 70 pci_bus_size_bridges(bus);
71 pci_bus_assign_resources(bus); 71 pci_bus_assign_resources(bus);
72 pci_enable_bridges(bus);
73 } else { 72 } else {
74 pci_free_resource_list(&resources); 73 pci_free_resource_list(&resources);
75 } 74 }
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index e61d43d9f689..362192ed12fe 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -36,10 +36,12 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
36} 36}
37 37
38static inline void 38static inline void
39tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush) 39tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
40{ 40{
41 tlb->mm = mm; 41 tlb->mm = mm;
42 tlb->fullmm = full_mm_flush; 42 tlb->start = start;
43 tlb->end = end;
44 tlb->fullmm = !(start | (end+1));
43 45
44 init_tlb_gather(tlb); 46 init_tlb_gather(tlb);
45} 47}
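
The fullmm test relies on how a full flush is requested: start == 0 and end == -1UL, so end + 1 wraps to 0 and the OR of both terms is zero only in that case. A two-line illustration:

	unsigned long start = 0, end = -1UL;	/* "flush everything" */
	int fullmm = !(start | (end + 1));	/* 1 here, 0 for any real range */
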
diff --git a/arch/sh/kernel/cpu/sh2/setup-sh7619.c b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
index bb11e1925178..4df4d4ffe39b 100644
--- a/arch/sh/kernel/cpu/sh2/setup-sh7619.c
+++ b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
@@ -12,6 +12,7 @@
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/serial.h> 13#include <linux/serial.h>
14#include <linux/serial_sci.h> 14#include <linux/serial_sci.h>
15#include <linux/sh_eth.h>
15#include <linux/sh_timer.h> 16#include <linux/sh_timer.h>
16#include <linux/io.h> 17#include <linux/io.h>
17 18
@@ -110,10 +111,16 @@ static struct platform_device scif2_device = {
110 }, 111 },
111}; 112};
112 113
114static struct sh_eth_plat_data eth_platform_data = {
115 .phy = 1,
116 .edmac_endian = EDMAC_LITTLE_ENDIAN,
117 .phy_interface = PHY_INTERFACE_MODE_MII,
118};
119
113static struct resource eth_resources[] = { 120static struct resource eth_resources[] = {
114 [0] = { 121 [0] = {
115 .start = 0xfb000000, 122 .start = 0xfb000000,
116 .end = 0xfb0001c8, 123 .end = 0xfb0001c7,
117 .flags = IORESOURCE_MEM, 124 .flags = IORESOURCE_MEM,
118 }, 125 },
119 [1] = { 126 [1] = {
@@ -127,7 +134,7 @@ static struct platform_device eth_device = {
127 .name = "sh7619-ether", 134 .name = "sh7619-ether",
128 .id = -1, 135 .id = -1,
129 .dev = { 136 .dev = {
130 .platform_data = (void *)1, 137 .platform_data = &eth_platform_data,
131 }, 138 },
132 .num_resources = ARRAY_SIZE(eth_resources), 139 .num_resources = ARRAY_SIZE(eth_resources),
133 .resource = eth_resources, 140 .resource = eth_resources,
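
Until now the board passed the PHY id as a bare (void *)1 through platform_data; with a typed sh_eth_plat_data the driver can read proper fields. A hedged probe-side sketch (example_probe is hypothetical):

	static int example_probe(struct platform_device *pdev)
	{
		struct sh_eth_plat_data *pd = pdev->dev.platform_data;

		if (!pd)
			return -EINVAL;
		/* pd->phy, pd->edmac_endian, pd->phy_interface replace the cast */
		return 0;
	}
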
diff --git a/arch/sh/kernel/cpu/shmobile/cpuidle.c b/arch/sh/kernel/cpu/shmobile/cpuidle.c
index d30622592116..e3abfd4277e2 100644
--- a/arch/sh/kernel/cpu/shmobile/cpuidle.c
+++ b/arch/sh/kernel/cpu/shmobile/cpuidle.c
@@ -91,13 +91,11 @@ static struct cpuidle_driver cpuidle_driver = {
91 91
92int __init sh_mobile_setup_cpuidle(void) 92int __init sh_mobile_setup_cpuidle(void)
93{ 93{
94 int ret;
95
96 if (sh_mobile_sleep_supported & SUSP_SH_SF) 94 if (sh_mobile_sleep_supported & SUSP_SH_SF)
97 cpuidle_driver.states[1].disabled = false; 95 cpuidle_driver.states[1].disabled = false;
98 96
99 if (sh_mobile_sleep_supported & SUSP_SH_STANDBY) 97 if (sh_mobile_sleep_supported & SUSP_SH_STANDBY)
100 cpuidle_driver.states[2].disabled = false; 98 cpuidle_driver.states[2].disabled = false;
101 99
102 return cpuidle_register(&cpuidle_driver); 100 return cpuidle_register(&cpuidle_driver, NULL);
103} 101}
diff --git a/arch/sparc/include/asm/switch_to_64.h b/arch/sparc/include/asm/switch_to_64.h
index c7de3323819c..8d284801f232 100644
--- a/arch/sparc/include/asm/switch_to_64.h
+++ b/arch/sparc/include/asm/switch_to_64.h
@@ -48,8 +48,8 @@ do { save_and_clear_fpu(); \
48 "wrpr %%g0, 14, %%pil\n\t" \ 48 "wrpr %%g0, 14, %%pil\n\t" \
49 "brz,pt %%o7, switch_to_pc\n\t" \ 49 "brz,pt %%o7, switch_to_pc\n\t" \
50 " mov %%g7, %0\n\t" \ 50 " mov %%g7, %0\n\t" \
51 "sethi %%hi(ret_from_syscall), %%g1\n\t" \ 51 "sethi %%hi(ret_from_fork), %%g1\n\t" \
52 "jmpl %%g1 + %%lo(ret_from_syscall), %%g0\n\t" \ 52 "jmpl %%g1 + %%lo(ret_from_fork), %%g0\n\t" \
53 " nop\n\t" \ 53 " nop\n\t" \
54 ".globl switch_to_pc\n\t" \ 54 ".globl switch_to_pc\n\t" \
55 "switch_to_pc:\n\t" \ 55 "switch_to_pc:\n\t" \
diff --git a/arch/sparc/kernel/cpumap.c b/arch/sparc/kernel/cpumap.c
index e4de74c2c9b0..cb5d272d658a 100644
--- a/arch/sparc/kernel/cpumap.c
+++ b/arch/sparc/kernel/cpumap.c
@@ -327,6 +327,7 @@ static int iterate_cpu(struct cpuinfo_tree *t, unsigned int root_index)
327 case SUN4V_CHIP_NIAGARA3: 327 case SUN4V_CHIP_NIAGARA3:
328 case SUN4V_CHIP_NIAGARA4: 328 case SUN4V_CHIP_NIAGARA4:
329 case SUN4V_CHIP_NIAGARA5: 329 case SUN4V_CHIP_NIAGARA5:
330 case SUN4V_CHIP_SPARC64X:
330 rover_inc_table = niagara_iterate_method; 331 rover_inc_table = niagara_iterate_method;
331 break; 332 break;
332 default: 333 default:
diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
index e2a030045089..33c02b15f478 100644
--- a/arch/sparc/kernel/entry.S
+++ b/arch/sparc/kernel/entry.S
@@ -839,7 +839,7 @@ sys_sigreturn:
839 nop 839 nop
840 840
841 call syscall_trace 841 call syscall_trace
842 nop 842 mov 1, %o1
843 843
8441: 8441:
845 /* We don't want to muck with user registers like a 845 /* We don't want to muck with user registers like a
diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
index c8759550799f..53c0a82e6030 100644
--- a/arch/sparc/kernel/kgdb_64.c
+++ b/arch/sparc/kernel/kgdb_64.c
@@ -42,7 +42,7 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
42{ 42{
43 struct thread_info *t = task_thread_info(p); 43 struct thread_info *t = task_thread_info(p);
44 extern unsigned int switch_to_pc; 44 extern unsigned int switch_to_pc;
45 extern unsigned int ret_from_syscall; 45 extern unsigned int ret_from_fork;
46 struct reg_window *win; 46 struct reg_window *win;
47 unsigned long pc, cwp; 47 unsigned long pc, cwp;
48 int i; 48 int i;
@@ -66,7 +66,7 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
66 gdb_regs[i] = 0; 66 gdb_regs[i] = 0;
67 67
68 if (t->new_child) 68 if (t->new_child)
69 pc = (unsigned long) &ret_from_syscall; 69 pc = (unsigned long) &ret_from_fork;
70 else 70 else
71 pc = (unsigned long) &switch_to_pc; 71 pc = (unsigned long) &switch_to_pc;
72 72
diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S
index 0746e5e32b37..fde5a419cf27 100644
--- a/arch/sparc/kernel/ktlb.S
+++ b/arch/sparc/kernel/ktlb.S
@@ -25,11 +25,10 @@ kvmap_itlb:
25 */ 25 */
26kvmap_itlb_4v: 26kvmap_itlb_4v:
27 27
28kvmap_itlb_nonlinear:
29 /* Catch kernel NULL pointer calls. */ 28 /* Catch kernel NULL pointer calls. */
30 sethi %hi(PAGE_SIZE), %g5 29 sethi %hi(PAGE_SIZE), %g5
31 cmp %g4, %g5 30 cmp %g4, %g5
32 bleu,pn %xcc, kvmap_dtlb_longpath 31 blu,pn %xcc, kvmap_itlb_longpath
33 nop 32 nop
34 33
35 KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_itlb_load) 34 KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_itlb_load)
diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
index 7ff45e4ba681..773c1f2983ce 100644
--- a/arch/sparc/kernel/ptrace_64.c
+++ b/arch/sparc/kernel/ptrace_64.c
@@ -14,6 +14,7 @@
14#include <linux/sched.h> 14#include <linux/sched.h>
15#include <linux/mm.h> 15#include <linux/mm.h>
16#include <linux/errno.h> 16#include <linux/errno.h>
17#include <linux/export.h>
17#include <linux/ptrace.h> 18#include <linux/ptrace.h>
18#include <linux/user.h> 19#include <linux/user.h>
19#include <linux/smp.h> 20#include <linux/smp.h>
@@ -116,6 +117,7 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
116 117
117 preempt_enable(); 118 preempt_enable();
118} 119}
120EXPORT_SYMBOL_GPL(flush_ptrace_access);
119 121
120static int get_from_target(struct task_struct *target, unsigned long uaddr, 122static int get_from_target(struct task_struct *target, unsigned long uaddr,
121 void *kbuf, int len) 123 void *kbuf, int len)
@@ -1087,7 +1089,7 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
1087 audit_syscall_exit(regs); 1089 audit_syscall_exit(regs);
1088 1090
1089 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) 1091 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
1090 trace_sys_exit(regs, regs->u_regs[UREG_G1]); 1092 trace_sys_exit(regs, regs->u_regs[UREG_I0]);
1091 1093
1092 if (test_thread_flag(TIF_SYSCALL_TRACE)) 1094 if (test_thread_flag(TIF_SYSCALL_TRACE))
1093 tracehook_report_syscall_exit(regs, 0); 1095 tracehook_report_syscall_exit(regs, 0);
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index 13785547e435..3fdb455e3318 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -499,12 +499,14 @@ static void __init init_sparc64_elf_hwcap(void)
499 sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || 499 sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
500 sun4v_chip_type == SUN4V_CHIP_NIAGARA3 || 500 sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
501 sun4v_chip_type == SUN4V_CHIP_NIAGARA4 || 501 sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
502 sun4v_chip_type == SUN4V_CHIP_NIAGARA5) 502 sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
503 sun4v_chip_type == SUN4V_CHIP_SPARC64X)
503 cap |= HWCAP_SPARC_BLKINIT; 504 cap |= HWCAP_SPARC_BLKINIT;
504 if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || 505 if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
505 sun4v_chip_type == SUN4V_CHIP_NIAGARA3 || 506 sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
506 sun4v_chip_type == SUN4V_CHIP_NIAGARA4 || 507 sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
507 sun4v_chip_type == SUN4V_CHIP_NIAGARA5) 508 sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
509 sun4v_chip_type == SUN4V_CHIP_SPARC64X)
508 cap |= HWCAP_SPARC_N2; 510 cap |= HWCAP_SPARC_N2;
509 } 511 }
510 512
@@ -530,13 +532,15 @@ static void __init init_sparc64_elf_hwcap(void)
530 if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || 532 if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
531 sun4v_chip_type == SUN4V_CHIP_NIAGARA3 || 533 sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
532 sun4v_chip_type == SUN4V_CHIP_NIAGARA4 || 534 sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
533 sun4v_chip_type == SUN4V_CHIP_NIAGARA5) 535 sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
536 sun4v_chip_type == SUN4V_CHIP_SPARC64X)
534 cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 | 537 cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 |
535 AV_SPARC_ASI_BLK_INIT | 538 AV_SPARC_ASI_BLK_INIT |
536 AV_SPARC_POPC); 539 AV_SPARC_POPC);
537 if (sun4v_chip_type == SUN4V_CHIP_NIAGARA3 || 540 if (sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
538 sun4v_chip_type == SUN4V_CHIP_NIAGARA4 || 541 sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
539 sun4v_chip_type == SUN4V_CHIP_NIAGARA5) 542 sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
543 sun4v_chip_type == SUN4V_CHIP_SPARC64X)
540 cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC | 544 cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC |
541 AV_SPARC_FMAF); 545 AV_SPARC_FMAF);
542 } 546 }
diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
index 22a1098961f5..d950197a17e1 100644
--- a/arch/sparc/kernel/syscalls.S
+++ b/arch/sparc/kernel/syscalls.S
@@ -98,8 +98,8 @@ sys_clone:
98 ba,pt %xcc, sparc_do_fork 98 ba,pt %xcc, sparc_do_fork
99 add %sp, PTREGS_OFF, %o2 99 add %sp, PTREGS_OFF, %o2
100 100
101 .globl ret_from_syscall 101 .globl ret_from_fork
102ret_from_syscall: 102ret_from_fork:
103 /* Clear current_thread_info()->new_child. */ 103 /* Clear current_thread_info()->new_child. */
104 stb %g0, [%g6 + TI_NEW_CHILD] 104 stb %g0, [%g6 + TI_NEW_CHILD]
105 call schedule_tail 105 call schedule_tail
@@ -152,7 +152,7 @@ linux_syscall_trace32:
152 srl %i4, 0, %o4 152 srl %i4, 0, %o4
153 srl %i1, 0, %o1 153 srl %i1, 0, %o1
154 srl %i2, 0, %o2 154 srl %i2, 0, %o2
155 ba,pt %xcc, 2f 155 ba,pt %xcc, 5f
156 srl %i3, 0, %o3 156 srl %i3, 0, %o3
157 157
158linux_syscall_trace: 158linux_syscall_trace:
@@ -182,13 +182,13 @@ linux_sparc_syscall32:
182 srl %i1, 0, %o1 ! IEU0 Group 182 srl %i1, 0, %o1 ! IEU0 Group
183 ldx [%g6 + TI_FLAGS], %l0 ! Load 183 ldx [%g6 + TI_FLAGS], %l0 ! Load
184 184
185 srl %i5, 0, %o5 ! IEU1 185 srl %i3, 0, %o3 ! IEU0
186 srl %i2, 0, %o2 ! IEU0 Group 186 srl %i2, 0, %o2 ! IEU0 Group
187 andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0 187 andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
188 bne,pn %icc, linux_syscall_trace32 ! CTI 188 bne,pn %icc, linux_syscall_trace32 ! CTI
189 mov %i0, %l5 ! IEU1 189 mov %i0, %l5 ! IEU1
190 call %l7 ! CTI Group brk forced 1905: call %l7 ! CTI Group brk forced
191 srl %i3, 0, %o3 ! IEU0 191 srl %i5, 0, %o5 ! IEU1
192 ba,a,pt %xcc, 3f 192 ba,a,pt %xcc, 3f
193 193
194 /* Linux native system calls enter here... */ 194 /* Linux native system calls enter here... */
diff --git a/arch/sparc/kernel/trampoline_64.S b/arch/sparc/kernel/trampoline_64.S
index e0b1e13a0736..ad4bde3bb61e 100644
--- a/arch/sparc/kernel/trampoline_64.S
+++ b/arch/sparc/kernel/trampoline_64.S
@@ -129,7 +129,6 @@ startup_continue:
129 clr %l5 129 clr %l5
130 sethi %hi(num_kernel_image_mappings), %l6 130 sethi %hi(num_kernel_image_mappings), %l6
131 lduw [%l6 + %lo(num_kernel_image_mappings)], %l6 131 lduw [%l6 + %lo(num_kernel_image_mappings)], %l6
132 add %l6, 1, %l6
133 132
134 mov 15, %l7 133 mov 15, %l7
135 BRANCH_IF_ANY_CHEETAH(g1,g5,2f) 134 BRANCH_IF_ANY_CHEETAH(g1,g5,2f)
@@ -222,7 +221,6 @@ niagara_lock_tlb:
222 clr %l5 221 clr %l5
223 sethi %hi(num_kernel_image_mappings), %l6 222 sethi %hi(num_kernel_image_mappings), %l6
224 lduw [%l6 + %lo(num_kernel_image_mappings)], %l6 223 lduw [%l6 + %lo(num_kernel_image_mappings)], %l6
225 add %l6, 1, %l6
226 224
2271: 2251:
228 mov HV_FAST_MMU_MAP_PERM_ADDR, %o5 226 mov HV_FAST_MMU_MAP_PERM_ADDR, %o5
diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
index 0c4e35e522fa..323335b9cd2b 100644
--- a/arch/sparc/lib/ksyms.c
+++ b/arch/sparc/lib/ksyms.c
@@ -98,15 +98,6 @@ EXPORT_SYMBOL(___copy_from_user);
98EXPORT_SYMBOL(___copy_in_user); 98EXPORT_SYMBOL(___copy_in_user);
99EXPORT_SYMBOL(__clear_user); 99EXPORT_SYMBOL(__clear_user);
100 100
101/* RW semaphores */
102EXPORT_SYMBOL(__down_read);
103EXPORT_SYMBOL(__down_read_trylock);
104EXPORT_SYMBOL(__down_write);
105EXPORT_SYMBOL(__down_write_trylock);
106EXPORT_SYMBOL(__up_read);
107EXPORT_SYMBOL(__up_write);
108EXPORT_SYMBOL(__downgrade_write);
109
110/* Atomic counter implementation. */ 101/* Atomic counter implementation. */
111EXPORT_SYMBOL(atomic_add); 102EXPORT_SYMBOL(atomic_add);
112EXPORT_SYMBOL(atomic_add_ret); 103EXPORT_SYMBOL(atomic_add_ret);
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 24565a7ffe6d..6e1ed55f6cfc 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -26,6 +26,7 @@ config TILE
26 select HAVE_SYSCALL_TRACEPOINTS 26 select HAVE_SYSCALL_TRACEPOINTS
27 select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE 27 select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
28 select HAVE_DEBUG_STACKOVERFLOW 28 select HAVE_DEBUG_STACKOVERFLOW
29 select ARCH_WANT_FRAME_POINTERS
29 30
30# FIXME: investigate whether we need/want these options. 31# FIXME: investigate whether we need/want these options.
31# select HAVE_IOREMAP_PROT 32# select HAVE_IOREMAP_PROT
@@ -64,6 +65,9 @@ config HUGETLB_SUPER_PAGES
64 depends on HUGETLB_PAGE && TILEGX 65 depends on HUGETLB_PAGE && TILEGX
65 def_bool y 66 def_bool y
66 67
68config GENERIC_TIME_VSYSCALL
69 def_bool y
70
67# FIXME: tilegx can implement a more efficient rwsem. 71# FIXME: tilegx can implement a more efficient rwsem.
68config RWSEM_GENERIC_SPINLOCK 72config RWSEM_GENERIC_SPINLOCK
69 def_bool y 73 def_bool y
@@ -112,10 +116,19 @@ config SMP
112config HVC_TILE 116config HVC_TILE
113 depends on TTY 117 depends on TTY
114 select HVC_DRIVER 118 select HVC_DRIVER
119 select HVC_IRQ if TILEGX
115 def_bool y 120 def_bool y
116 121
117config TILEGX 122config TILEGX
118 bool "Building with TILE-Gx (64-bit) compiler and toolchain" 123 bool "Building for TILE-Gx (64-bit) processor"
124 select HAVE_FUNCTION_TRACER
125 select HAVE_FUNCTION_TRACE_MCOUNT_TEST
126 select HAVE_FUNCTION_GRAPH_TRACER
127 select HAVE_DYNAMIC_FTRACE
128 select HAVE_FTRACE_MCOUNT_RECORD
129 select HAVE_KPROBES
130 select HAVE_KRETPROBES
131 select HAVE_ARCH_KGDB
119 132
120config TILEPRO 133config TILEPRO
121 def_bool !TILEGX 134 def_bool !TILEGX
@@ -194,7 +207,7 @@ config SYSVIPC_COMPAT
194 def_bool y 207 def_bool y
195 depends on COMPAT && SYSVIPC 208 depends on COMPAT && SYSVIPC
196 209
197# We do not currently support disabling HIGHMEM on tile64 and tilepro. 210# We do not currently support disabling HIGHMEM on tilepro.
198config HIGHMEM 211config HIGHMEM
199 bool # "Support for more than 512 MB of RAM" 212 bool # "Support for more than 512 MB of RAM"
200 default !TILEGX 213 default !TILEGX
@@ -300,6 +313,8 @@ config PAGE_OFFSET
300 313
301source "mm/Kconfig" 314source "mm/Kconfig"
302 315
316source "kernel/Kconfig.preempt"
317
303config CMDLINE_BOOL 318config CMDLINE_BOOL
304 bool "Built-in kernel command line" 319 bool "Built-in kernel command line"
305 default n 320 default n
@@ -396,8 +411,20 @@ config NO_IOMEM
396config NO_IOPORT 411config NO_IOPORT
397 def_bool !PCI 412 def_bool !PCI
398 413
414config TILE_PCI_IO
415 bool "PCI I/O space support"
416 default n
417 depends on PCI
418 depends on TILEGX
419 ---help---
420 Enable PCI I/O space support on TILE-Gx. Since few modern PCIe
421 endpoint devices use the PCI I/O space, support for it is disabled
422 by default to save the TRIO PIO region resource for other purposes.
423
399source "drivers/pci/Kconfig" 424source "drivers/pci/Kconfig"
400 425
426source "drivers/pci/pcie/Kconfig"
427
401config TILE_USB 428config TILE_USB
402 tristate "Tilera USB host adapter support" 429 tristate "Tilera USB host adapter support"
403 default y 430 default y
diff --git a/arch/tile/Kconfig.debug b/arch/tile/Kconfig.debug
index 9165ea979e85..19734d3ab1e8 100644
--- a/arch/tile/Kconfig.debug
+++ b/arch/tile/Kconfig.debug
@@ -14,14 +14,12 @@ config EARLY_PRINTK
14 with klogd/syslogd. You should normally N here, 14 with klogd/syslogd. You should normally N here,
15 unless you want to debug such a crash. 15 unless you want to debug such a crash.
16 16
17config DEBUG_EXTRA_FLAGS 17config TILE_HVGLUE_TRACE
18 string "Additional compiler arguments when building with '-g'" 18 bool "Provide wrapper functions for hypervisor ABI calls"
19 depends on DEBUG_INFO 19 default n
20 default ""
21 help 20 help
22 Debug info can be large, and flags like 21 Provide wrapper functions for the hypervisor ABI calls
23 `-femit-struct-debug-baseonly' can reduce the kernel file 22 defined in arch/tile/kernel/hvglue.S. This allows tracing
24 size and build time noticeably. Such flags are often 23 mechanisms, etc., to have visibility into those calls.
25 helpful if the main use of debug info is line number info.
26 24
27endmenu 25endmenu
diff --git a/arch/tile/Makefile b/arch/tile/Makefile
index 3d15364c6071..4dc380a519d4 100644
--- a/arch/tile/Makefile
+++ b/arch/tile/Makefile
@@ -30,10 +30,6 @@ endif
30# In kernel modules, this causes load failures due to unsupported relocations. 30# In kernel modules, this causes load failures due to unsupported relocations.
31KBUILD_CFLAGS += -fno-asynchronous-unwind-tables 31KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
32 32
33ifneq ($(CONFIG_DEBUG_EXTRA_FLAGS),"")
34KBUILD_CFLAGS += $(CONFIG_DEBUG_EXTRA_FLAGS)
35endif
36
37LIBGCC_PATH := \ 33LIBGCC_PATH := \
38 $(shell $(CC) $(KBUILD_CFLAGS) $(KCFLAGS) -print-libgcc-file-name) 34 $(shell $(CC) $(KBUILD_CFLAGS) $(KCFLAGS) -print-libgcc-file-name)
39 35
diff --git a/arch/tile/configs/tilegx_defconfig b/arch/tile/configs/tilegx_defconfig
index 47684815e5c8..730e40d9cf62 100644
--- a/arch/tile/configs/tilegx_defconfig
+++ b/arch/tile/configs/tilegx_defconfig
@@ -1,16 +1,15 @@
1CONFIG_TILEGX=y 1CONFIG_TILEGX=y
2CONFIG_EXPERIMENTAL=y
3# CONFIG_LOCALVERSION_AUTO is not set
4CONFIG_SYSVIPC=y 2CONFIG_SYSVIPC=y
5CONFIG_POSIX_MQUEUE=y 3CONFIG_POSIX_MQUEUE=y
4CONFIG_FHANDLE=y
5CONFIG_AUDIT=y
6CONFIG_NO_HZ=y
6CONFIG_BSD_PROCESS_ACCT=y 7CONFIG_BSD_PROCESS_ACCT=y
7CONFIG_BSD_PROCESS_ACCT_V3=y 8CONFIG_BSD_PROCESS_ACCT_V3=y
8CONFIG_FHANDLE=y
9CONFIG_TASKSTATS=y 9CONFIG_TASKSTATS=y
10CONFIG_TASK_DELAY_ACCT=y 10CONFIG_TASK_DELAY_ACCT=y
11CONFIG_TASK_XACCT=y 11CONFIG_TASK_XACCT=y
12CONFIG_TASK_IO_ACCOUNTING=y 12CONFIG_TASK_IO_ACCOUNTING=y
13CONFIG_AUDIT=y
14CONFIG_LOG_BUF_SHIFT=19 13CONFIG_LOG_BUF_SHIFT=19
15CONFIG_CGROUPS=y 14CONFIG_CGROUPS=y
16CONFIG_CGROUP_DEBUG=y 15CONFIG_CGROUP_DEBUG=y
@@ -18,18 +17,18 @@ CONFIG_CGROUP_DEVICE=y
18CONFIG_CPUSETS=y 17CONFIG_CPUSETS=y
19CONFIG_CGROUP_CPUACCT=y 18CONFIG_CGROUP_CPUACCT=y
20CONFIG_RESOURCE_COUNTERS=y 19CONFIG_RESOURCE_COUNTERS=y
21CONFIG_CGROUP_MEMCG=y
22CONFIG_CGROUP_MEMCG_SWAP=y
23CONFIG_CGROUP_SCHED=y 20CONFIG_CGROUP_SCHED=y
24CONFIG_RT_GROUP_SCHED=y 21CONFIG_RT_GROUP_SCHED=y
25CONFIG_BLK_CGROUP=y 22CONFIG_BLK_CGROUP=y
26CONFIG_NAMESPACES=y 23CONFIG_NAMESPACES=y
27CONFIG_RELAY=y 24CONFIG_RELAY=y
28CONFIG_BLK_DEV_INITRD=y 25CONFIG_BLK_DEV_INITRD=y
26CONFIG_RD_XZ=y
29CONFIG_SYSCTL_SYSCALL=y 27CONFIG_SYSCTL_SYSCALL=y
30CONFIG_EMBEDDED=y 28CONFIG_EMBEDDED=y
31# CONFIG_COMPAT_BRK is not set 29# CONFIG_COMPAT_BRK is not set
32CONFIG_PROFILING=y 30CONFIG_PROFILING=y
31CONFIG_KPROBES=y
33CONFIG_MODULES=y 32CONFIG_MODULES=y
34CONFIG_MODULE_FORCE_LOAD=y 33CONFIG_MODULE_FORCE_LOAD=y
35CONFIG_MODULE_UNLOAD=y 34CONFIG_MODULE_UNLOAD=y
@@ -45,12 +44,12 @@ CONFIG_UNIXWARE_DISKLABEL=y
45CONFIG_SGI_PARTITION=y 44CONFIG_SGI_PARTITION=y
46CONFIG_SUN_PARTITION=y 45CONFIG_SUN_PARTITION=y
47CONFIG_KARMA_PARTITION=y 46CONFIG_KARMA_PARTITION=y
48CONFIG_EFI_PARTITION=y
49CONFIG_CFQ_GROUP_IOSCHED=y 47CONFIG_CFQ_GROUP_IOSCHED=y
50CONFIG_NR_CPUS=100 48CONFIG_NR_CPUS=100
51CONFIG_NO_HZ=y
52CONFIG_HIGH_RES_TIMERS=y
53CONFIG_HZ_100=y 49CONFIG_HZ_100=y
50# CONFIG_COMPACTION is not set
51CONFIG_PREEMPT_VOLUNTARY=y
52CONFIG_TILE_PCI_IO=y
54CONFIG_PCI_DEBUG=y 53CONFIG_PCI_DEBUG=y
55# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 54# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
56CONFIG_BINFMT_MISC=y 55CONFIG_BINFMT_MISC=y
@@ -108,150 +107,9 @@ CONFIG_IPV6_MULTIPLE_TABLES=y
108CONFIG_IPV6_MROUTE=y 107CONFIG_IPV6_MROUTE=y
109CONFIG_IPV6_PIMSM_V2=y 108CONFIG_IPV6_PIMSM_V2=y
110CONFIG_NETLABEL=y 109CONFIG_NETLABEL=y
111CONFIG_NETFILTER=y
112CONFIG_NF_CONNTRACK=m
113CONFIG_NF_CONNTRACK_SECMARK=y
114CONFIG_NF_CONNTRACK_ZONES=y
115CONFIG_NF_CONNTRACK_EVENTS=y
116CONFIG_NF_CT_PROTO_DCCP=m
117CONFIG_NF_CT_PROTO_UDPLITE=m
118CONFIG_NF_CONNTRACK_AMANDA=m
119CONFIG_NF_CONNTRACK_FTP=m
120CONFIG_NF_CONNTRACK_H323=m
121CONFIG_NF_CONNTRACK_IRC=m
122CONFIG_NF_CONNTRACK_NETBIOS_NS=m
123CONFIG_NF_CONNTRACK_PPTP=m
124CONFIG_NF_CONNTRACK_SANE=m
125CONFIG_NF_CONNTRACK_SIP=m
126CONFIG_NF_CONNTRACK_TFTP=m
127CONFIG_NETFILTER_TPROXY=m
128CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
129CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
130CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
131CONFIG_NETFILTER_XT_TARGET_CT=m
132CONFIG_NETFILTER_XT_TARGET_DSCP=m
133CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
134CONFIG_NETFILTER_XT_TARGET_MARK=m
135CONFIG_NETFILTER_XT_TARGET_NFLOG=m
136CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
137CONFIG_NETFILTER_XT_TARGET_TEE=m
138CONFIG_NETFILTER_XT_TARGET_TPROXY=m
139CONFIG_NETFILTER_XT_TARGET_TRACE=m
140CONFIG_NETFILTER_XT_TARGET_SECMARK=m
141CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
142CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
143CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
144CONFIG_NETFILTER_XT_MATCH_COMMENT=m
145CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
146CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
147CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
148CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
149CONFIG_NETFILTER_XT_MATCH_DCCP=m
150CONFIG_NETFILTER_XT_MATCH_DSCP=m
151CONFIG_NETFILTER_XT_MATCH_ESP=m
152CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
153CONFIG_NETFILTER_XT_MATCH_HELPER=m
154CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
155CONFIG_NETFILTER_XT_MATCH_IPVS=m
156CONFIG_NETFILTER_XT_MATCH_LENGTH=m
157CONFIG_NETFILTER_XT_MATCH_LIMIT=m
158CONFIG_NETFILTER_XT_MATCH_MAC=m
159CONFIG_NETFILTER_XT_MATCH_MARK=m
160CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
161CONFIG_NETFILTER_XT_MATCH_OSF=m
162CONFIG_NETFILTER_XT_MATCH_OWNER=m
163CONFIG_NETFILTER_XT_MATCH_POLICY=m
164CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
165CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
166CONFIG_NETFILTER_XT_MATCH_QUOTA=m
167CONFIG_NETFILTER_XT_MATCH_RATEEST=m
168CONFIG_NETFILTER_XT_MATCH_REALM=m
169CONFIG_NETFILTER_XT_MATCH_RECENT=m
170CONFIG_NETFILTER_XT_MATCH_SOCKET=m
171CONFIG_NETFILTER_XT_MATCH_STATE=m
172CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
173CONFIG_NETFILTER_XT_MATCH_STRING=m
174CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
175CONFIG_NETFILTER_XT_MATCH_TIME=m
176CONFIG_NETFILTER_XT_MATCH_U32=m
177CONFIG_IP_VS=m
178CONFIG_IP_VS_IPV6=y
179CONFIG_IP_VS_PROTO_TCP=y
180CONFIG_IP_VS_PROTO_UDP=y
181CONFIG_IP_VS_PROTO_ESP=y
182CONFIG_IP_VS_PROTO_AH=y
183CONFIG_IP_VS_PROTO_SCTP=y
184CONFIG_IP_VS_RR=m
185CONFIG_IP_VS_WRR=m
186CONFIG_IP_VS_LC=m
187CONFIG_IP_VS_WLC=m
188CONFIG_IP_VS_LBLC=m
189CONFIG_IP_VS_LBLCR=m
190CONFIG_IP_VS_SED=m
191CONFIG_IP_VS_NQ=m
192CONFIG_NF_CONNTRACK_IPV4=m
193# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
194CONFIG_IP_NF_QUEUE=m
195CONFIG_IP_NF_IPTABLES=y
196CONFIG_IP_NF_MATCH_AH=m
197CONFIG_IP_NF_MATCH_ECN=m
198CONFIG_IP_NF_MATCH_TTL=m
199CONFIG_IP_NF_FILTER=y
200CONFIG_IP_NF_TARGET_REJECT=y
201CONFIG_IP_NF_TARGET_LOG=m
202CONFIG_IP_NF_TARGET_ULOG=m
203CONFIG_IP_NF_MANGLE=m
204CONFIG_IP_NF_TARGET_ECN=m
205CONFIG_IP_NF_TARGET_TTL=m
206CONFIG_IP_NF_RAW=m
207CONFIG_IP_NF_SECURITY=m
208CONFIG_IP_NF_ARPTABLES=m
209CONFIG_IP_NF_ARPFILTER=m
210CONFIG_IP_NF_ARP_MANGLE=m
211CONFIG_NF_CONNTRACK_IPV6=m
212CONFIG_IP6_NF_QUEUE=m
213CONFIG_IP6_NF_IPTABLES=m
214CONFIG_IP6_NF_MATCH_AH=m
215CONFIG_IP6_NF_MATCH_EUI64=m
216CONFIG_IP6_NF_MATCH_FRAG=m
217CONFIG_IP6_NF_MATCH_OPTS=m
218CONFIG_IP6_NF_MATCH_HL=m
219CONFIG_IP6_NF_MATCH_IPV6HEADER=m
220CONFIG_IP6_NF_MATCH_MH=m
221CONFIG_IP6_NF_MATCH_RT=m
222CONFIG_IP6_NF_TARGET_HL=m
223CONFIG_IP6_NF_TARGET_LOG=m
224CONFIG_IP6_NF_FILTER=m
225CONFIG_IP6_NF_TARGET_REJECT=m
226CONFIG_IP6_NF_MANGLE=m
227CONFIG_IP6_NF_RAW=m
228CONFIG_IP6_NF_SECURITY=m
229CONFIG_BRIDGE_NF_EBTABLES=m
230CONFIG_BRIDGE_EBT_BROUTE=m
231CONFIG_BRIDGE_EBT_T_FILTER=m
232CONFIG_BRIDGE_EBT_T_NAT=m
233CONFIG_BRIDGE_EBT_802_3=m
234CONFIG_BRIDGE_EBT_AMONG=m
235CONFIG_BRIDGE_EBT_ARP=m
236CONFIG_BRIDGE_EBT_IP=m
237CONFIG_BRIDGE_EBT_IP6=m
238CONFIG_BRIDGE_EBT_LIMIT=m
239CONFIG_BRIDGE_EBT_MARK=m
240CONFIG_BRIDGE_EBT_PKTTYPE=m
241CONFIG_BRIDGE_EBT_STP=m
242CONFIG_BRIDGE_EBT_VLAN=m
243CONFIG_BRIDGE_EBT_ARPREPLY=m
244CONFIG_BRIDGE_EBT_DNAT=m
245CONFIG_BRIDGE_EBT_MARK_T=m
246CONFIG_BRIDGE_EBT_REDIRECT=m
247CONFIG_BRIDGE_EBT_SNAT=m
248CONFIG_BRIDGE_EBT_LOG=m
249CONFIG_BRIDGE_EBT_ULOG=m
250CONFIG_BRIDGE_EBT_NFLOG=m
251CONFIG_RDS=m 110CONFIG_RDS=m
252CONFIG_RDS_TCP=m 111CONFIG_RDS_TCP=m
253CONFIG_BRIDGE=m 112CONFIG_BRIDGE=m
254CONFIG_NET_DSA=y
255CONFIG_VLAN_8021Q=m 113CONFIG_VLAN_8021Q=m
256CONFIG_VLAN_8021Q_GVRP=y 114CONFIG_VLAN_8021Q_GVRP=y
257CONFIG_PHONET=m 115CONFIG_PHONET=m
@@ -292,13 +150,13 @@ CONFIG_NET_ACT_POLICE=m
292CONFIG_NET_ACT_GACT=m 150CONFIG_NET_ACT_GACT=m
293CONFIG_GACT_PROB=y 151CONFIG_GACT_PROB=y
294CONFIG_NET_ACT_MIRRED=m 152CONFIG_NET_ACT_MIRRED=m
295CONFIG_NET_ACT_IPT=m
296CONFIG_NET_ACT_NAT=m 153CONFIG_NET_ACT_NAT=m
297CONFIG_NET_ACT_PEDIT=m 154CONFIG_NET_ACT_PEDIT=m
298CONFIG_NET_ACT_SIMP=m 155CONFIG_NET_ACT_SIMP=m
299CONFIG_NET_ACT_SKBEDIT=m 156CONFIG_NET_ACT_SKBEDIT=m
300CONFIG_NET_CLS_IND=y 157CONFIG_NET_CLS_IND=y
301CONFIG_DCB=y 158CONFIG_DCB=y
159CONFIG_DNS_RESOLVER=y
302# CONFIG_WIRELESS is not set 160# CONFIG_WIRELESS is not set
303CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 161CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
304CONFIG_DEVTMPFS=y 162CONFIG_DEVTMPFS=y
@@ -317,10 +175,12 @@ CONFIG_BLK_DEV_SD=y
317CONFIG_SCSI_CONSTANTS=y 175CONFIG_SCSI_CONSTANTS=y
318CONFIG_SCSI_LOGGING=y 176CONFIG_SCSI_LOGGING=y
319CONFIG_SCSI_SAS_ATA=y 177CONFIG_SCSI_SAS_ATA=y
178CONFIG_ISCSI_TCP=m
320CONFIG_SCSI_MVSAS=y 179CONFIG_SCSI_MVSAS=y
321# CONFIG_SCSI_MVSAS_DEBUG is not set 180# CONFIG_SCSI_MVSAS_DEBUG is not set
322CONFIG_SCSI_MVSAS_TASKLET=y 181CONFIG_SCSI_MVSAS_TASKLET=y
323CONFIG_ATA=y 182CONFIG_ATA=y
183CONFIG_SATA_AHCI=y
324CONFIG_SATA_SIL24=y 184CONFIG_SATA_SIL24=y
325# CONFIG_ATA_SFF is not set 185# CONFIG_ATA_SFF is not set
326CONFIG_MD=y 186CONFIG_MD=y
@@ -343,6 +203,12 @@ CONFIG_DM_MULTIPATH_QL=m
343CONFIG_DM_MULTIPATH_ST=m 203CONFIG_DM_MULTIPATH_ST=m
344CONFIG_DM_DELAY=m 204CONFIG_DM_DELAY=m
345CONFIG_DM_UEVENT=y 205CONFIG_DM_UEVENT=y
206CONFIG_TARGET_CORE=m
207CONFIG_TCM_IBLOCK=m
208CONFIG_TCM_FILEIO=m
209CONFIG_TCM_PSCSI=m
210CONFIG_LOOPBACK_TARGET=m
211CONFIG_ISCSI_TARGET=m
346CONFIG_FUSION=y 212CONFIG_FUSION=y
347CONFIG_FUSION_SAS=y 213CONFIG_FUSION_SAS=y
348CONFIG_NETDEVICES=y 214CONFIG_NETDEVICES=y
@@ -359,42 +225,8 @@ CONFIG_VETH=m
359CONFIG_NET_DSA_MV88E6060=y 225CONFIG_NET_DSA_MV88E6060=y
360CONFIG_NET_DSA_MV88E6131=y 226CONFIG_NET_DSA_MV88E6131=y
361CONFIG_NET_DSA_MV88E6123_61_65=y 227CONFIG_NET_DSA_MV88E6123_61_65=y
362# CONFIG_NET_VENDOR_3COM is not set 228CONFIG_SKY2=y
363# CONFIG_NET_VENDOR_ADAPTEC is not set 229CONFIG_PTP_1588_CLOCK_TILEGX=y
364# CONFIG_NET_VENDOR_ALTEON is not set
365# CONFIG_NET_VENDOR_AMD is not set
366# CONFIG_NET_VENDOR_ATHEROS is not set
367# CONFIG_NET_VENDOR_BROADCOM is not set
368# CONFIG_NET_VENDOR_BROCADE is not set
369# CONFIG_NET_VENDOR_CHELSIO is not set
370# CONFIG_NET_VENDOR_CISCO is not set
371# CONFIG_NET_VENDOR_DEC is not set
372# CONFIG_NET_VENDOR_DLINK is not set
373# CONFIG_NET_VENDOR_EMULEX is not set
374# CONFIG_NET_VENDOR_EXAR is not set
375# CONFIG_NET_VENDOR_HP is not set
376# CONFIG_NET_VENDOR_INTEL is not set
377# CONFIG_NET_VENDOR_MARVELL is not set
378# CONFIG_NET_VENDOR_MELLANOX is not set
379# CONFIG_NET_VENDOR_MICREL is not set
380# CONFIG_NET_VENDOR_MYRI is not set
381# CONFIG_NET_VENDOR_NATSEMI is not set
382# CONFIG_NET_VENDOR_NVIDIA is not set
383# CONFIG_NET_VENDOR_OKI is not set
384# CONFIG_NET_PACKET_ENGINE is not set
385# CONFIG_NET_VENDOR_QLOGIC is not set
386# CONFIG_NET_VENDOR_REALTEK is not set
387# CONFIG_NET_VENDOR_RDC is not set
388# CONFIG_NET_VENDOR_SEEQ is not set
389# CONFIG_NET_VENDOR_SILAN is not set
390# CONFIG_NET_VENDOR_SIS is not set
391# CONFIG_NET_VENDOR_SMSC is not set
392# CONFIG_NET_VENDOR_STMICRO is not set
393# CONFIG_NET_VENDOR_SUN is not set
394# CONFIG_NET_VENDOR_TEHUTI is not set
395# CONFIG_NET_VENDOR_TI is not set
396# CONFIG_TILE_NET is not set
397# CONFIG_NET_VENDOR_VIA is not set
398# CONFIG_WLAN is not set 230# CONFIG_WLAN is not set
399# CONFIG_INPUT_MOUSEDEV is not set 231# CONFIG_INPUT_MOUSEDEV is not set
400# CONFIG_INPUT_KEYBOARD is not set 232# CONFIG_INPUT_KEYBOARD is not set
@@ -402,6 +234,7 @@ CONFIG_NET_DSA_MV88E6123_61_65=y
402# CONFIG_SERIO is not set 234# CONFIG_SERIO is not set
403# CONFIG_VT is not set 235# CONFIG_VT is not set
404# CONFIG_LEGACY_PTYS is not set 236# CONFIG_LEGACY_PTYS is not set
237CONFIG_SERIAL_TILEGX=y
405CONFIG_HW_RANDOM=y 238CONFIG_HW_RANDOM=y
406CONFIG_HW_RANDOM_TIMERIOMEM=m 239CONFIG_HW_RANDOM_TIMERIOMEM=m
407CONFIG_I2C=y 240CONFIG_I2C=y
@@ -410,13 +243,16 @@ CONFIG_I2C_CHARDEV=y
410CONFIG_WATCHDOG=y 243CONFIG_WATCHDOG=y
411CONFIG_WATCHDOG_NOWAYOUT=y 244CONFIG_WATCHDOG_NOWAYOUT=y
412# CONFIG_VGA_ARB is not set 245# CONFIG_VGA_ARB is not set
413# CONFIG_HID_SUPPORT is not set 246CONFIG_DRM=m
247CONFIG_DRM_TDFX=m
248CONFIG_DRM_R128=m
249CONFIG_DRM_MGA=m
250CONFIG_DRM_VIA=m
251CONFIG_DRM_SAVAGE=m
414CONFIG_USB=y 252CONFIG_USB=y
415# CONFIG_USB_DEVICE_CLASS is not set
416CONFIG_USB_EHCI_HCD=y 253CONFIG_USB_EHCI_HCD=y
417CONFIG_USB_OHCI_HCD=y 254CONFIG_USB_OHCI_HCD=y
418CONFIG_USB_STORAGE=y 255CONFIG_USB_STORAGE=y
419CONFIG_USB_LIBUSUAL=y
420CONFIG_EDAC=y 256CONFIG_EDAC=y
421CONFIG_EDAC_MM_EDAC=y 257CONFIG_EDAC_MM_EDAC=y
422CONFIG_RTC_CLASS=y 258CONFIG_RTC_CLASS=y
@@ -464,9 +300,8 @@ CONFIG_ECRYPT_FS=m
464CONFIG_CRAMFS=m 300CONFIG_CRAMFS=m
465CONFIG_SQUASHFS=m 301CONFIG_SQUASHFS=m
466CONFIG_NFS_FS=m 302CONFIG_NFS_FS=m
467CONFIG_NFS_V3=y
468CONFIG_NFS_V3_ACL=y 303CONFIG_NFS_V3_ACL=y
469CONFIG_NFS_V4=y 304CONFIG_NFS_V4=m
470CONFIG_NFS_V4_1=y 305CONFIG_NFS_V4_1=y
471CONFIG_NFS_FSCACHE=y 306CONFIG_NFS_FSCACHE=y
472CONFIG_NFSD=m 307CONFIG_NFSD=m
@@ -519,25 +354,28 @@ CONFIG_NLS_ISO8859_15=m
519CONFIG_NLS_KOI8_R=m 354CONFIG_NLS_KOI8_R=m
520CONFIG_NLS_KOI8_U=m 355CONFIG_NLS_KOI8_U=m
521CONFIG_NLS_UTF8=m 356CONFIG_NLS_UTF8=m
357CONFIG_DLM=m
522CONFIG_DLM_DEBUG=y 358CONFIG_DLM_DEBUG=y
359CONFIG_DYNAMIC_DEBUG=y
360CONFIG_DEBUG_INFO=y
361CONFIG_DEBUG_INFO_REDUCED=y
523# CONFIG_ENABLE_WARN_DEPRECATED is not set 362# CONFIG_ENABLE_WARN_DEPRECATED is not set
524CONFIG_MAGIC_SYSRQ=y
525CONFIG_STRIP_ASM_SYMS=y 363CONFIG_STRIP_ASM_SYMS=y
526CONFIG_DEBUG_FS=y 364CONFIG_DEBUG_FS=y
527CONFIG_HEADERS_CHECK=y 365CONFIG_HEADERS_CHECK=y
366# CONFIG_FRAME_POINTER is not set
367CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
368CONFIG_DEBUG_VM=y
369CONFIG_DEBUG_MEMORY_INIT=y
370CONFIG_DEBUG_STACKOVERFLOW=y
528CONFIG_LOCKUP_DETECTOR=y 371CONFIG_LOCKUP_DETECTOR=y
529CONFIG_SCHEDSTATS=y 372CONFIG_SCHEDSTATS=y
530CONFIG_TIMER_STATS=y 373CONFIG_TIMER_STATS=y
531CONFIG_DEBUG_INFO=y
532CONFIG_DEBUG_INFO_REDUCED=y
533CONFIG_DEBUG_VM=y
534CONFIG_DEBUG_MEMORY_INIT=y
535CONFIG_DEBUG_LIST=y 374CONFIG_DEBUG_LIST=y
536CONFIG_DEBUG_CREDENTIALS=y 375CONFIG_DEBUG_CREDENTIALS=y
537CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y 376CONFIG_RCU_CPU_STALL_TIMEOUT=60
538CONFIG_DYNAMIC_DEBUG=y
539CONFIG_ASYNC_RAID6_TEST=m 377CONFIG_ASYNC_RAID6_TEST=m
540CONFIG_DEBUG_STACKOVERFLOW=y 378CONFIG_KGDB=y
541CONFIG_KEYS_DEBUG_PROC_KEYS=y 379CONFIG_KEYS_DEBUG_PROC_KEYS=y
542CONFIG_SECURITY=y 380CONFIG_SECURITY=y
543CONFIG_SECURITYFS=y 381CONFIG_SECURITYFS=y
@@ -546,7 +384,6 @@ CONFIG_SECURITY_NETWORK_XFRM=y
546CONFIG_SECURITY_SELINUX=y 384CONFIG_SECURITY_SELINUX=y
547CONFIG_SECURITY_SELINUX_BOOTPARAM=y 385CONFIG_SECURITY_SELINUX_BOOTPARAM=y
548CONFIG_SECURITY_SELINUX_DISABLE=y 386CONFIG_SECURITY_SELINUX_DISABLE=y
549CONFIG_CRYPTO_NULL=m
550CONFIG_CRYPTO_PCRYPT=m 387CONFIG_CRYPTO_PCRYPT=m
551CONFIG_CRYPTO_CRYPTD=m 388CONFIG_CRYPTO_CRYPTD=m
552CONFIG_CRYPTO_TEST=m 389CONFIG_CRYPTO_TEST=m
@@ -559,14 +396,12 @@ CONFIG_CRYPTO_XTS=m
559CONFIG_CRYPTO_HMAC=y 396CONFIG_CRYPTO_HMAC=y
560CONFIG_CRYPTO_XCBC=m 397CONFIG_CRYPTO_XCBC=m
561CONFIG_CRYPTO_VMAC=m 398CONFIG_CRYPTO_VMAC=m
562CONFIG_CRYPTO_CRC32C=y
563CONFIG_CRYPTO_MICHAEL_MIC=m 399CONFIG_CRYPTO_MICHAEL_MIC=m
564CONFIG_CRYPTO_RMD128=m 400CONFIG_CRYPTO_RMD128=m
565CONFIG_CRYPTO_RMD160=m 401CONFIG_CRYPTO_RMD160=m
566CONFIG_CRYPTO_RMD256=m 402CONFIG_CRYPTO_RMD256=m
567CONFIG_CRYPTO_RMD320=m 403CONFIG_CRYPTO_RMD320=m
568CONFIG_CRYPTO_SHA1=y 404CONFIG_CRYPTO_SHA1=y
569CONFIG_CRYPTO_SHA256=m
570CONFIG_CRYPTO_SHA512=m 405CONFIG_CRYPTO_SHA512=m
571CONFIG_CRYPTO_TGR192=m 406CONFIG_CRYPTO_TGR192=m
572CONFIG_CRYPTO_WP512=m 407CONFIG_CRYPTO_WP512=m
diff --git a/arch/tile/configs/tilepro_defconfig b/arch/tile/configs/tilepro_defconfig
index dd2b8f0c631f..80fc32ed0491 100644
--- a/arch/tile/configs/tilepro_defconfig
+++ b/arch/tile/configs/tilepro_defconfig
@@ -1,15 +1,14 @@
1CONFIG_EXPERIMENTAL=y
2# CONFIG_LOCALVERSION_AUTO is not set
3CONFIG_SYSVIPC=y 1CONFIG_SYSVIPC=y
4CONFIG_POSIX_MQUEUE=y 2CONFIG_POSIX_MQUEUE=y
3CONFIG_AUDIT=y
4CONFIG_NO_HZ=y
5CONFIG_HIGH_RES_TIMERS=y
5CONFIG_BSD_PROCESS_ACCT=y 6CONFIG_BSD_PROCESS_ACCT=y
6CONFIG_BSD_PROCESS_ACCT_V3=y 7CONFIG_BSD_PROCESS_ACCT_V3=y
7CONFIG_FHANDLE=y
8CONFIG_TASKSTATS=y 8CONFIG_TASKSTATS=y
9CONFIG_TASK_DELAY_ACCT=y 9CONFIG_TASK_DELAY_ACCT=y
10CONFIG_TASK_XACCT=y 10CONFIG_TASK_XACCT=y
11CONFIG_TASK_IO_ACCOUNTING=y 11CONFIG_TASK_IO_ACCOUNTING=y
12CONFIG_AUDIT=y
13CONFIG_LOG_BUF_SHIFT=19 12CONFIG_LOG_BUF_SHIFT=19
14CONFIG_CGROUPS=y 13CONFIG_CGROUPS=y
15CONFIG_CGROUP_DEBUG=y 14CONFIG_CGROUP_DEBUG=y
@@ -17,14 +16,13 @@ CONFIG_CGROUP_DEVICE=y
17CONFIG_CPUSETS=y 16CONFIG_CPUSETS=y
18CONFIG_CGROUP_CPUACCT=y 17CONFIG_CGROUP_CPUACCT=y
19CONFIG_RESOURCE_COUNTERS=y 18CONFIG_RESOURCE_COUNTERS=y
20CONFIG_CGROUP_MEMCG=y
21CONFIG_CGROUP_MEMCG_SWAP=y
22CONFIG_CGROUP_SCHED=y 19CONFIG_CGROUP_SCHED=y
23CONFIG_RT_GROUP_SCHED=y 20CONFIG_RT_GROUP_SCHED=y
24CONFIG_BLK_CGROUP=y 21CONFIG_BLK_CGROUP=y
25CONFIG_NAMESPACES=y 22CONFIG_NAMESPACES=y
26CONFIG_RELAY=y 23CONFIG_RELAY=y
27CONFIG_BLK_DEV_INITRD=y 24CONFIG_BLK_DEV_INITRD=y
25CONFIG_RD_XZ=y
28CONFIG_SYSCTL_SYSCALL=y 26CONFIG_SYSCTL_SYSCALL=y
29CONFIG_EMBEDDED=y 27CONFIG_EMBEDDED=y
30# CONFIG_COMPAT_BRK is not set 28# CONFIG_COMPAT_BRK is not set
@@ -44,11 +42,10 @@ CONFIG_UNIXWARE_DISKLABEL=y
44CONFIG_SGI_PARTITION=y 42CONFIG_SGI_PARTITION=y
45CONFIG_SUN_PARTITION=y 43CONFIG_SUN_PARTITION=y
46CONFIG_KARMA_PARTITION=y 44CONFIG_KARMA_PARTITION=y
47CONFIG_EFI_PARTITION=y
48CONFIG_CFQ_GROUP_IOSCHED=y 45CONFIG_CFQ_GROUP_IOSCHED=y
49CONFIG_NO_HZ=y
50CONFIG_HIGH_RES_TIMERS=y
51CONFIG_HZ_100=y 46CONFIG_HZ_100=y
47# CONFIG_COMPACTION is not set
48CONFIG_PREEMPT_VOLUNTARY=y
52CONFIG_PCI_DEBUG=y 49CONFIG_PCI_DEBUG=y
53# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 50# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
54CONFIG_BINFMT_MISC=y 51CONFIG_BINFMT_MISC=y
@@ -122,16 +119,15 @@ CONFIG_NF_CONNTRACK_PPTP=m
122CONFIG_NF_CONNTRACK_SANE=m 119CONFIG_NF_CONNTRACK_SANE=m
123CONFIG_NF_CONNTRACK_SIP=m 120CONFIG_NF_CONNTRACK_SIP=m
124CONFIG_NF_CONNTRACK_TFTP=m 121CONFIG_NF_CONNTRACK_TFTP=m
125CONFIG_NETFILTER_TPROXY=m
126CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m 122CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
127CONFIG_NETFILTER_XT_TARGET_CONNMARK=m 123CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
128CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m 124CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
129CONFIG_NETFILTER_XT_TARGET_CT=m
130CONFIG_NETFILTER_XT_TARGET_DSCP=m 125CONFIG_NETFILTER_XT_TARGET_DSCP=m
131CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m 126CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
132CONFIG_NETFILTER_XT_TARGET_MARK=m 127CONFIG_NETFILTER_XT_TARGET_MARK=m
133CONFIG_NETFILTER_XT_TARGET_NFLOG=m 128CONFIG_NETFILTER_XT_TARGET_NFLOG=m
134CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m 129CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
130CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
135CONFIG_NETFILTER_XT_TARGET_TEE=m 131CONFIG_NETFILTER_XT_TARGET_TEE=m
136CONFIG_NETFILTER_XT_TARGET_TPROXY=m 132CONFIG_NETFILTER_XT_TARGET_TPROXY=m
137CONFIG_NETFILTER_XT_TARGET_TRACE=m 133CONFIG_NETFILTER_XT_TARGET_TRACE=m
@@ -189,14 +185,12 @@ CONFIG_IP_VS_SED=m
189CONFIG_IP_VS_NQ=m 185CONFIG_IP_VS_NQ=m
190CONFIG_NF_CONNTRACK_IPV4=m 186CONFIG_NF_CONNTRACK_IPV4=m
191# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set 187# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
192CONFIG_IP_NF_QUEUE=m
193CONFIG_IP_NF_IPTABLES=y 188CONFIG_IP_NF_IPTABLES=y
194CONFIG_IP_NF_MATCH_AH=m 189CONFIG_IP_NF_MATCH_AH=m
195CONFIG_IP_NF_MATCH_ECN=m 190CONFIG_IP_NF_MATCH_ECN=m
196CONFIG_IP_NF_MATCH_TTL=m 191CONFIG_IP_NF_MATCH_TTL=m
197CONFIG_IP_NF_FILTER=y 192CONFIG_IP_NF_FILTER=y
198CONFIG_IP_NF_TARGET_REJECT=y 193CONFIG_IP_NF_TARGET_REJECT=y
199CONFIG_IP_NF_TARGET_LOG=m
200CONFIG_IP_NF_TARGET_ULOG=m 194CONFIG_IP_NF_TARGET_ULOG=m
201CONFIG_IP_NF_MANGLE=m 195CONFIG_IP_NF_MANGLE=m
202CONFIG_IP_NF_TARGET_ECN=m 196CONFIG_IP_NF_TARGET_ECN=m
@@ -207,8 +201,6 @@ CONFIG_IP_NF_ARPTABLES=m
207CONFIG_IP_NF_ARPFILTER=m 201CONFIG_IP_NF_ARPFILTER=m
208CONFIG_IP_NF_ARP_MANGLE=m 202CONFIG_IP_NF_ARP_MANGLE=m
209CONFIG_NF_CONNTRACK_IPV6=m 203CONFIG_NF_CONNTRACK_IPV6=m
210CONFIG_IP6_NF_QUEUE=m
211CONFIG_IP6_NF_IPTABLES=m
212CONFIG_IP6_NF_MATCH_AH=m 204CONFIG_IP6_NF_MATCH_AH=m
213CONFIG_IP6_NF_MATCH_EUI64=m 205CONFIG_IP6_NF_MATCH_EUI64=m
214CONFIG_IP6_NF_MATCH_FRAG=m 206CONFIG_IP6_NF_MATCH_FRAG=m
@@ -218,7 +210,6 @@ CONFIG_IP6_NF_MATCH_IPV6HEADER=m
218CONFIG_IP6_NF_MATCH_MH=m 210CONFIG_IP6_NF_MATCH_MH=m
219CONFIG_IP6_NF_MATCH_RT=m 211CONFIG_IP6_NF_MATCH_RT=m
220CONFIG_IP6_NF_TARGET_HL=m 212CONFIG_IP6_NF_TARGET_HL=m
221CONFIG_IP6_NF_TARGET_LOG=m
222CONFIG_IP6_NF_FILTER=m 213CONFIG_IP6_NF_FILTER=m
223CONFIG_IP6_NF_TARGET_REJECT=m 214CONFIG_IP6_NF_TARGET_REJECT=m
224CONFIG_IP6_NF_MANGLE=m 215CONFIG_IP6_NF_MANGLE=m
@@ -249,7 +240,6 @@ CONFIG_BRIDGE_EBT_NFLOG=m
249CONFIG_RDS=m 240CONFIG_RDS=m
250CONFIG_RDS_TCP=m 241CONFIG_RDS_TCP=m
251CONFIG_BRIDGE=m 242CONFIG_BRIDGE=m
252CONFIG_NET_DSA=y
253CONFIG_VLAN_8021Q=m 243CONFIG_VLAN_8021Q=m
254CONFIG_VLAN_8021Q_GVRP=y 244CONFIG_VLAN_8021Q_GVRP=y
255CONFIG_PHONET=m 245CONFIG_PHONET=m
@@ -297,6 +287,7 @@ CONFIG_NET_ACT_SIMP=m
297CONFIG_NET_ACT_SKBEDIT=m 287CONFIG_NET_ACT_SKBEDIT=m
298CONFIG_NET_CLS_IND=y 288CONFIG_NET_CLS_IND=y
299CONFIG_DCB=y 289CONFIG_DCB=y
290CONFIG_DNS_RESOLVER=y
300# CONFIG_WIRELESS is not set 291# CONFIG_WIRELESS is not set
301CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 292CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
302CONFIG_DEVTMPFS=y 293CONFIG_DEVTMPFS=y
@@ -354,40 +345,7 @@ CONFIG_NET_DSA_MV88E6060=y
354CONFIG_NET_DSA_MV88E6131=y 345CONFIG_NET_DSA_MV88E6131=y
355CONFIG_NET_DSA_MV88E6123_61_65=y 346CONFIG_NET_DSA_MV88E6123_61_65=y
356# CONFIG_NET_VENDOR_3COM is not set 347# CONFIG_NET_VENDOR_3COM is not set
357# CONFIG_NET_VENDOR_ADAPTEC is not set 348CONFIG_E1000E=y
358# CONFIG_NET_VENDOR_ALTEON is not set
359# CONFIG_NET_VENDOR_AMD is not set
360# CONFIG_NET_VENDOR_ATHEROS is not set
361# CONFIG_NET_VENDOR_BROADCOM is not set
362# CONFIG_NET_VENDOR_BROCADE is not set
363# CONFIG_NET_VENDOR_CHELSIO is not set
364# CONFIG_NET_VENDOR_CISCO is not set
365# CONFIG_NET_VENDOR_DEC is not set
366# CONFIG_NET_VENDOR_DLINK is not set
367# CONFIG_NET_VENDOR_EMULEX is not set
368# CONFIG_NET_VENDOR_EXAR is not set
369# CONFIG_NET_VENDOR_HP is not set
370# CONFIG_NET_VENDOR_INTEL is not set
371# CONFIG_NET_VENDOR_MARVELL is not set
372# CONFIG_NET_VENDOR_MELLANOX is not set
373# CONFIG_NET_VENDOR_MICREL is not set
374# CONFIG_NET_VENDOR_MYRI is not set
375# CONFIG_NET_VENDOR_NATSEMI is not set
376# CONFIG_NET_VENDOR_NVIDIA is not set
377# CONFIG_NET_VENDOR_OKI is not set
378# CONFIG_NET_PACKET_ENGINE is not set
379# CONFIG_NET_VENDOR_QLOGIC is not set
380# CONFIG_NET_VENDOR_REALTEK is not set
381# CONFIG_NET_VENDOR_RDC is not set
382# CONFIG_NET_VENDOR_SEEQ is not set
383# CONFIG_NET_VENDOR_SILAN is not set
384# CONFIG_NET_VENDOR_SIS is not set
385# CONFIG_NET_VENDOR_SMSC is not set
386# CONFIG_NET_VENDOR_STMICRO is not set
387# CONFIG_NET_VENDOR_SUN is not set
388# CONFIG_NET_VENDOR_TEHUTI is not set
389# CONFIG_NET_VENDOR_TI is not set
390# CONFIG_NET_VENDOR_VIA is not set
391# CONFIG_WLAN is not set 349# CONFIG_WLAN is not set
392# CONFIG_INPUT_MOUSEDEV is not set 350# CONFIG_INPUT_MOUSEDEV is not set
393# CONFIG_INPUT_KEYBOARD is not set 351# CONFIG_INPUT_KEYBOARD is not set
@@ -403,7 +361,6 @@ CONFIG_I2C_CHARDEV=y
403CONFIG_WATCHDOG=y 361CONFIG_WATCHDOG=y
404CONFIG_WATCHDOG_NOWAYOUT=y 362CONFIG_WATCHDOG_NOWAYOUT=y
405# CONFIG_VGA_ARB is not set 363# CONFIG_VGA_ARB is not set
406# CONFIG_HID_SUPPORT is not set
407# CONFIG_USB_SUPPORT is not set 364# CONFIG_USB_SUPPORT is not set
408CONFIG_EDAC=y 365CONFIG_EDAC=y
409CONFIG_EDAC_MM_EDAC=y 366CONFIG_EDAC_MM_EDAC=y
@@ -448,13 +405,13 @@ CONFIG_PROC_KCORE=y
448CONFIG_TMPFS=y 405CONFIG_TMPFS=y
449CONFIG_TMPFS_POSIX_ACL=y 406CONFIG_TMPFS_POSIX_ACL=y
450CONFIG_HUGETLBFS=y 407CONFIG_HUGETLBFS=y
408CONFIG_CONFIGFS_FS=m
451CONFIG_ECRYPT_FS=m 409CONFIG_ECRYPT_FS=m
452CONFIG_CRAMFS=m 410CONFIG_CRAMFS=m
453CONFIG_SQUASHFS=m 411CONFIG_SQUASHFS=m
454CONFIG_NFS_FS=m 412CONFIG_NFS_FS=m
455CONFIG_NFS_V3=y
456CONFIG_NFS_V3_ACL=y 413CONFIG_NFS_V3_ACL=y
457CONFIG_NFS_V4=y 414CONFIG_NFS_V4=m
458CONFIG_NFS_V4_1=y 415CONFIG_NFS_V4_1=y
459CONFIG_NFS_FSCACHE=y 416CONFIG_NFS_FSCACHE=y
460CONFIG_NFSD=m 417CONFIG_NFSD=m
@@ -508,26 +465,29 @@ CONFIG_NLS_ISO8859_15=m
508CONFIG_NLS_KOI8_R=m 465CONFIG_NLS_KOI8_R=m
509CONFIG_NLS_KOI8_U=m 466CONFIG_NLS_KOI8_U=m
510CONFIG_NLS_UTF8=m 467CONFIG_NLS_UTF8=m
468CONFIG_DLM=m
511CONFIG_DLM_DEBUG=y 469CONFIG_DLM_DEBUG=y
470CONFIG_DYNAMIC_DEBUG=y
471CONFIG_DEBUG_INFO=y
472CONFIG_DEBUG_INFO_REDUCED=y
512# CONFIG_ENABLE_WARN_DEPRECATED is not set 473# CONFIG_ENABLE_WARN_DEPRECATED is not set
513CONFIG_FRAME_WARN=2048 474CONFIG_FRAME_WARN=2048
514CONFIG_MAGIC_SYSRQ=y
515CONFIG_STRIP_ASM_SYMS=y 475CONFIG_STRIP_ASM_SYMS=y
516CONFIG_DEBUG_FS=y 476CONFIG_DEBUG_FS=y
517CONFIG_HEADERS_CHECK=y 477CONFIG_HEADERS_CHECK=y
478# CONFIG_FRAME_POINTER is not set
479CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
480CONFIG_MAGIC_SYSRQ=y
481CONFIG_DEBUG_VM=y
482CONFIG_DEBUG_MEMORY_INIT=y
483CONFIG_DEBUG_STACKOVERFLOW=y
518CONFIG_LOCKUP_DETECTOR=y 484CONFIG_LOCKUP_DETECTOR=y
519CONFIG_SCHEDSTATS=y 485CONFIG_SCHEDSTATS=y
520CONFIG_TIMER_STATS=y 486CONFIG_TIMER_STATS=y
521CONFIG_DEBUG_INFO=y
522CONFIG_DEBUG_INFO_REDUCED=y
523CONFIG_DEBUG_VM=y
524CONFIG_DEBUG_MEMORY_INIT=y
525CONFIG_DEBUG_LIST=y 487CONFIG_DEBUG_LIST=y
526CONFIG_DEBUG_CREDENTIALS=y 488CONFIG_DEBUG_CREDENTIALS=y
527CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y 489CONFIG_RCU_CPU_STALL_TIMEOUT=60
528CONFIG_DYNAMIC_DEBUG=y
529CONFIG_ASYNC_RAID6_TEST=m 490CONFIG_ASYNC_RAID6_TEST=m
530CONFIG_DEBUG_STACKOVERFLOW=y
531CONFIG_KEYS_DEBUG_PROC_KEYS=y 491CONFIG_KEYS_DEBUG_PROC_KEYS=y
532CONFIG_SECURITY=y 492CONFIG_SECURITY=y
533CONFIG_SECURITYFS=y 493CONFIG_SECURITYFS=y
@@ -536,7 +496,6 @@ CONFIG_SECURITY_NETWORK_XFRM=y
536CONFIG_SECURITY_SELINUX=y 496CONFIG_SECURITY_SELINUX=y
537CONFIG_SECURITY_SELINUX_BOOTPARAM=y 497CONFIG_SECURITY_SELINUX_BOOTPARAM=y
538CONFIG_SECURITY_SELINUX_DISABLE=y 498CONFIG_SECURITY_SELINUX_DISABLE=y
539CONFIG_CRYPTO_NULL=m
540CONFIG_CRYPTO_PCRYPT=m 499CONFIG_CRYPTO_PCRYPT=m
541CONFIG_CRYPTO_CRYPTD=m 500CONFIG_CRYPTO_CRYPTD=m
542CONFIG_CRYPTO_TEST=m 501CONFIG_CRYPTO_TEST=m
@@ -549,14 +508,12 @@ CONFIG_CRYPTO_XTS=m
549CONFIG_CRYPTO_HMAC=y 508CONFIG_CRYPTO_HMAC=y
550CONFIG_CRYPTO_XCBC=m 509CONFIG_CRYPTO_XCBC=m
551CONFIG_CRYPTO_VMAC=m 510CONFIG_CRYPTO_VMAC=m
552CONFIG_CRYPTO_CRC32C=y
553CONFIG_CRYPTO_MICHAEL_MIC=m 511CONFIG_CRYPTO_MICHAEL_MIC=m
554CONFIG_CRYPTO_RMD128=m 512CONFIG_CRYPTO_RMD128=m
555CONFIG_CRYPTO_RMD160=m 513CONFIG_CRYPTO_RMD160=m
556CONFIG_CRYPTO_RMD256=m 514CONFIG_CRYPTO_RMD256=m
557CONFIG_CRYPTO_RMD320=m 515CONFIG_CRYPTO_RMD320=m
558CONFIG_CRYPTO_SHA1=y 516CONFIG_CRYPTO_SHA1=y
559CONFIG_CRYPTO_SHA256=m
560CONFIG_CRYPTO_SHA512=m 517CONFIG_CRYPTO_SHA512=m
561CONFIG_CRYPTO_TGR192=m 518CONFIG_CRYPTO_TGR192=m
562CONFIG_CRYPTO_WP512=m 519CONFIG_CRYPTO_WP512=m
diff --git a/arch/tile/gxio/Kconfig b/arch/tile/gxio/Kconfig
index d221f8d6de8b..d4e10d58071b 100644
--- a/arch/tile/gxio/Kconfig
+++ b/arch/tile/gxio/Kconfig
@@ -26,3 +26,8 @@ config TILE_GXIO_TRIO
26config TILE_GXIO_USB_HOST 26config TILE_GXIO_USB_HOST
27 bool 27 bool
28 select TILE_GXIO 28 select TILE_GXIO
29
30# Support direct access to the TILE-Gx UART hardware from kernel space.
31config TILE_GXIO_UART
32 bool
33 select TILE_GXIO
diff --git a/arch/tile/gxio/Makefile b/arch/tile/gxio/Makefile
index 8684bcaa74ea..26ae2c727467 100644
--- a/arch/tile/gxio/Makefile
+++ b/arch/tile/gxio/Makefile
@@ -6,4 +6,5 @@ obj-$(CONFIG_TILE_GXIO) += iorpc_globals.o kiorpc.o
6obj-$(CONFIG_TILE_GXIO_DMA) += dma_queue.o 6obj-$(CONFIG_TILE_GXIO_DMA) += dma_queue.o
7obj-$(CONFIG_TILE_GXIO_MPIPE) += mpipe.o iorpc_mpipe.o iorpc_mpipe_info.o 7obj-$(CONFIG_TILE_GXIO_MPIPE) += mpipe.o iorpc_mpipe.o iorpc_mpipe_info.o
8obj-$(CONFIG_TILE_GXIO_TRIO) += trio.o iorpc_trio.o 8obj-$(CONFIG_TILE_GXIO_TRIO) += trio.o iorpc_trio.o
9obj-$(CONFIG_TILE_GXIO_UART) += uart.o iorpc_uart.o
9obj-$(CONFIG_TILE_GXIO_USB_HOST) += usb_host.o iorpc_usb_host.o 10obj-$(CONFIG_TILE_GXIO_USB_HOST) += usb_host.o iorpc_usb_host.o
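The Kconfig and Makefile hunks above follow the kernel's usual hidden-symbol pattern: TILE_GXIO_UART has no prompt, so it only becomes set when some driver's Kconfig entry does "select TILE_GXIO_UART", at which point the Makefile rule pulls uart.o and iorpc_uart.o into the build. A minimal sketch of a consumer (the driver itself is hypothetical; only the config symbol comes from this patch):

/* Hypothetical driver code: compiled only in kernels whose config
 * selected TILE_GXIO_UART, so the gxio UART API is known to exist. */
#ifdef CONFIG_TILE_GXIO_UART
#include <gxio/uart.h>
#endif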
diff --git a/arch/tile/gxio/iorpc_mpipe.c b/arch/tile/gxio/iorpc_mpipe.c
index 31b87bf8c027..4f8f3d619c4a 100644
--- a/arch/tile/gxio/iorpc_mpipe.c
+++ b/arch/tile/gxio/iorpc_mpipe.c
@@ -387,6 +387,27 @@ int gxio_mpipe_link_close_aux(gxio_mpipe_context_t * context, int mac)
387 387
388EXPORT_SYMBOL(gxio_mpipe_link_close_aux); 388EXPORT_SYMBOL(gxio_mpipe_link_close_aux);
389 389
390struct link_set_attr_aux_param {
391 int mac;
392 uint32_t attr;
393 int64_t val;
394};
395
396int gxio_mpipe_link_set_attr_aux(gxio_mpipe_context_t * context, int mac,
397 uint32_t attr, int64_t val)
398{
399 struct link_set_attr_aux_param temp;
400 struct link_set_attr_aux_param *params = &temp;
401
402 params->mac = mac;
403 params->attr = attr;
404 params->val = val;
405
406 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
407 sizeof(*params), GXIO_MPIPE_OP_LINK_SET_ATTR_AUX);
408}
409
410EXPORT_SYMBOL(gxio_mpipe_link_set_attr_aux);
390 411
391struct get_timestamp_aux_param { 412struct get_timestamp_aux_param {
392 uint64_t sec; 413 uint64_t sec;
@@ -454,6 +475,51 @@ int gxio_mpipe_adjust_timestamp_aux(gxio_mpipe_context_t * context,
454 475
455EXPORT_SYMBOL(gxio_mpipe_adjust_timestamp_aux); 476EXPORT_SYMBOL(gxio_mpipe_adjust_timestamp_aux);
456 477
478struct adjust_timestamp_freq_param {
479 int32_t ppb;
480};
481
482int gxio_mpipe_adjust_timestamp_freq(gxio_mpipe_context_t * context,
483 int32_t ppb)
484{
485 struct adjust_timestamp_freq_param temp;
486 struct adjust_timestamp_freq_param *params = &temp;
487
488 params->ppb = ppb;
489
490 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
491 sizeof(*params),
492 GXIO_MPIPE_OP_ADJUST_TIMESTAMP_FREQ);
493}
494
495EXPORT_SYMBOL(gxio_mpipe_adjust_timestamp_freq);
496
497struct config_edma_ring_blks_param {
498 unsigned int ering;
499 unsigned int max_blks;
500 unsigned int min_snf_blks;
501 unsigned int db;
502};
503
504int gxio_mpipe_config_edma_ring_blks(gxio_mpipe_context_t * context,
505 unsigned int ering, unsigned int max_blks,
506 unsigned int min_snf_blks, unsigned int db)
507{
508 struct config_edma_ring_blks_param temp;
509 struct config_edma_ring_blks_param *params = &temp;
510
511 params->ering = ering;
512 params->max_blks = max_blks;
513 params->min_snf_blks = min_snf_blks;
514 params->db = db;
515
516 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
517 sizeof(*params),
518 GXIO_MPIPE_OP_CONFIG_EDMA_RING_BLKS);
519}
520
521EXPORT_SYMBOL(gxio_mpipe_config_edma_ring_blks);
522
457struct arm_pollfd_param { 523struct arm_pollfd_param {
458 union iorpc_pollfd pollfd; 524 union iorpc_pollfd pollfd;
459}; 525};
diff --git a/arch/tile/gxio/iorpc_mpipe_info.c b/arch/tile/gxio/iorpc_mpipe_info.c
index d0254aa60cba..64883aabeb9c 100644
--- a/arch/tile/gxio/iorpc_mpipe_info.c
+++ b/arch/tile/gxio/iorpc_mpipe_info.c
@@ -16,6 +16,24 @@
16#include "gxio/iorpc_mpipe_info.h" 16#include "gxio/iorpc_mpipe_info.h"
17 17
18 18
19struct instance_aux_param {
20 _gxio_mpipe_link_name_t name;
21};
22
23int gxio_mpipe_info_instance_aux(gxio_mpipe_info_context_t * context,
24 _gxio_mpipe_link_name_t name)
25{
26 struct instance_aux_param temp;
27 struct instance_aux_param *params = &temp;
28
29 params->name = name;
30
31 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
32 sizeof(*params), GXIO_MPIPE_INFO_OP_INSTANCE_AUX);
33}
34
35EXPORT_SYMBOL(gxio_mpipe_info_instance_aux);
36
19struct enumerate_aux_param { 37struct enumerate_aux_param {
20 _gxio_mpipe_link_name_t name; 38 _gxio_mpipe_link_name_t name;
21 _gxio_mpipe_link_mac_t mac; 39 _gxio_mpipe_link_mac_t mac;
diff --git a/arch/tile/gxio/iorpc_trio.c b/arch/tile/gxio/iorpc_trio.c
index cef4b2209cda..da6e18e049c3 100644
--- a/arch/tile/gxio/iorpc_trio.c
+++ b/arch/tile/gxio/iorpc_trio.c
@@ -61,6 +61,29 @@ int gxio_trio_alloc_memory_maps(gxio_trio_context_t * context,
61 61
62EXPORT_SYMBOL(gxio_trio_alloc_memory_maps); 62EXPORT_SYMBOL(gxio_trio_alloc_memory_maps);
63 63
64struct alloc_scatter_queues_param {
65 unsigned int count;
66 unsigned int first;
67 unsigned int flags;
68};
69
70int gxio_trio_alloc_scatter_queues(gxio_trio_context_t * context,
71 unsigned int count, unsigned int first,
72 unsigned int flags)
73{
74 struct alloc_scatter_queues_param temp;
75 struct alloc_scatter_queues_param *params = &temp;
76
77 params->count = count;
78 params->first = first;
79 params->flags = flags;
80
81 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
82 sizeof(*params),
83 GXIO_TRIO_OP_ALLOC_SCATTER_QUEUES);
84}
85
86EXPORT_SYMBOL(gxio_trio_alloc_scatter_queues);
64 87
65struct alloc_pio_regions_param { 88struct alloc_pio_regions_param {
66 unsigned int count; 89 unsigned int count;
diff --git a/arch/tile/gxio/iorpc_uart.c b/arch/tile/gxio/iorpc_uart.c
new file mode 100644
index 000000000000..b9a6d6193d73
--- /dev/null
+++ b/arch/tile/gxio/iorpc_uart.c
@@ -0,0 +1,77 @@
1/*
2 * Copyright 2013 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15/* This file is machine-generated; DO NOT EDIT! */
16#include "gxio/iorpc_uart.h"
17
18struct cfg_interrupt_param {
19 union iorpc_interrupt interrupt;
20};
21
22int gxio_uart_cfg_interrupt(gxio_uart_context_t *context, int inter_x,
23 int inter_y, int inter_ipi, int inter_event)
24{
25 struct cfg_interrupt_param temp;
26 struct cfg_interrupt_param *params = &temp;
27
28 params->interrupt.kernel.x = inter_x;
29 params->interrupt.kernel.y = inter_y;
30 params->interrupt.kernel.ipi = inter_ipi;
31 params->interrupt.kernel.event = inter_event;
32
33 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
34 sizeof(*params), GXIO_UART_OP_CFG_INTERRUPT);
35}
36
37EXPORT_SYMBOL(gxio_uart_cfg_interrupt);
38
39struct get_mmio_base_param {
40 HV_PTE base;
41};
42
43int gxio_uart_get_mmio_base(gxio_uart_context_t *context, HV_PTE *base)
44{
45 int __result;
46 struct get_mmio_base_param temp;
47 struct get_mmio_base_param *params = &temp;
48
49 __result =
50 hv_dev_pread(context->fd, 0, (HV_VirtAddr) params, sizeof(*params),
51 GXIO_UART_OP_GET_MMIO_BASE);
52 *base = params->base;
53
54 return __result;
55}
56
57EXPORT_SYMBOL(gxio_uart_get_mmio_base);
58
59struct check_mmio_offset_param {
60 unsigned long offset;
61 unsigned long size;
62};
63
64int gxio_uart_check_mmio_offset(gxio_uart_context_t *context,
65 unsigned long offset, unsigned long size)
66{
67 struct check_mmio_offset_param temp;
68 struct check_mmio_offset_param *params = &temp;
69
70 params->offset = offset;
71 params->size = size;
72
73 return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
74 sizeof(*params), GXIO_UART_OP_CHECK_MMIO_OFFSET);
75}
76
77EXPORT_SYMBOL(gxio_uart_check_mmio_offset);
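gxio_uart_get_mmio_base() is the read-direction variant of the same pattern, pulling the result struct back out with hv_dev_pread(). A sketch of the interrupt-routing call, assuming a context from gxio_uart_init(); all four coordinates are placeholder values, not taken from this diff:

#include <gxio/iorpc_uart.h>

/* Sketch: ask the hypervisor to deliver UART events as an IPI to a
 * given tile. Tile (0,0), IPI 1 and event 0 are illustrative only. */
static int example_route_uart_irq(gxio_uart_context_t *ctx)
{
	return gxio_uart_cfg_interrupt(ctx, 0 /* x */, 0 /* y */,
				       1 /* ipi */, 0 /* event */);
}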
diff --git a/arch/tile/gxio/mpipe.c b/arch/tile/gxio/mpipe.c
index e71c63390acc..5301a9ffbae1 100644
--- a/arch/tile/gxio/mpipe.c
+++ b/arch/tile/gxio/mpipe.c
@@ -36,8 +36,14 @@ int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index)
36 int fd; 36 int fd;
37 int i; 37 int i;
38 38
39 if (mpipe_index >= GXIO_MPIPE_INSTANCE_MAX)
40 return -EINVAL;
41
39 snprintf(file, sizeof(file), "mpipe/%d/iorpc", mpipe_index); 42 snprintf(file, sizeof(file), "mpipe/%d/iorpc", mpipe_index);
40 fd = hv_dev_open((HV_VirtAddr) file, 0); 43 fd = hv_dev_open((HV_VirtAddr) file, 0);
44
45 context->fd = fd;
46
41 if (fd < 0) { 47 if (fd < 0) {
42 if (fd >= GXIO_ERR_MIN && fd <= GXIO_ERR_MAX) 48 if (fd >= GXIO_ERR_MIN && fd <= GXIO_ERR_MAX)
43 return fd; 49 return fd;
@@ -45,8 +51,6 @@ int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index)
45 return -ENODEV; 51 return -ENODEV;
46 } 52 }
47 53
48 context->fd = fd;
49
50 /* Map in the MMIO space. */ 54 /* Map in the MMIO space. */
51 context->mmio_cfg_base = (void __force *) 55 context->mmio_cfg_base = (void __force *)
52 iorpc_ioremap(fd, HV_MPIPE_CONFIG_MMIO_OFFSET, 56 iorpc_ioremap(fd, HV_MPIPE_CONFIG_MMIO_OFFSET,
@@ -64,12 +68,15 @@ int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index)
64 for (i = 0; i < 8; i++) 68 for (i = 0; i < 8; i++)
65 context->__stacks.stacks[i] = 255; 69 context->__stacks.stacks[i] = 255;
66 70
71 context->instance = mpipe_index;
72
67 return 0; 73 return 0;
68 74
69 fast_failed: 75 fast_failed:
70 iounmap((void __force __iomem *)(context->mmio_cfg_base)); 76 iounmap((void __force __iomem *)(context->mmio_cfg_base));
71 cfg_failed: 77 cfg_failed:
72 hv_dev_close(context->fd); 78 hv_dev_close(context->fd);
79 context->fd = -1;
73 return -ENODEV; 80 return -ENODEV;
74} 81}
75 82
@@ -383,7 +390,7 @@ EXPORT_SYMBOL_GPL(gxio_mpipe_iqueue_init);
383 390
384int gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue, 391int gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue,
385 gxio_mpipe_context_t *context, 392 gxio_mpipe_context_t *context,
386 unsigned int edma_ring_id, 393 unsigned int ering,
387 unsigned int channel, 394 unsigned int channel,
388 void *mem, unsigned int mem_size, 395 void *mem, unsigned int mem_size,
389 unsigned int mem_flags) 396 unsigned int mem_flags)
@@ -394,7 +401,7 @@ int gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue,
394 /* Offset used to read number of completed commands. */ 401 /* Offset used to read number of completed commands. */
395 MPIPE_EDMA_POST_REGION_ADDR_t offset; 402 MPIPE_EDMA_POST_REGION_ADDR_t offset;
396 403
397 int result = gxio_mpipe_init_edma_ring(context, edma_ring_id, channel, 404 int result = gxio_mpipe_init_edma_ring(context, ering, channel,
398 mem, mem_size, mem_flags); 405 mem, mem_size, mem_flags);
399 if (result < 0) 406 if (result < 0)
400 return result; 407 return result;
@@ -405,7 +412,7 @@ int gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue,
405 offset.region = 412 offset.region =
406 MPIPE_MMIO_ADDR__REGION_VAL_EDMA - 413 MPIPE_MMIO_ADDR__REGION_VAL_EDMA -
407 MPIPE_MMIO_ADDR__REGION_VAL_IDMA; 414 MPIPE_MMIO_ADDR__REGION_VAL_IDMA;
408 offset.ring = edma_ring_id; 415 offset.ring = ering;
409 416
410 __gxio_dma_queue_init(&equeue->dma_queue, 417 __gxio_dma_queue_init(&equeue->dma_queue,
411 context->mmio_fast_base + offset.word, 418 context->mmio_fast_base + offset.word,
@@ -413,6 +420,9 @@ int gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue,
413 equeue->edescs = mem; 420 equeue->edescs = mem;
414 equeue->mask_num_entries = num_entries - 1; 421 equeue->mask_num_entries = num_entries - 1;
415 equeue->log2_num_entries = __builtin_ctz(num_entries); 422 equeue->log2_num_entries = __builtin_ctz(num_entries);
423 equeue->context = context;
424 equeue->ering = ering;
425 equeue->channel = channel;
416 426
417 return 0; 427 return 0;
418} 428}
@@ -493,6 +503,20 @@ static gxio_mpipe_context_t *_gxio_get_link_context(void)
493 return contextp; 503 return contextp;
494} 504}
495 505
506int gxio_mpipe_link_instance(const char *link_name)
507{
508 _gxio_mpipe_link_name_t name;
509 gxio_mpipe_context_t *context = _gxio_get_link_context();
510
511 if (!context)
512 return GXIO_ERR_NO_DEVICE;
513
514 strncpy(name.name, link_name, sizeof(name.name));
515 name.name[GXIO_MPIPE_LINK_NAME_LEN - 1] = '\0';
516
517 return gxio_mpipe_info_instance_aux(context, name);
518}
519
496int gxio_mpipe_link_enumerate_mac(int idx, char *link_name, uint8_t *link_mac) 520int gxio_mpipe_link_enumerate_mac(int idx, char *link_name, uint8_t *link_mac)
497{ 521{
498 int rv; 522 int rv;
@@ -543,3 +567,12 @@ int gxio_mpipe_link_close(gxio_mpipe_link_t *link)
543} 567}
544 568
545EXPORT_SYMBOL_GPL(gxio_mpipe_link_close); 569EXPORT_SYMBOL_GPL(gxio_mpipe_link_close);
570
571int gxio_mpipe_link_set_attr(gxio_mpipe_link_t *link, uint32_t attr,
572 int64_t val)
573{
574 return gxio_mpipe_link_set_attr_aux(link->context, link->mac, attr,
575 val);
576}
577
578EXPORT_SYMBOL_GPL(gxio_mpipe_link_set_attr);
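gxio_mpipe_link_instance() also shows why the info stub takes a fixed-size name struct: the caller's string is copied with strncpy() and explicitly NUL-terminated before crossing into the hypervisor. A sketch combining the two new link helpers (the link name "xgbe0" and the attribute id/value pair are placeholders; real attribute constants live outside this diff):

#include <gxio/mpipe.h>

/* Sketch: find which mPIPE instance owns a link, then set one of the
 * link's attributes through the new wrapper. */
static int example_link_setup(gxio_mpipe_link_t *link)
{
	int instance = gxio_mpipe_link_instance("xgbe0");

	if (instance < 0)
		return instance;	/* e.g. GXIO_ERR_NO_DEVICE */

	return gxio_mpipe_link_set_attr(link, 0, 1);
}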
diff --git a/arch/tile/gxio/uart.c b/arch/tile/gxio/uart.c
new file mode 100644
index 000000000000..ba585175ef88
--- /dev/null
+++ b/arch/tile/gxio/uart.c
@@ -0,0 +1,87 @@
1/*
2 * Copyright 2013 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15/*
16 * Implementation of UART gxio calls.
17 */
18
19#include <linux/io.h>
20#include <linux/errno.h>
21#include <linux/module.h>
22
23#include <gxio/uart.h>
24#include <gxio/iorpc_globals.h>
25#include <gxio/iorpc_uart.h>
26#include <gxio/kiorpc.h>
27
28int gxio_uart_init(gxio_uart_context_t *context, int uart_index)
29{
30 char file[32];
31 int fd;
32
33 snprintf(file, sizeof(file), "uart/%d/iorpc", uart_index);
34 fd = hv_dev_open((HV_VirtAddr) file, 0);
35 if (fd < 0) {
36 if (fd >= GXIO_ERR_MIN && fd <= GXIO_ERR_MAX)
37 return fd;
38 else
39 return -ENODEV;
40 }
41
42 context->fd = fd;
43
44 /* Map in the MMIO space. */
45 context->mmio_base = (void __force *)
46 iorpc_ioremap(fd, HV_UART_MMIO_OFFSET, HV_UART_MMIO_SIZE);
47
48 if (context->mmio_base == NULL) {
49 hv_dev_close(context->fd);
50 context->fd = -1;
51 return -ENODEV;
52 }
53
54 return 0;
55}
56
57EXPORT_SYMBOL_GPL(gxio_uart_init);
58
59int gxio_uart_destroy(gxio_uart_context_t *context)
60{
61 iounmap((void __force __iomem *)(context->mmio_base));
62 hv_dev_close(context->fd);
63
64 context->mmio_base = NULL;
65 context->fd = -1;
66
67 return 0;
68}
69
70EXPORT_SYMBOL_GPL(gxio_uart_destroy);
71
72/* UART register write wrapper. */
73void gxio_uart_write(gxio_uart_context_t *context, uint64_t offset,
74 uint64_t word)
75{
76 __gxio_mmio_write(context->mmio_base + offset, word);
77}
78
79EXPORT_SYMBOL_GPL(gxio_uart_write);
80
81/* UART register read wrapper. */
82uint64_t gxio_uart_read(gxio_uart_context_t *context, uint64_t offset)
83{
84 return __gxio_mmio_read(context->mmio_base + offset);
85}
86
87EXPORT_SYMBOL_GPL(gxio_uart_read);
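Together with the register layout added in <arch/uart_def.h> below, these four accessors are enough for a polled console. A minimal sketch, assuming a context initialized with gxio_uart_init(ctx, 0):

#include <gxio/uart.h>
#include <arch/uart.h>

/* Sketch: busy-wait until the transmit FIFO has room, then push one
 * byte out through the MMIO write wrapper. */
static void example_uart_putc(gxio_uart_context_t *ctx, char c)
{
	UART_FLAG_t flag;

	do {
		flag.word = gxio_uart_read(ctx, UART_FLAG);
	} while (flag.tfifo_full);

	gxio_uart_write(ctx, UART_TRANSMIT_DATA, (uint64_t)c);
}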
diff --git a/arch/tile/include/arch/trio.h b/arch/tile/include/arch/trio.h
index d3000a871a21..c0ddedcae085 100644
--- a/arch/tile/include/arch/trio.h
+++ b/arch/tile/include/arch/trio.h
@@ -23,6 +23,45 @@
23#ifndef __ASSEMBLER__ 23#ifndef __ASSEMBLER__
24 24
25/* 25/*
26 * Map SQ Doorbell Format.
27 * This describes the format of the write-only doorbell register that exists
 28 * in the last 8 bytes of the MAP_SQ_BASE/LIM range. This register is only
29 * writable from PCIe space. Writes to this register will not be written to
30 * Tile memory space and thus no IO VA translation is required if the last
31 * page of the BASE/LIM range is not otherwise written.
32 */
33
34__extension__
35typedef union
36{
37 struct
38 {
39#ifndef __BIG_ENDIAN__
40 /*
41 * When written with a 1, the associated MAP_SQ region's doorbell
42 * interrupt will be triggered once all previous writes are visible to
43 * Tile software.
44 */
45 uint_reg_t doorbell : 1;
46 /*
47 * When written with a 1, the descriptor at the head of the associated
48 * MAP_SQ's FIFO will be dequeued.
49 */
50 uint_reg_t pop : 1;
51 /* Reserved. */
52 uint_reg_t __reserved : 62;
53#else /* __BIG_ENDIAN__ */
54 uint_reg_t __reserved : 62;
55 uint_reg_t pop : 1;
56 uint_reg_t doorbell : 1;
57#endif
58 };
59
60 uint_reg_t word;
61} TRIO_MAP_SQ_DOORBELL_FMT_t;
62
63
64/*
26 * Tile PIO Region Configuration - CFG Address Format. 65 * Tile PIO Region Configuration - CFG Address Format.
27 * This register describes the address format for PIO accesses when the 66 * This register describes the address format for PIO accesses when the
28 * associated region is setup with TYPE=CFG. 67 * associated region is setup with TYPE=CFG.
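The doorbell word described above is write-only and one-shot, so a driver never read-modify-writes it; it simply composes the bits and writes the 8-byte word from PCIe space. A sketch of building that word with the new union (how the write actually reaches the device is outside this diff):

#include <arch/trio.h>

/* Sketch: compose a MAP SQ doorbell value that both raises the
 * region's doorbell interrupt and pops the descriptor at the head
 * of the FIFO. */
static uint_reg_t example_map_sq_doorbell_word(void)
{
	TRIO_MAP_SQ_DOORBELL_FMT_t db = { .word = 0 };

	db.doorbell = 1;
	db.pop = 1;
	return db.word;
}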
diff --git a/arch/tile/include/arch/uart.h b/arch/tile/include/arch/uart.h
new file mode 100644
index 000000000000..07966970adad
--- /dev/null
+++ b/arch/tile/include/arch/uart.h
@@ -0,0 +1,300 @@
1/*
2 * Copyright 2013 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15/* Machine-generated file; do not edit. */
16
17#ifndef __ARCH_UART_H__
18#define __ARCH_UART_H__
19
20#include <arch/abi.h>
21#include <arch/uart_def.h>
22
23#ifndef __ASSEMBLER__
24
25/* Divisor. */
26
27__extension__
28typedef union
29{
30 struct
31 {
32#ifndef __BIG_ENDIAN__
33 /*
 34 * Baud Rate Divisor. Here divisor = REF_CLK frequency / (baud rate *
 35 * 16).
 36 * Note: REF_CLK is always 125 MHz; with the default
 37 * divisor = 68, the baud rate is 125M/(68*16) ~= 115200 baud.
38 */
39 uint_reg_t divisor : 12;
40 /* Reserved. */
41 uint_reg_t __reserved : 52;
42#else /* __BIG_ENDIAN__ */
43 uint_reg_t __reserved : 52;
44 uint_reg_t divisor : 12;
45#endif
46 };
47
48 uint_reg_t word;
49} UART_DIVISOR_t;
50
51/* FIFO Count. */
52
53__extension__
54typedef union
55{
56 struct
57 {
58#ifndef __BIG_ENDIAN__
59 /*
60 * n: n active entries in the receive FIFO (max is 2**8). Each entry has
61 * 8 bits.
 62 * 0: no active entry in the receive FIFO (i.e., it is empty).
63 */
64 uint_reg_t rfifo_count : 9;
65 /* Reserved. */
66 uint_reg_t __reserved_0 : 7;
67 /*
68 * n: n active entries in the transmit FIFO (max is 2**8). Each entry has
69 * 8 bits.
 70 * 0: no active entry in the transmit FIFO (i.e., it is empty).
71 */
72 uint_reg_t tfifo_count : 9;
73 /* Reserved. */
74 uint_reg_t __reserved_1 : 7;
75 /*
76 * n: n active entries in the write FIFO (max is 2**2). Each entry has 8
77 * bits.
 78 * 0: no active entry in the write FIFO (i.e., it is empty).
79 */
80 uint_reg_t wfifo_count : 3;
81 /* Reserved. */
82 uint_reg_t __reserved_2 : 29;
83#else /* __BIG_ENDIAN__ */
84 uint_reg_t __reserved_2 : 29;
85 uint_reg_t wfifo_count : 3;
86 uint_reg_t __reserved_1 : 7;
87 uint_reg_t tfifo_count : 9;
88 uint_reg_t __reserved_0 : 7;
89 uint_reg_t rfifo_count : 9;
90#endif
91 };
92
93 uint_reg_t word;
94} UART_FIFO_COUNT_t;
95
96/* FLAG. */
97
98__extension__
99typedef union
100{
101 struct
102 {
103#ifndef __BIG_ENDIAN__
104 /* Reserved. */
105 uint_reg_t __reserved_0 : 1;
 106 /* 1: receive FIFO is empty. */
107 uint_reg_t rfifo_empty : 1;
108 /* 1: write FIFO is empty. */
109 uint_reg_t wfifo_empty : 1;
110 /* 1: transmit FIFO is empty. */
111 uint_reg_t tfifo_empty : 1;
112 /* 1: receive FIFO is full. */
113 uint_reg_t rfifo_full : 1;
114 /* 1: write FIFO is full. */
115 uint_reg_t wfifo_full : 1;
116 /* 1: transmit FIFO is full. */
117 uint_reg_t tfifo_full : 1;
118 /* Reserved. */
119 uint_reg_t __reserved_1 : 57;
120#else /* __BIG_ENDIAN__ */
121 uint_reg_t __reserved_1 : 57;
122 uint_reg_t tfifo_full : 1;
123 uint_reg_t wfifo_full : 1;
124 uint_reg_t rfifo_full : 1;
125 uint_reg_t tfifo_empty : 1;
126 uint_reg_t wfifo_empty : 1;
127 uint_reg_t rfifo_empty : 1;
128 uint_reg_t __reserved_0 : 1;
129#endif
130 };
131
132 uint_reg_t word;
133} UART_FLAG_t;
134
135/*
136 * Interrupt Vector Mask.
137 * Each bit in this register corresponds to a specific interrupt. When set,
138 * the associated interrupt will not be dispatched.
139 */
140
141__extension__
142typedef union
143{
144 struct
145 {
146#ifndef __BIG_ENDIAN__
 147 /* Read-data FIFO was read while no data was available */
148 uint_reg_t rdat_err : 1;
149 /* Write FIFO was written but it was full */
150 uint_reg_t wdat_err : 1;
151 /* Stop bit not found when current data was received */
152 uint_reg_t frame_err : 1;
153 /* Parity error was detected when current data was received */
154 uint_reg_t parity_err : 1;
155 /* Data was received but the receive FIFO was full */
156 uint_reg_t rfifo_overflow : 1;
157 /*
158 * An almost full event is reached when data is to be written to the
159 * receive FIFO, and the receive FIFO has more than or equal to
160 * BUFFER_THRESHOLD.RFIFO_AFULL bytes.
161 */
162 uint_reg_t rfifo_afull : 1;
163 /* Reserved. */
164 uint_reg_t __reserved_0 : 1;
165 /* An entry in the transmit FIFO was popped */
166 uint_reg_t tfifo_re : 1;
167 /* An entry has been pushed into the receive FIFO */
168 uint_reg_t rfifo_we : 1;
169 /* An entry of the write FIFO has been popped */
170 uint_reg_t wfifo_re : 1;
171 /* Rshim read receive FIFO in protocol mode */
172 uint_reg_t rfifo_err : 1;
173 /*
174 * An almost empty event is reached when data is to be read from the
175 * transmit FIFO, and the transmit FIFO has less than or equal to
176 * BUFFER_THRESHOLD.TFIFO_AEMPTY bytes.
177 */
178 uint_reg_t tfifo_aempty : 1;
179 /* Reserved. */
180 uint_reg_t __reserved_1 : 52;
181#else /* __BIG_ENDIAN__ */
182 uint_reg_t __reserved_1 : 52;
183 uint_reg_t tfifo_aempty : 1;
184 uint_reg_t rfifo_err : 1;
185 uint_reg_t wfifo_re : 1;
186 uint_reg_t rfifo_we : 1;
187 uint_reg_t tfifo_re : 1;
188 uint_reg_t __reserved_0 : 1;
189 uint_reg_t rfifo_afull : 1;
190 uint_reg_t rfifo_overflow : 1;
191 uint_reg_t parity_err : 1;
192 uint_reg_t frame_err : 1;
193 uint_reg_t wdat_err : 1;
194 uint_reg_t rdat_err : 1;
195#endif
196 };
197
198 uint_reg_t word;
199} UART_INTERRUPT_MASK_t;
200
201/*
202 * Interrupt vector, write-one-to-clear.
203 * Each bit in this register corresponds to a specific interrupt. Hardware
204 * sets the bit when the associated condition has occurred. Writing a 1
205 * clears the status bit.
206 */
207
208__extension__
209typedef union
210{
211 struct
212 {
213#ifndef __BIG_ENDIAN__
 214 /* Read-data FIFO was read while no data was available */
215 uint_reg_t rdat_err : 1;
216 /* Write FIFO was written but it was full */
217 uint_reg_t wdat_err : 1;
218 /* Stop bit not found when current data was received */
219 uint_reg_t frame_err : 1;
220 /* Parity error was detected when current data was received */
221 uint_reg_t parity_err : 1;
222 /* Data was received but the receive FIFO was full */
223 uint_reg_t rfifo_overflow : 1;
224 /*
225 * Data was received and the receive FIFO is now almost full (more than
226 * BUFFER_THRESHOLD.RFIFO_AFULL bytes in it)
227 */
228 uint_reg_t rfifo_afull : 1;
229 /* Reserved. */
230 uint_reg_t __reserved_0 : 1;
231 /* An entry in the transmit FIFO was popped */
232 uint_reg_t tfifo_re : 1;
233 /* An entry has been pushed into the receive FIFO */
234 uint_reg_t rfifo_we : 1;
235 /* An entry of the write FIFO has been popped */
236 uint_reg_t wfifo_re : 1;
237 /* Rshim read receive FIFO in protocol mode */
238 uint_reg_t rfifo_err : 1;
239 /*
240 * Data was read from the transmit FIFO and now it is almost empty (less
241 * than or equal to BUFFER_THRESHOLD.TFIFO_AEMPTY bytes in it).
242 */
243 uint_reg_t tfifo_aempty : 1;
244 /* Reserved. */
245 uint_reg_t __reserved_1 : 52;
246#else /* __BIG_ENDIAN__ */
247 uint_reg_t __reserved_1 : 52;
248 uint_reg_t tfifo_aempty : 1;
249 uint_reg_t rfifo_err : 1;
250 uint_reg_t wfifo_re : 1;
251 uint_reg_t rfifo_we : 1;
252 uint_reg_t tfifo_re : 1;
253 uint_reg_t __reserved_0 : 1;
254 uint_reg_t rfifo_afull : 1;
255 uint_reg_t rfifo_overflow : 1;
256 uint_reg_t parity_err : 1;
257 uint_reg_t frame_err : 1;
258 uint_reg_t wdat_err : 1;
259 uint_reg_t rdat_err : 1;
260#endif
261 };
262
263 uint_reg_t word;
264} UART_INTERRUPT_STATUS_t;
265
266/* Type. */
267
268__extension__
269typedef union
270{
271 struct
272 {
273#ifndef __BIG_ENDIAN__
274 /* Number of stop bits, rx and tx */
275 uint_reg_t sbits : 1;
276 /* Reserved. */
277 uint_reg_t __reserved_0 : 1;
278 /* Data word size, rx and tx */
279 uint_reg_t dbits : 1;
280 /* Reserved. */
281 uint_reg_t __reserved_1 : 1;
282 /* Parity selection, rx and tx */
283 uint_reg_t ptype : 3;
284 /* Reserved. */
285 uint_reg_t __reserved_2 : 57;
286#else /* __BIG_ENDIAN__ */
287 uint_reg_t __reserved_2 : 57;
288 uint_reg_t ptype : 3;
289 uint_reg_t __reserved_1 : 1;
290 uint_reg_t dbits : 1;
291 uint_reg_t __reserved_0 : 1;
292 uint_reg_t sbits : 1;
293#endif
294 };
295
296 uint_reg_t word;
297} UART_TYPE_t;
298#endif /* !defined(__ASSEMBLER__) */
299
300#endif /* !defined(__ARCH_UART_H__) */
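As the UART_DIVISOR comment notes, divisor = REF_CLK frequency / (baud rate * 16) with REF_CLK fixed at 125 MHz. A sketch of computing the field for an arbitrary rate, rounding to nearest, which reproduces the documented default of 68 for 115200 baud:

#include <arch/uart.h>

#define TILE_UART_REF_CLK_HZ	125000000UL	/* fixed, per the comment above */

/* Sketch: fill in a UART_DIVISOR value for a requested baud rate;
 * 115200 gives (125000000 + 921600) / 1843200 = 68. */
static UART_DIVISOR_t example_uart_divisor(unsigned long baud)
{
	UART_DIVISOR_t div = { .word = 0 };

	div.divisor = (TILE_UART_REF_CLK_HZ + baud * 8) / (baud * 16);
	return div;
}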
diff --git a/arch/tile/include/arch/uart_def.h b/arch/tile/include/arch/uart_def.h
new file mode 100644
index 000000000000..42bcaf535379
--- /dev/null
+++ b/arch/tile/include/arch/uart_def.h
@@ -0,0 +1,120 @@
1/*
2 * Copyright 2013 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15/* Machine-generated file; do not edit. */
16
17#ifndef __ARCH_UART_DEF_H__
18#define __ARCH_UART_DEF_H__
19#define UART_DIVISOR 0x0158
20#define UART_FIFO_COUNT 0x0110
21#define UART_FLAG 0x0108
22#define UART_INTERRUPT_MASK 0x0208
23#define UART_INTERRUPT_MASK__RDAT_ERR_SHIFT 0
24#define UART_INTERRUPT_MASK__RDAT_ERR_WIDTH 1
25#define UART_INTERRUPT_MASK__RDAT_ERR_RESET_VAL 1
26#define UART_INTERRUPT_MASK__RDAT_ERR_RMASK 0x1
27#define UART_INTERRUPT_MASK__RDAT_ERR_MASK 0x1
28#define UART_INTERRUPT_MASK__RDAT_ERR_FIELD 0,0
29#define UART_INTERRUPT_MASK__WDAT_ERR_SHIFT 1
30#define UART_INTERRUPT_MASK__WDAT_ERR_WIDTH 1
31#define UART_INTERRUPT_MASK__WDAT_ERR_RESET_VAL 1
32#define UART_INTERRUPT_MASK__WDAT_ERR_RMASK 0x1
33#define UART_INTERRUPT_MASK__WDAT_ERR_MASK 0x2
34#define UART_INTERRUPT_MASK__WDAT_ERR_FIELD 1,1
35#define UART_INTERRUPT_MASK__FRAME_ERR_SHIFT 2
36#define UART_INTERRUPT_MASK__FRAME_ERR_WIDTH 1
37#define UART_INTERRUPT_MASK__FRAME_ERR_RESET_VAL 1
38#define UART_INTERRUPT_MASK__FRAME_ERR_RMASK 0x1
39#define UART_INTERRUPT_MASK__FRAME_ERR_MASK 0x4
40#define UART_INTERRUPT_MASK__FRAME_ERR_FIELD 2,2
41#define UART_INTERRUPT_MASK__PARITY_ERR_SHIFT 3
42#define UART_INTERRUPT_MASK__PARITY_ERR_WIDTH 1
43#define UART_INTERRUPT_MASK__PARITY_ERR_RESET_VAL 1
44#define UART_INTERRUPT_MASK__PARITY_ERR_RMASK 0x1
45#define UART_INTERRUPT_MASK__PARITY_ERR_MASK 0x8
46#define UART_INTERRUPT_MASK__PARITY_ERR_FIELD 3,3
47#define UART_INTERRUPT_MASK__RFIFO_OVERFLOW_SHIFT 4
48#define UART_INTERRUPT_MASK__RFIFO_OVERFLOW_WIDTH 1
49#define UART_INTERRUPT_MASK__RFIFO_OVERFLOW_RESET_VAL 1
50#define UART_INTERRUPT_MASK__RFIFO_OVERFLOW_RMASK 0x1
51#define UART_INTERRUPT_MASK__RFIFO_OVERFLOW_MASK 0x10
52#define UART_INTERRUPT_MASK__RFIFO_OVERFLOW_FIELD 4,4
53#define UART_INTERRUPT_MASK__RFIFO_AFULL_SHIFT 5
54#define UART_INTERRUPT_MASK__RFIFO_AFULL_WIDTH 1
55#define UART_INTERRUPT_MASK__RFIFO_AFULL_RESET_VAL 1
56#define UART_INTERRUPT_MASK__RFIFO_AFULL_RMASK 0x1
57#define UART_INTERRUPT_MASK__RFIFO_AFULL_MASK 0x20
58#define UART_INTERRUPT_MASK__RFIFO_AFULL_FIELD 5,5
59#define UART_INTERRUPT_MASK__TFIFO_RE_SHIFT 7
60#define UART_INTERRUPT_MASK__TFIFO_RE_WIDTH 1
61#define UART_INTERRUPT_MASK__TFIFO_RE_RESET_VAL 1
62#define UART_INTERRUPT_MASK__TFIFO_RE_RMASK 0x1
63#define UART_INTERRUPT_MASK__TFIFO_RE_MASK 0x80
64#define UART_INTERRUPT_MASK__TFIFO_RE_FIELD 7,7
65#define UART_INTERRUPT_MASK__RFIFO_WE_SHIFT 8
66#define UART_INTERRUPT_MASK__RFIFO_WE_WIDTH 1
67#define UART_INTERRUPT_MASK__RFIFO_WE_RESET_VAL 1
68#define UART_INTERRUPT_MASK__RFIFO_WE_RMASK 0x1
69#define UART_INTERRUPT_MASK__RFIFO_WE_MASK 0x100
70#define UART_INTERRUPT_MASK__RFIFO_WE_FIELD 8,8
71#define UART_INTERRUPT_MASK__WFIFO_RE_SHIFT 9
72#define UART_INTERRUPT_MASK__WFIFO_RE_WIDTH 1
73#define UART_INTERRUPT_MASK__WFIFO_RE_RESET_VAL 1
74#define UART_INTERRUPT_MASK__WFIFO_RE_RMASK 0x1
75#define UART_INTERRUPT_MASK__WFIFO_RE_MASK 0x200
76#define UART_INTERRUPT_MASK__WFIFO_RE_FIELD 9,9
77#define UART_INTERRUPT_MASK__RFIFO_ERR_SHIFT 10
78#define UART_INTERRUPT_MASK__RFIFO_ERR_WIDTH 1
79#define UART_INTERRUPT_MASK__RFIFO_ERR_RESET_VAL 1
80#define UART_INTERRUPT_MASK__RFIFO_ERR_RMASK 0x1
81#define UART_INTERRUPT_MASK__RFIFO_ERR_MASK 0x400
82#define UART_INTERRUPT_MASK__RFIFO_ERR_FIELD 10,10
83#define UART_INTERRUPT_MASK__TFIFO_AEMPTY_SHIFT 11
84#define UART_INTERRUPT_MASK__TFIFO_AEMPTY_WIDTH 1
85#define UART_INTERRUPT_MASK__TFIFO_AEMPTY_RESET_VAL 1
86#define UART_INTERRUPT_MASK__TFIFO_AEMPTY_RMASK 0x1
87#define UART_INTERRUPT_MASK__TFIFO_AEMPTY_MASK 0x800
88#define UART_INTERRUPT_MASK__TFIFO_AEMPTY_FIELD 11,11
89#define UART_INTERRUPT_STATUS 0x0200
90#define UART_RECEIVE_DATA 0x0148
91#define UART_TRANSMIT_DATA 0x0140
92#define UART_TYPE 0x0160
93#define UART_TYPE__SBITS_SHIFT 0
94#define UART_TYPE__SBITS_WIDTH 1
95#define UART_TYPE__SBITS_RESET_VAL 1
96#define UART_TYPE__SBITS_RMASK 0x1
97#define UART_TYPE__SBITS_MASK 0x1
98#define UART_TYPE__SBITS_FIELD 0,0
99#define UART_TYPE__SBITS_VAL_ONE_SBITS 0x0
100#define UART_TYPE__SBITS_VAL_TWO_SBITS 0x1
101#define UART_TYPE__DBITS_SHIFT 2
102#define UART_TYPE__DBITS_WIDTH 1
103#define UART_TYPE__DBITS_RESET_VAL 0
104#define UART_TYPE__DBITS_RMASK 0x1
105#define UART_TYPE__DBITS_MASK 0x4
106#define UART_TYPE__DBITS_FIELD 2,2
107#define UART_TYPE__DBITS_VAL_EIGHT_DBITS 0x0
108#define UART_TYPE__DBITS_VAL_SEVEN_DBITS 0x1
109#define UART_TYPE__PTYPE_SHIFT 4
110#define UART_TYPE__PTYPE_WIDTH 3
111#define UART_TYPE__PTYPE_RESET_VAL 3
112#define UART_TYPE__PTYPE_RMASK 0x7
113#define UART_TYPE__PTYPE_MASK 0x70
114#define UART_TYPE__PTYPE_FIELD 4,6
115#define UART_TYPE__PTYPE_VAL_NONE 0x0
116#define UART_TYPE__PTYPE_VAL_MARK 0x1
117#define UART_TYPE__PTYPE_VAL_SPACE 0x2
118#define UART_TYPE__PTYPE_VAL_EVEN 0x3
119#define UART_TYPE__PTYPE_VAL_ODD 0x4
120#endif /* !defined(__ARCH_UART_DEF_H__) */
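Each field's *_MASK constant is a plain bit mask over the raw register word, and the write-one-to-clear UART_INTERRUPT_STATUS register shares the mask register's bit layout. A sketch of classifying a status word (which would be read with gxio_uart_read(ctx, UART_INTERRUPT_STATUS); the helper name is hypothetical):

#include <linux/types.h>
#include <arch/uart_def.h>

/* Sketch: true if the status word records any receive-side error. */
static int example_uart_rx_error(u64 status)
{
	return (status & (UART_INTERRUPT_MASK__FRAME_ERR_MASK |
			  UART_INTERRUPT_MASK__PARITY_ERR_MASK |
			  UART_INTERRUPT_MASK__RFIFO_OVERFLOW_MASK)) != 0;
}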
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild
index b17b9b8e53cd..664d6ad23f80 100644
--- a/arch/tile/include/asm/Kbuild
+++ b/arch/tile/include/asm/Kbuild
@@ -11,12 +11,13 @@ generic-y += errno.h
11generic-y += exec.h 11generic-y += exec.h
12generic-y += fb.h 12generic-y += fb.h
13generic-y += fcntl.h 13generic-y += fcntl.h
14generic-y += hw_irq.h
14generic-y += ioctl.h 15generic-y += ioctl.h
15generic-y += ioctls.h 16generic-y += ioctls.h
16generic-y += ipcbuf.h 17generic-y += ipcbuf.h
17generic-y += irq_regs.h 18generic-y += irq_regs.h
18generic-y += kdebug.h
19generic-y += local.h 19generic-y += local.h
20generic-y += local64.h
20generic-y += msgbuf.h 21generic-y += msgbuf.h
21generic-y += mutex.h 22generic-y += mutex.h
22generic-y += param.h 23generic-y += param.h
diff --git a/arch/tile/include/asm/atomic.h b/arch/tile/include/asm/atomic.h
index e71387ab20ca..d385eaadece7 100644
--- a/arch/tile/include/asm/atomic.h
+++ b/arch/tile/include/asm/atomic.h
@@ -114,6 +114,32 @@ static inline int atomic_read(const atomic_t *v)
114#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) 114#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
115 115
116/** 116/**
117 * atomic_xchg - atomically exchange contents of memory with a new value
118 * @v: pointer of type atomic_t
 119 * @n: integer value to store in memory
 120 *
 121 * Atomically sets @v to @n and returns the old @v
122 */
123static inline int atomic_xchg(atomic_t *v, int n)
124{
125 return xchg(&v->counter, n);
126}
127
128/**
129 * atomic_cmpxchg - atomically exchange contents of memory if it matches
130 * @v: pointer of type atomic_t
131 * @o: old value that memory should have
132 * @n: new value to write to memory if it matches
133 *
134 * Atomically checks if @v holds @o and replaces it with @n if so.
135 * Returns the old value at @v.
136 */
137static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
138{
139 return cmpxchg(&v->counter, o, n);
140}
141
142/**
117 * atomic_add_negative - add and test if negative 143 * atomic_add_negative - add and test if negative
118 * @v: pointer of type atomic_t 144 * @v: pointer of type atomic_t
119 * @i: integer value to add 145 * @i: integer value to add
@@ -133,6 +159,32 @@ static inline int atomic_read(const atomic_t *v)
133 159
134#ifndef __ASSEMBLY__ 160#ifndef __ASSEMBLY__
135 161
162/**
163 * atomic64_xchg - atomically exchange contents of memory with a new value
164 * @v: pointer of type atomic64_t
 165 * @n: integer value to store in memory
 166 *
 167 * Atomically sets @v to @n and returns the old @v
168 */
169static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
170{
171 return xchg64(&v->counter, n);
172}
173
174/**
175 * atomic64_cmpxchg - atomically exchange contents of memory if it matches
176 * @v: pointer of type atomic64_t
177 * @o: old value that memory should have
178 * @n: new value to write to memory if it matches
179 *
180 * Atomically checks if @v holds @o and replaces it with @n if so.
181 * Returns the old value at @v.
182 */
183static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
184{
185 return cmpxchg64(&v->counter, o, n);
186}
187
136static inline long long atomic64_dec_if_positive(atomic64_t *v) 188static inline long long atomic64_dec_if_positive(atomic64_t *v)
137{ 189{
138 long long c, old, dec; 190 long long c, old, dec;
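With atomic_cmpxchg() in the generic header, callers can build arbitrary read-modify-write updates as compare-and-swap retry loops. A sketch of the canonical shape (the saturating increment is purely illustrative, not code from this patch):

#include <linux/atomic.h>

/* Sketch: atomically increment v, but never past "limit". */
static int example_inc_saturating(atomic_t *v, int limit)
{
	int old, cur = atomic_read(v);

	while (cur < limit) {
		old = atomic_cmpxchg(v, cur, cur + 1);
		if (old == cur)
			return cur + 1;	/* our update won */
		cur = old;		/* raced; retry against the new value */
	}
	return cur;			/* already at or above the limit */
}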
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index e7fb5cfb9597..0d0395b1b152 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -22,40 +22,6 @@
22 22
23#ifndef __ASSEMBLY__ 23#ifndef __ASSEMBLY__
24 24
25/* Tile-specific routines to support <linux/atomic.h>. */
26int _atomic_xchg(atomic_t *v, int n);
27int _atomic_xchg_add(atomic_t *v, int i);
28int _atomic_xchg_add_unless(atomic_t *v, int a, int u);
29int _atomic_cmpxchg(atomic_t *v, int o, int n);
30
31/**
32 * atomic_xchg - atomically exchange contents of memory with a new value
33 * @v: pointer of type atomic_t
34 * @i: integer value to store in memory
35 *
36 * Atomically sets @v to @i and returns old @v
37 */
38static inline int atomic_xchg(atomic_t *v, int n)
39{
40 smp_mb(); /* barrier for proper semantics */
41 return _atomic_xchg(v, n);
42}
43
44/**
45 * atomic_cmpxchg - atomically exchange contents of memory if it matches
46 * @v: pointer of type atomic_t
47 * @o: old value that memory should have
48 * @n: new value to write to memory if it matches
49 *
50 * Atomically checks if @v holds @o and replaces it with @n if so.
51 * Returns the old value at @v.
52 */
53static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
54{
55 smp_mb(); /* barrier for proper semantics */
56 return _atomic_cmpxchg(v, o, n);
57}
58
59/** 25/**
60 * atomic_add - add integer to atomic variable 26 * atomic_add - add integer to atomic variable
61 * @i: integer value to add 27 * @i: integer value to add
@@ -65,7 +31,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
65 */ 31 */
66static inline void atomic_add(int i, atomic_t *v) 32static inline void atomic_add(int i, atomic_t *v)
67{ 33{
68 _atomic_xchg_add(v, i); 34 _atomic_xchg_add(&v->counter, i);
69} 35}
70 36
71/** 37/**
@@ -78,7 +44,7 @@ static inline void atomic_add(int i, atomic_t *v)
78static inline int atomic_add_return(int i, atomic_t *v) 44static inline int atomic_add_return(int i, atomic_t *v)
79{ 45{
80 smp_mb(); /* barrier for proper semantics */ 46 smp_mb(); /* barrier for proper semantics */
81 return _atomic_xchg_add(v, i) + i; 47 return _atomic_xchg_add(&v->counter, i) + i;
82} 48}
83 49
84/** 50/**
@@ -93,7 +59,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
93static inline int __atomic_add_unless(atomic_t *v, int a, int u) 59static inline int __atomic_add_unless(atomic_t *v, int a, int u)
94{ 60{
95 smp_mb(); /* barrier for proper semantics */ 61 smp_mb(); /* barrier for proper semantics */
96 return _atomic_xchg_add_unless(v, a, u); 62 return _atomic_xchg_add_unless(&v->counter, a, u);
97} 63}
98 64
99/** 65/**
@@ -108,7 +74,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
108 */ 74 */
109static inline void atomic_set(atomic_t *v, int n) 75static inline void atomic_set(atomic_t *v, int n)
110{ 76{
111 _atomic_xchg(v, n); 77 _atomic_xchg(&v->counter, n);
112} 78}
113 79
114/* A 64bit atomic type */ 80/* A 64bit atomic type */
@@ -119,11 +85,6 @@ typedef struct {
119 85
120#define ATOMIC64_INIT(val) { (val) } 86#define ATOMIC64_INIT(val) { (val) }
121 87
122u64 _atomic64_xchg(atomic64_t *v, u64 n);
123u64 _atomic64_xchg_add(atomic64_t *v, u64 i);
124u64 _atomic64_xchg_add_unless(atomic64_t *v, u64 a, u64 u);
125u64 _atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n);
126
127/** 88/**
128 * atomic64_read - read atomic variable 89 * atomic64_read - read atomic variable
129 * @v: pointer of type atomic64_t 90 * @v: pointer of type atomic64_t
@@ -137,35 +98,7 @@ static inline u64 atomic64_read(const atomic64_t *v)
137 * Casting away const is safe since the atomic support routines 98 * Casting away const is safe since the atomic support routines
138 * do not write to memory if the value has not been modified. 99 * do not write to memory if the value has not been modified.
139 */ 100 */
140 return _atomic64_xchg_add((atomic64_t *)v, 0); 101 return _atomic64_xchg_add((u64 *)&v->counter, 0);
141}
142
143/**
144 * atomic64_xchg - atomically exchange contents of memory with a new value
145 * @v: pointer of type atomic64_t
146 * @i: integer value to store in memory
147 *
148 * Atomically sets @v to @i and returns old @v
149 */
150static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
151{
152 smp_mb(); /* barrier for proper semantics */
153 return _atomic64_xchg(v, n);
154}
155
156/**
157 * atomic64_cmpxchg - atomically exchange contents of memory if it matches
158 * @v: pointer of type atomic64_t
159 * @o: old value that memory should have
160 * @n: new value to write to memory if it matches
161 *
162 * Atomically checks if @v holds @o and replaces it with @n if so.
163 * Returns the old value at @v.
164 */
165static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
166{
167 smp_mb(); /* barrier for proper semantics */
168 return _atomic64_cmpxchg(v, o, n);
169} 102}
170 103
171/** 104/**
@@ -177,7 +110,7 @@ static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
177 */ 110 */
178static inline void atomic64_add(u64 i, atomic64_t *v) 111static inline void atomic64_add(u64 i, atomic64_t *v)
179{ 112{
180 _atomic64_xchg_add(v, i); 113 _atomic64_xchg_add(&v->counter, i);
181} 114}
182 115
183/** 116/**
@@ -190,7 +123,7 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
190static inline u64 atomic64_add_return(u64 i, atomic64_t *v) 123static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
191{ 124{
192 smp_mb(); /* barrier for proper semantics */ 125 smp_mb(); /* barrier for proper semantics */
193 return _atomic64_xchg_add(v, i) + i; 126 return _atomic64_xchg_add(&v->counter, i) + i;
194} 127}
195 128
196/** 129/**
@@ -205,7 +138,7 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
205static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u) 138static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
206{ 139{
207 smp_mb(); /* barrier for proper semantics */ 140 smp_mb(); /* barrier for proper semantics */
208 return _atomic64_xchg_add_unless(v, a, u) != u; 141 return _atomic64_xchg_add_unless(&v->counter, a, u) != u;
209} 142}
210 143
211/** 144/**
@@ -220,7 +153,7 @@ static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
220 */ 153 */
221static inline void atomic64_set(atomic64_t *v, u64 n) 154static inline void atomic64_set(atomic64_t *v, u64 n)
222{ 155{
223 _atomic64_xchg(v, n); 156 _atomic64_xchg(&v->counter, n);
224} 157}
225 158
226#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0) 159#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
@@ -252,21 +185,6 @@ static inline void atomic64_set(atomic64_t *v, u64 n)
252 * Internal definitions only beyond this point. 185 * Internal definitions only beyond this point.
253 */ 186 */
254 187
255#define ATOMIC_LOCKS_FOUND_VIA_TABLE() \
256 (!CHIP_HAS_CBOX_HOME_MAP() && defined(CONFIG_SMP))
257
258#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
259
260/* Number of entries in atomic_lock_ptr[]. */
261#define ATOMIC_HASH_L1_SHIFT 6
262#define ATOMIC_HASH_L1_SIZE (1 << ATOMIC_HASH_L1_SHIFT)
263
264/* Number of locks in each struct pointed to by atomic_lock_ptr[]. */
265#define ATOMIC_HASH_L2_SHIFT (CHIP_L2_LOG_LINE_SIZE() - 2)
266#define ATOMIC_HASH_L2_SIZE (1 << ATOMIC_HASH_L2_SHIFT)
267
268#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
269
270/* 188/*
271 * Number of atomic locks in atomic_locks[]. Must be a power of two. 189 * Number of atomic locks in atomic_locks[]. Must be a power of two.
272 * There is no reason for more than PAGE_SIZE / 8 entries, since that 190 * There is no reason for more than PAGE_SIZE / 8 entries, since that
@@ -281,8 +199,6 @@ static inline void atomic64_set(atomic64_t *v, u64 n)
281extern int atomic_locks[]; 199extern int atomic_locks[];
282#endif 200#endif
283 201
284#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
285
286/* 202/*
287 * All the code that may fault while holding an atomic lock must 203 * All the code that may fault while holding an atomic lock must
288 * place the pointer to the lock in ATOMIC_LOCK_REG so the fault code 204 * place the pointer to the lock in ATOMIC_LOCK_REG so the fault code
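The atomic_32.h cleanup above keeps only the hashed-spinlock scheme: the address of the word being updated selects one of the atomic_locks[] entries, and the operation runs under that lock. A rough illustration of the selection step (the real hash in the support routines may differ; this only shows the shape):

    /* Pick a lock for a 4-byte-aligned address; ATOMIC_HASH_SIZE is a
     * power of two, so a mask replaces the modulo. */
    static inline int *__atomic_lock_for(volatile void *v)
    {
            unsigned long h = (unsigned long)v >> 2;

            return &atomic_locks[h & (ATOMIC_HASH_SIZE - 1)];
    }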
diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
index f4500c688ffa..ad220eed05fc 100644
--- a/arch/tile/include/asm/atomic_64.h
+++ b/arch/tile/include/asm/atomic_64.h
@@ -32,25 +32,6 @@
32 * on any routine which updates memory and returns a value. 32 * on any routine which updates memory and returns a value.
33 */ 33 */
34 34
35static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
36{
37 int val;
38 __insn_mtspr(SPR_CMPEXCH_VALUE, o);
39 smp_mb(); /* barrier for proper semantics */
40 val = __insn_cmpexch4((void *)&v->counter, n);
41 smp_mb(); /* barrier for proper semantics */
42 return val;
43}
44
45static inline int atomic_xchg(atomic_t *v, int n)
46{
47 int val;
48 smp_mb(); /* barrier for proper semantics */
49 val = __insn_exch4((void *)&v->counter, n);
50 smp_mb(); /* barrier for proper semantics */
51 return val;
52}
53
54static inline void atomic_add(int i, atomic_t *v) 35static inline void atomic_add(int i, atomic_t *v)
55{ 36{
56 __insn_fetchadd4((void *)&v->counter, i); 37 __insn_fetchadd4((void *)&v->counter, i);
@@ -72,7 +53,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
72 if (oldval == u) 53 if (oldval == u)
73 break; 54 break;
74 guess = oldval; 55 guess = oldval;
75 oldval = atomic_cmpxchg(v, guess, guess + a); 56 oldval = cmpxchg(&v->counter, guess, guess + a);
76 } while (guess != oldval); 57 } while (guess != oldval);
77 return oldval; 58 return oldval;
78} 59}
@@ -84,25 +65,6 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
84#define atomic64_read(v) ((v)->counter) 65#define atomic64_read(v) ((v)->counter)
85#define atomic64_set(v, i) ((v)->counter = (i)) 66#define atomic64_set(v, i) ((v)->counter = (i))
86 67
87static inline long atomic64_cmpxchg(atomic64_t *v, long o, long n)
88{
89 long val;
90 smp_mb(); /* barrier for proper semantics */
91 __insn_mtspr(SPR_CMPEXCH_VALUE, o);
92 val = __insn_cmpexch((void *)&v->counter, n);
93 smp_mb(); /* barrier for proper semantics */
94 return val;
95}
96
97static inline long atomic64_xchg(atomic64_t *v, long n)
98{
99 long val;
100 smp_mb(); /* barrier for proper semantics */
101 val = __insn_exch((void *)&v->counter, n);
102 smp_mb(); /* barrier for proper semantics */
103 return val;
104}
105
106static inline void atomic64_add(long i, atomic64_t *v) 68static inline void atomic64_add(long i, atomic64_t *v)
107{ 69{
108 __insn_fetchadd((void *)&v->counter, i); 70 __insn_fetchadd((void *)&v->counter, i);
@@ -124,7 +86,7 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
124 if (oldval == u) 86 if (oldval == u)
125 break; 87 break;
126 guess = oldval; 88 guess = oldval;
127 oldval = atomic64_cmpxchg(v, guess, guess + a); 89 oldval = cmpxchg(&v->counter, guess, guess + a);
128 } while (guess != oldval); 90 } while (guess != oldval);
129 return oldval != u; 91 return oldval != u;
130} 92}
diff --git a/arch/tile/include/asm/barrier.h b/arch/tile/include/asm/barrier.h
index 990a217a0b72..a9a73da5865d 100644
--- a/arch/tile/include/asm/barrier.h
+++ b/arch/tile/include/asm/barrier.h
@@ -77,7 +77,6 @@
77 77
78#define __sync() __insn_mf() 78#define __sync() __insn_mf()
79 79
80#if !CHIP_HAS_MF_WAITS_FOR_VICTIMS()
81#include <hv/syscall_public.h> 80#include <hv/syscall_public.h>
82/* 81/*
83 * Issue an uncacheable load to each memory controller, then 82 * Issue an uncacheable load to each memory controller, then
@@ -96,7 +95,6 @@ static inline void __mb_incoherent(void)
96 "r20", "r21", "r22", "r23", "r24", 95 "r20", "r21", "r22", "r23", "r24",
97 "r25", "r26", "r27", "r28", "r29"); 96 "r25", "r26", "r27", "r28", "r29");
98} 97}
99#endif
100 98
101/* Fence to guarantee visibility of stores to incoherent memory. */ 99/* Fence to guarantee visibility of stores to incoherent memory. */
102static inline void 100static inline void
@@ -104,7 +102,6 @@ mb_incoherent(void)
104{ 102{
105 __insn_mf(); 103 __insn_mf();
106 104
107#if !CHIP_HAS_MF_WAITS_FOR_VICTIMS()
108 { 105 {
109#if CHIP_HAS_TILE_WRITE_PENDING() 106#if CHIP_HAS_TILE_WRITE_PENDING()
110 const unsigned long WRITE_TIMEOUT_CYCLES = 400; 107 const unsigned long WRITE_TIMEOUT_CYCLES = 400;
@@ -116,7 +113,6 @@ mb_incoherent(void)
116#endif /* CHIP_HAS_TILE_WRITE_PENDING() */ 113#endif /* CHIP_HAS_TILE_WRITE_PENDING() */
117 (void) __mb_incoherent(); 114 (void) __mb_incoherent();
118 } 115 }
119#endif /* CHIP_HAS_MF_WAITS_FOR_VICTIMS() */
120} 116}
121 117
122#define fast_wmb() __sync() 118#define fast_wmb() __sync()
diff --git a/arch/tile/include/asm/bitops.h b/arch/tile/include/asm/bitops.h
index bd186c4eaa50..d5a206865036 100644
--- a/arch/tile/include/asm/bitops.h
+++ b/arch/tile/include/asm/bitops.h
@@ -29,17 +29,6 @@
29#endif 29#endif
30 30
31/** 31/**
32 * __ffs - find first set bit in word
33 * @word: The word to search
34 *
35 * Undefined if no set bit exists, so code should check against 0 first.
36 */
37static inline unsigned long __ffs(unsigned long word)
38{
39 return __builtin_ctzl(word);
40}
41
42/**
43 * ffz - find first zero bit in word 32 * ffz - find first zero bit in word
44 * @word: The word to search 33 * @word: The word to search
45 * 34 *
@@ -50,33 +39,6 @@ static inline unsigned long ffz(unsigned long word)
50 return __builtin_ctzl(~word); 39 return __builtin_ctzl(~word);
51} 40}
52 41
53/**
54 * __fls - find last set bit in word
55 * @word: The word to search
56 *
57 * Undefined if no set bit exists, so code should check against 0 first.
58 */
59static inline unsigned long __fls(unsigned long word)
60{
61 return (sizeof(word) * 8) - 1 - __builtin_clzl(word);
62}
63
64/**
65 * ffs - find first set bit in word
66 * @x: the word to search
67 *
68 * This is defined the same way as the libc and compiler builtin ffs
69 * routines, therefore differs in spirit from the other bitops.
70 *
71 * ffs(value) returns 0 if value is 0 or the position of the first
72 * set bit if value is nonzero. The first (least significant) bit
73 * is at position 1.
74 */
75static inline int ffs(int x)
76{
77 return __builtin_ffs(x);
78}
79
80static inline int fls64(__u64 w) 42static inline int fls64(__u64 w)
81{ 43{
82 return (sizeof(__u64) * 8) - __builtin_clzll(w); 44 return (sizeof(__u64) * 8) - __builtin_clzll(w);
@@ -118,6 +80,9 @@ static inline unsigned long __arch_hweight64(__u64 w)
118 return __builtin_popcountll(w); 80 return __builtin_popcountll(w);
119} 81}
120 82
83#include <asm-generic/bitops/builtin-__ffs.h>
84#include <asm-generic/bitops/builtin-__fls.h>
85#include <asm-generic/bitops/builtin-ffs.h>
121#include <asm-generic/bitops/const_hweight.h> 86#include <asm-generic/bitops/const_hweight.h>
122#include <asm-generic/bitops/lock.h> 87#include <asm-generic/bitops/lock.h>
123#include <asm-generic/bitops/find.h> 88#include <asm-generic/bitops/find.h>
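The three asm-generic headers pulled in above are thin wrappers around the same compiler builtins the removed functions used, so the semantics can be pinned down with small host-side checks (a 64-bit unsigned long is assumed):

    #include <assert.h>

    int main(void)
    {
            /* __ffs: bit index of least significant set bit (undefined for 0). */
            assert(__builtin_ctzl(0x18UL) == 3);
            /* __fls: bit index of most significant set bit (undefined for 0). */
            assert(8 * sizeof(unsigned long) - 1 - __builtin_clzl(0x18UL) == 4);
            /* ffs: 1-based position of the first set bit, 0 for no bits set. */
            assert(__builtin_ffs(0) == 0 && __builtin_ffs(0x18) == 4);
            return 0;
    }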
diff --git a/arch/tile/include/asm/bitops_32.h b/arch/tile/include/asm/bitops_32.h
index ddc4c1efde43..386865ad2f55 100644
--- a/arch/tile/include/asm/bitops_32.h
+++ b/arch/tile/include/asm/bitops_32.h
@@ -16,7 +16,7 @@
16#define _ASM_TILE_BITOPS_32_H 16#define _ASM_TILE_BITOPS_32_H
17 17
18#include <linux/compiler.h> 18#include <linux/compiler.h>
19#include <linux/atomic.h> 19#include <asm/barrier.h>
20 20
21/* Tile-specific routines to support <asm/bitops.h>. */ 21/* Tile-specific routines to support <asm/bitops.h>. */
22unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask); 22unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask);
diff --git a/arch/tile/include/asm/bitops_64.h b/arch/tile/include/asm/bitops_64.h
index 60b87ee54fb8..ad34cd056085 100644
--- a/arch/tile/include/asm/bitops_64.h
+++ b/arch/tile/include/asm/bitops_64.h
@@ -16,7 +16,7 @@
16#define _ASM_TILE_BITOPS_64_H 16#define _ASM_TILE_BITOPS_64_H
17 17
18#include <linux/compiler.h> 18#include <linux/compiler.h>
19#include <linux/atomic.h> 19#include <asm/cmpxchg.h>
20 20
21/* See <asm/bitops.h> for API comments. */ 21/* See <asm/bitops.h> for API comments. */
22 22
@@ -44,8 +44,7 @@ static inline void change_bit(unsigned nr, volatile unsigned long *addr)
44 oldval = *addr; 44 oldval = *addr;
45 do { 45 do {
46 guess = oldval; 46 guess = oldval;
47 oldval = atomic64_cmpxchg((atomic64_t *)addr, 47 oldval = cmpxchg(addr, guess, guess ^ mask);
48 guess, guess ^ mask);
49 } while (guess != oldval); 48 } while (guess != oldval);
50} 49}
51 50
@@ -90,8 +89,7 @@ static inline int test_and_change_bit(unsigned nr,
90 oldval = *addr; 89 oldval = *addr;
91 do { 90 do {
92 guess = oldval; 91 guess = oldval;
93 oldval = atomic64_cmpxchg((atomic64_t *)addr, 92 oldval = cmpxchg(addr, guess, guess ^ mask);
94 guess, guess ^ mask);
95 } while (guess != oldval); 93 } while (guess != oldval);
96 return (oldval & mask) != 0; 94 return (oldval & mask) != 0;
97} 95}
diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
index a9a529964e07..6160761d5f61 100644
--- a/arch/tile/include/asm/cache.h
+++ b/arch/tile/include/asm/cache.h
@@ -49,9 +49,16 @@
49#define __read_mostly __attribute__((__section__(".data..read_mostly"))) 49#define __read_mostly __attribute__((__section__(".data..read_mostly")))
50 50
51/* 51/*
52 * Attribute for data that is kept read/write coherent until the end of 52 * Originally we used small TLB pages for kernel data and grouped some
53 * initialization, then bumped to read/only incoherent for performance. 53 * things together as "write once", enforcing the property at the end
54 * of initialization by making those pages read-only and non-coherent.
55 * This allowed better cache utilization since cache inclusion did not
56 * need to be maintained. However, to do this requires an extra TLB
57 * entry, which on balance is more of a performance hit than the
58 * non-coherence is a performance gain, so we now just make "read
59 * mostly" and "write once" be synonyms. We keep the attribute
60 * separate in case we change our minds at a future date.
54 */ 61 */
55#define __write_once __attribute__((__section__(".w1data"))) 62#define __write_once __read_mostly
56 63
57#endif /* _ASM_TILE_CACHE_H */ 64#endif /* _ASM_TILE_CACHE_H */
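Call sites are unaffected by making the two attributes synonyms; data tagged either way now lands in .data..read_mostly. A hypothetical example of the usage this keeps working:

    /* Written once during boot, then effectively read-only. */
    static unsigned long node_mask __write_once;

    /* Read on hot paths, written rarely; kept off write-shared lines. */
    static int poll_interval __read_mostly = 10;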
diff --git a/arch/tile/include/asm/cacheflush.h b/arch/tile/include/asm/cacheflush.h
index 0fc63c488edf..92ee4c8a4f76 100644
--- a/arch/tile/include/asm/cacheflush.h
+++ b/arch/tile/include/asm/cacheflush.h
@@ -75,23 +75,6 @@ static inline void copy_to_user_page(struct vm_area_struct *vma,
75#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ 75#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
76 memcpy((dst), (src), (len)) 76 memcpy((dst), (src), (len))
77 77
78/*
79 * Invalidate a VA range; pads to L2 cacheline boundaries.
80 *
81 * Note that on TILE64, __inv_buffer() actually flushes modified
82 * cache lines in addition to invalidating them, i.e., it's the
83 * same as __finv_buffer().
84 */
85static inline void __inv_buffer(void *buffer, size_t size)
86{
87 char *next = (char *)((long)buffer & -L2_CACHE_BYTES);
88 char *finish = (char *)L2_CACHE_ALIGN((long)buffer + size);
89 while (next < finish) {
90 __insn_inv(next);
91 next += CHIP_INV_STRIDE();
92 }
93}
94
95/* Flush a VA range; pads to L2 cacheline boundaries. */ 78/* Flush a VA range; pads to L2 cacheline boundaries. */
96static inline void __flush_buffer(void *buffer, size_t size) 79static inline void __flush_buffer(void *buffer, size_t size)
97{ 80{
@@ -115,13 +98,6 @@ static inline void __finv_buffer(void *buffer, size_t size)
115} 98}
116 99
117 100
118/* Invalidate a VA range and wait for it to be complete. */
119static inline void inv_buffer(void *buffer, size_t size)
120{
121 __inv_buffer(buffer, size);
122 mb();
123}
124
125/* 101/*
126 * Flush a locally-homecached VA range and wait for the evicted 102 * Flush a locally-homecached VA range and wait for the evicted
127 * cachelines to hit memory. 103 * cachelines to hit memory.
@@ -142,6 +118,26 @@ static inline void finv_buffer_local(void *buffer, size_t size)
142 mb_incoherent(); 118 mb_incoherent();
143} 119}
144 120
121#ifdef __tilepro__
122/* Invalidate a VA range; pads to L2 cacheline boundaries. */
123static inline void __inv_buffer(void *buffer, size_t size)
124{
125 char *next = (char *)((long)buffer & -L2_CACHE_BYTES);
126 char *finish = (char *)L2_CACHE_ALIGN((long)buffer + size);
127 while (next < finish) {
128 __insn_inv(next);
129 next += CHIP_INV_STRIDE();
130 }
131}
132
133/* Invalidate a VA range and wait for it to be complete. */
134static inline void inv_buffer(void *buffer, size_t size)
135{
136 __inv_buffer(buffer, size);
137 mb();
138}
139#endif
140
145/* 141/*
146 * Flush and invalidate a VA range that is homed remotely, waiting 142 * Flush and invalidate a VA range that is homed remotely, waiting
147 * until the memory controller holds the flushed values. If "hfh" is 143 * until the memory controller holds the flushed values. If "hfh" is
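The padding these flush/inv loops perform is plain power-of-two alignment arithmetic: round the start down and the end up to an L2 line boundary, then step one line at a time. The same computation in isolation (a 64-byte line size is assumed for the example; the header uses L2_CACHE_BYTES):

    #define LINE 64UL

    static void for_each_line(void *buffer, unsigned long size)
    {
            char *next = (char *)((unsigned long)buffer & -LINE);  /* round down */
            char *finish = (char *)(((unsigned long)buffer + size
                                     + LINE - 1) & -LINE);         /* round up */

            while (next < finish) {
                    /* issue one inv/flush/finv instruction per line here */
                    next += LINE;
            }
    }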
diff --git a/arch/tile/include/asm/cmpxchg.h b/arch/tile/include/asm/cmpxchg.h
index 276f067e3640..4001d5eab4bb 100644
--- a/arch/tile/include/asm/cmpxchg.h
+++ b/arch/tile/include/asm/cmpxchg.h
@@ -20,53 +20,108 @@
20 20
21#ifndef __ASSEMBLY__ 21#ifndef __ASSEMBLY__
22 22
23/* Nonexistent functions intended to cause link errors. */ 23#include <asm/barrier.h>
24extern unsigned long __xchg_called_with_bad_pointer(void);
25extern unsigned long __cmpxchg_called_with_bad_pointer(void);
26 24
27#define xchg(ptr, x) \ 25/* Nonexistent functions intended to cause compile errors. */
26extern void __xchg_called_with_bad_pointer(void)
27 __compiletime_error("Bad argument size for xchg");
28extern void __cmpxchg_called_with_bad_pointer(void)
29 __compiletime_error("Bad argument size for cmpxchg");
30
31#ifndef __tilegx__
32
33/* Note the _atomic_xxx() routines include a final mb(). */
34int _atomic_xchg(int *ptr, int n);
35int _atomic_xchg_add(int *v, int i);
36int _atomic_xchg_add_unless(int *v, int a, int u);
37int _atomic_cmpxchg(int *ptr, int o, int n);
38u64 _atomic64_xchg(u64 *v, u64 n);
39u64 _atomic64_xchg_add(u64 *v, u64 i);
40u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u);
41u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
42
43#define xchg(ptr, n) \
44 ({ \
45 if (sizeof(*(ptr)) != 4) \
46 __xchg_called_with_bad_pointer(); \
47 smp_mb(); \
48 (typeof(*(ptr)))_atomic_xchg((int *)(ptr), (int)(n)); \
49 })
50
51#define cmpxchg(ptr, o, n) \
52 ({ \
53 if (sizeof(*(ptr)) != 4) \
54 __cmpxchg_called_with_bad_pointer(); \
55 smp_mb(); \
56 (typeof(*(ptr)))_atomic_cmpxchg((int *)ptr, (int)o, (int)n); \
57 })
58
59#define xchg64(ptr, n) \
60 ({ \
61 if (sizeof(*(ptr)) != 8) \
62 __xchg_called_with_bad_pointer(); \
63 smp_mb(); \
64 (typeof(*(ptr)))_atomic64_xchg((u64 *)(ptr), (u64)(n)); \
65 })
66
67#define cmpxchg64(ptr, o, n) \
68 ({ \
69 if (sizeof(*(ptr)) != 8) \
70 __cmpxchg_called_with_bad_pointer(); \
71 smp_mb(); \
72 (typeof(*(ptr)))_atomic64_cmpxchg((u64 *)ptr, (u64)o, (u64)n); \
73 })
74
75#else
76
77#define xchg(ptr, n) \
28 ({ \ 78 ({ \
29 typeof(*(ptr)) __x; \ 79 typeof(*(ptr)) __x; \
80 smp_mb(); \
30 switch (sizeof(*(ptr))) { \ 81 switch (sizeof(*(ptr))) { \
31 case 4: \ 82 case 4: \
32 __x = (typeof(__x))(typeof(__x-__x))atomic_xchg( \ 83 __x = (typeof(__x))(unsigned long) \
33 (atomic_t *)(ptr), \ 84 __insn_exch4((ptr), (u32)(unsigned long)(n)); \
34 (u32)(typeof((x)-(x)))(x)); \
35 break; \ 85 break; \
36 case 8: \ 86 case 8: \
37 __x = (typeof(__x))(typeof(__x-__x))atomic64_xchg( \ 87 __x = (typeof(__x)) \
38 (atomic64_t *)(ptr), \ 88 __insn_exch((ptr), (unsigned long)(n)); \
39 (u64)(typeof((x)-(x)))(x)); \
40 break; \ 89 break; \
41 default: \ 90 default: \
42 __xchg_called_with_bad_pointer(); \ 91 __xchg_called_with_bad_pointer(); \
92 break; \
43 } \ 93 } \
94 smp_mb(); \
44 __x; \ 95 __x; \
45 }) 96 })
46 97
47#define cmpxchg(ptr, o, n) \ 98#define cmpxchg(ptr, o, n) \
48 ({ \ 99 ({ \
49 typeof(*(ptr)) __x; \ 100 typeof(*(ptr)) __x; \
101 __insn_mtspr(SPR_CMPEXCH_VALUE, (unsigned long)(o)); \
102 smp_mb(); \
50 switch (sizeof(*(ptr))) { \ 103 switch (sizeof(*(ptr))) { \
51 case 4: \ 104 case 4: \
52 __x = (typeof(__x))(typeof(__x-__x))atomic_cmpxchg( \ 105 __x = (typeof(__x))(unsigned long) \
53 (atomic_t *)(ptr), \ 106 __insn_cmpexch4((ptr), (u32)(unsigned long)(n)); \
54 (u32)(typeof((o)-(o)))(o), \
55 (u32)(typeof((n)-(n)))(n)); \
56 break; \ 107 break; \
57 case 8: \ 108 case 8: \
58 __x = (typeof(__x))(typeof(__x-__x))atomic64_cmpxchg( \ 109 __x = (typeof(__x))__insn_cmpexch((ptr), (u64)(n)); \
59 (atomic64_t *)(ptr), \
60 (u64)(typeof((o)-(o)))(o), \
61 (u64)(typeof((n)-(n)))(n)); \
62 break; \ 110 break; \
63 default: \ 111 default: \
64 __cmpxchg_called_with_bad_pointer(); \ 112 __cmpxchg_called_with_bad_pointer(); \
113 break; \
65 } \ 114 } \
115 smp_mb(); \
66 __x; \ 116 __x; \
67 }) 117 })
68 118
69#define tas(ptr) (xchg((ptr), 1)) 119#define xchg64 xchg
120#define cmpxchg64 cmpxchg
121
122#endif
123
124#define tas(ptr) xchg((ptr), 1)
70 125
71#endif /* __ASSEMBLY__ */ 126#endif /* __ASSEMBLY__ */
72 127
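Moving from link errors to __compiletime_error() reports a bad operand size at the call site, and it works because the call to the marked function is only emitted when the size check cannot be optimized away. The pattern in isolation (a sketch using the underlying GCC attribute, not this header's code):

    extern void __bad_size(void) __attribute__((error("unsupported size")));

    #define native_read(ptr)                                            \
            ({                                                          \
                    unsigned long __v;                                  \
                    switch (sizeof(*(ptr))) {                           \
                    case 4:                                             \
                            __v = *(volatile unsigned int *)(ptr);      \
                            break;                                      \
                    case 8:                                             \
                            __v = *(volatile unsigned long *)(ptr);     \
                            break;                                      \
                    default:                                            \
                            __bad_size(); /* diagnosed at compile time */ \
                            __v = 0;                                    \
                            break;                                      \
                    }                                                   \
                    __v;                                                \
            })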
diff --git a/arch/tile/include/asm/device.h b/arch/tile/include/asm/device.h
index 5182705bd056..6ab8bf146d4c 100644
--- a/arch/tile/include/asm/device.h
+++ b/arch/tile/include/asm/device.h
@@ -23,7 +23,10 @@ struct dev_archdata {
23 /* Offset of the DMA address from the PA. */ 23 /* Offset of the DMA address from the PA. */
24 dma_addr_t dma_offset; 24 dma_addr_t dma_offset;
25 25
26 /* Highest DMA address that can be generated by this device. */ 26 /*
27 * Highest DMA address that can be generated by devices that
28 * have limited DMA capability, i.e. are not 64-bit capable.
29 */
27 dma_addr_t max_direct_dma_addr; 30 dma_addr_t max_direct_dma_addr;
28}; 31};
29 32
diff --git a/arch/tile/include/asm/dma-mapping.h b/arch/tile/include/asm/dma-mapping.h
index f2ff191376b4..1eae359d8315 100644
--- a/arch/tile/include/asm/dma-mapping.h
+++ b/arch/tile/include/asm/dma-mapping.h
@@ -20,9 +20,14 @@
20#include <linux/cache.h> 20#include <linux/cache.h>
21#include <linux/io.h> 21#include <linux/io.h>
22 22
23#ifdef __tilegx__
24#define ARCH_HAS_DMA_GET_REQUIRED_MASK
25#endif
26
23extern struct dma_map_ops *tile_dma_map_ops; 27extern struct dma_map_ops *tile_dma_map_ops;
24extern struct dma_map_ops *gx_pci_dma_map_ops; 28extern struct dma_map_ops *gx_pci_dma_map_ops;
25extern struct dma_map_ops *gx_legacy_pci_dma_map_ops; 29extern struct dma_map_ops *gx_legacy_pci_dma_map_ops;
30extern struct dma_map_ops *gx_hybrid_pci_dma_map_ops;
26 31
27static inline struct dma_map_ops *get_dma_ops(struct device *dev) 32static inline struct dma_map_ops *get_dma_ops(struct device *dev)
28{ 33{
@@ -44,12 +49,12 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off)
44 49
45static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) 50static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
46{ 51{
47 return paddr + get_dma_offset(dev); 52 return paddr;
48} 53}
49 54
50static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) 55static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
51{ 56{
52 return daddr - get_dma_offset(dev); 57 return daddr;
53} 58}
54 59
55static inline void dma_mark_clean(void *addr, size_t size) {} 60static inline void dma_mark_clean(void *addr, size_t size) {}
@@ -87,11 +92,19 @@ dma_set_mask(struct device *dev, u64 mask)
87{ 92{
88 struct dma_map_ops *dma_ops = get_dma_ops(dev); 93 struct dma_map_ops *dma_ops = get_dma_ops(dev);
89 94
90 /* Handle legacy PCI devices with limited memory addressability. */ 95 /*
91 if ((dma_ops == gx_pci_dma_map_ops) && (mask <= DMA_BIT_MASK(32))) { 96 * For PCI devices with 64-bit DMA addressing capability, promote
92 set_dma_ops(dev, gx_legacy_pci_dma_map_ops); 97 * the dma_ops to hybrid, with the consistent memory DMA space limited
93 set_dma_offset(dev, 0); 98 * to 32-bit. For 32-bit capable devices, limit the streaming DMA
94 if (mask > dev->archdata.max_direct_dma_addr) 99 * address range to max_direct_dma_addr.
100 */
101 if (dma_ops == gx_pci_dma_map_ops ||
102 dma_ops == gx_hybrid_pci_dma_map_ops ||
103 dma_ops == gx_legacy_pci_dma_map_ops) {
104 if (mask == DMA_BIT_MASK(64) &&
105 dma_ops == gx_legacy_pci_dma_map_ops)
106 set_dma_ops(dev, gx_hybrid_pci_dma_map_ops);
107 else if (mask > dev->archdata.max_direct_dma_addr)
95 mask = dev->archdata.max_direct_dma_addr; 108 mask = dev->archdata.max_direct_dma_addr;
96 } 109 }
97 110
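From the driver side the dma_ops promotion is transparent; it is triggered by the usual probe-time pattern of requesting 64-bit DMA and falling back to 32-bit. A typical sketch (generic PCI driver code, not part of this patch):

    static int example_setup_dma(struct pci_dev *pdev)
    {
            /* 64-bit capable device: may be promoted to the hybrid ops. */
            if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
                    return 0;

            /* Otherwise fall back to 32-bit streaming DMA. */
            return dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
    }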
diff --git a/arch/tile/include/asm/elf.h b/arch/tile/include/asm/elf.h
index ff8a93408823..41d9878a9686 100644
--- a/arch/tile/include/asm/elf.h
+++ b/arch/tile/include/asm/elf.h
@@ -30,7 +30,6 @@ typedef unsigned long elf_greg_t;
30#define ELF_NGREG (sizeof(struct pt_regs) / sizeof(elf_greg_t)) 30#define ELF_NGREG (sizeof(struct pt_regs) / sizeof(elf_greg_t))
31typedef elf_greg_t elf_gregset_t[ELF_NGREG]; 31typedef elf_greg_t elf_gregset_t[ELF_NGREG];
32 32
33#define EM_TILE64 187
34#define EM_TILEPRO 188 33#define EM_TILEPRO 188
35#define EM_TILEGX 191 34#define EM_TILEGX 191
36 35
@@ -132,6 +131,15 @@ extern int dump_task_regs(struct task_struct *, elf_gregset_t *);
132struct linux_binprm; 131struct linux_binprm;
133extern int arch_setup_additional_pages(struct linux_binprm *bprm, 132extern int arch_setup_additional_pages(struct linux_binprm *bprm,
134 int executable_stack); 133 int executable_stack);
134#define ARCH_DLINFO \
135do { \
136 NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE); \
137} while (0)
138
139struct mm_struct;
140extern unsigned long arch_randomize_brk(struct mm_struct *mm);
141#define arch_randomize_brk arch_randomize_brk
142
135#ifdef CONFIG_COMPAT 143#ifdef CONFIG_COMPAT
136 144
137#define COMPAT_ELF_PLATFORM "tilegx-m32" 145#define COMPAT_ELF_PLATFORM "tilegx-m32"
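ARCH_DLINFO is the hook that advertises the vDSO to userspace: the kernel places VDSO_BASE into the auxiliary vector, where the C library (or the program itself) can read it back. A small userspace sketch:

    #include <stdio.h>
    #include <sys/auxv.h>           /* getauxval, glibc >= 2.16 */

    int main(void)
    {
            /* Address of the vDSO ELF header, passed via AT_SYSINFO_EHDR. */
            unsigned long vdso = getauxval(AT_SYSINFO_EHDR);

            if (vdso)
                    printf("vDSO mapped at %#lx\n", vdso);
            return 0;
    }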
diff --git a/arch/tile/include/asm/fixmap.h b/arch/tile/include/asm/fixmap.h
index e16dbf929cb5..c6b9c1b38fd1 100644
--- a/arch/tile/include/asm/fixmap.h
+++ b/arch/tile/include/asm/fixmap.h
@@ -78,14 +78,6 @@ enum fixed_addresses {
78#endif 78#endif
79}; 79};
80 80
81extern void __set_fixmap(enum fixed_addresses idx,
82 unsigned long phys, pgprot_t flags);
83
84#define set_fixmap(idx, phys) \
85 __set_fixmap(idx, phys, PAGE_KERNEL)
86#define clear_fixmap(idx) \
87 __set_fixmap(idx, 0, __pgprot(0))
88
89#define __FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT) 81#define __FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
90#define __FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) 82#define __FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
91#define FIXADDR_START (FIXADDR_TOP + PAGE_SIZE - __FIXADDR_SIZE) 83#define FIXADDR_START (FIXADDR_TOP + PAGE_SIZE - __FIXADDR_SIZE)
diff --git a/arch/tile/include/asm/ftrace.h b/arch/tile/include/asm/ftrace.h
index 461459b06d98..13a9bb81a8ab 100644
--- a/arch/tile/include/asm/ftrace.h
+++ b/arch/tile/include/asm/ftrace.h
@@ -15,6 +15,26 @@
15#ifndef _ASM_TILE_FTRACE_H 15#ifndef _ASM_TILE_FTRACE_H
16#define _ASM_TILE_FTRACE_H 16#define _ASM_TILE_FTRACE_H
17 17
18/* empty */ 18#ifdef CONFIG_FUNCTION_TRACER
19
20#define MCOUNT_ADDR ((unsigned long)(__mcount))
21#define MCOUNT_INSN_SIZE 8 /* sizeof mcount call */
22
23#ifndef __ASSEMBLY__
24extern void __mcount(void);
25
26#ifdef CONFIG_DYNAMIC_FTRACE
27static inline unsigned long ftrace_call_adjust(unsigned long addr)
28{
29 return addr;
30}
31
32struct dyn_arch_ftrace {
33};
34#endif /* CONFIG_DYNAMIC_FTRACE */
35
36#endif /* __ASSEMBLY__ */
37
38#endif /* CONFIG_FUNCTION_TRACER */
19 39
20#endif /* _ASM_TILE_FTRACE_H */ 40#endif /* _ASM_TILE_FTRACE_H */
diff --git a/arch/tile/include/asm/futex.h b/arch/tile/include/asm/futex.h
index 5909ac3d7218..1a6ef1b69cb1 100644
--- a/arch/tile/include/asm/futex.h
+++ b/arch/tile/include/asm/futex.h
@@ -43,6 +43,7 @@
43 ".pushsection .fixup,\"ax\"\n" \ 43 ".pushsection .fixup,\"ax\"\n" \
44 "0: { movei %0, %5; j 9f }\n" \ 44 "0: { movei %0, %5; j 9f }\n" \
45 ".section __ex_table,\"a\"\n" \ 45 ".section __ex_table,\"a\"\n" \
46 ".align 8\n" \
46 ".quad 1b, 0b\n" \ 47 ".quad 1b, 0b\n" \
47 ".popsection\n" \ 48 ".popsection\n" \
48 "9:" \ 49 "9:" \
diff --git a/arch/tile/include/asm/homecache.h b/arch/tile/include/asm/homecache.h
index 7b7771328642..7ddd1b8d6910 100644
--- a/arch/tile/include/asm/homecache.h
+++ b/arch/tile/include/asm/homecache.h
@@ -33,8 +33,7 @@ struct zone;
33 33
34/* 34/*
35 * Is this page immutable (unwritable) and thus able to be cached more 35 * Is this page immutable (unwritable) and thus able to be cached more
36 * widely than would otherwise be possible? On tile64 this means we 36 * widely than would otherwise be possible? This means we have "nc" set.
37 * mark the PTE to cache locally; on tilepro it means we have "nc" set.
38 */ 37 */
39#define PAGE_HOME_IMMUTABLE -2 38#define PAGE_HOME_IMMUTABLE -2
40 39
@@ -44,16 +43,8 @@ struct zone;
44 */ 43 */
45#define PAGE_HOME_INCOHERENT -3 44#define PAGE_HOME_INCOHERENT -3
46 45
47#if CHIP_HAS_CBOX_HOME_MAP()
48/* Home for the page is distributed via hash-for-home. */ 46/* Home for the page is distributed via hash-for-home. */
49#define PAGE_HOME_HASH -4 47#define PAGE_HOME_HASH -4
50#endif
51
52/* Homing is unknown or unspecified. Not valid for page_home(). */
53#define PAGE_HOME_UNKNOWN -5
54
55/* Home on the current cpu. Not valid for page_home(). */
56#define PAGE_HOME_HERE -6
57 48
58/* Support wrapper to use instead of explicit hv_flush_remote(). */ 49/* Support wrapper to use instead of explicit hv_flush_remote(). */
59extern void flush_remote(unsigned long cache_pfn, unsigned long cache_length, 50extern void flush_remote(unsigned long cache_pfn, unsigned long cache_length,
diff --git a/arch/tile/include/asm/io.h b/arch/tile/include/asm/io.h
index 31672918064c..9fe434969fab 100644
--- a/arch/tile/include/asm/io.h
+++ b/arch/tile/include/asm/io.h
@@ -19,7 +19,8 @@
19#include <linux/bug.h> 19#include <linux/bug.h>
20#include <asm/page.h> 20#include <asm/page.h>
21 21
22#define IO_SPACE_LIMIT 0xfffffffful 22/* Maximum PCI I/O space address supported. */
23#define IO_SPACE_LIMIT 0xffffffff
23 24
24/* 25/*
25 * Convert a physical pointer to a virtual kernel pointer for /dev/mem 26 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
@@ -254,7 +255,7 @@ static inline void writeq(u64 val, unsigned long addr)
254 255
255static inline void memset_io(volatile void *dst, int val, size_t len) 256static inline void memset_io(volatile void *dst, int val, size_t len)
256{ 257{
257 int x; 258 size_t x;
258 BUG_ON((unsigned long)dst & 0x3); 259 BUG_ON((unsigned long)dst & 0x3);
259 val = (val & 0xff) * 0x01010101; 260 val = (val & 0xff) * 0x01010101;
260 for (x = 0; x < len; x += 4) 261 for (x = 0; x < len; x += 4)
@@ -264,7 +265,7 @@ static inline void memset_io(volatile void *dst, int val, size_t len)
264static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, 265static inline void memcpy_fromio(void *dst, const volatile void __iomem *src,
265 size_t len) 266 size_t len)
266{ 267{
267 int x; 268 size_t x;
268 BUG_ON((unsigned long)src & 0x3); 269 BUG_ON((unsigned long)src & 0x3);
269 for (x = 0; x < len; x += 4) 270 for (x = 0; x < len; x += 4)
270 *(u32 *)(dst + x) = readl(src + x); 271 *(u32 *)(dst + x) = readl(src + x);
@@ -273,7 +274,7 @@ static inline void memcpy_fromio(void *dst, const volatile void __iomem *src,
273static inline void memcpy_toio(volatile void __iomem *dst, const void *src, 274static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
274 size_t len) 275 size_t len)
275{ 276{
276 int x; 277 size_t x;
277 BUG_ON((unsigned long)dst & 0x3); 278 BUG_ON((unsigned long)dst & 0x3);
278 for (x = 0; x < len; x += 4) 279 for (x = 0; x < len; x += 4)
279 writel(*(u32 *)(src + x), dst + x); 280 writel(*(u32 *)(src + x), dst + x);
@@ -281,8 +282,108 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
281 282
282#endif 283#endif
283 284
285#if CHIP_HAS_MMIO() && defined(CONFIG_TILE_PCI_IO)
286
287static inline u8 inb(unsigned long addr)
288{
289 return readb((volatile void __iomem *) addr);
290}
291
292static inline u16 inw(unsigned long addr)
293{
294 return readw((volatile void __iomem *) addr);
295}
296
297static inline u32 inl(unsigned long addr)
298{
299 return readl((volatile void __iomem *) addr);
300}
301
302static inline void outb(u8 b, unsigned long addr)
303{
304 writeb(b, (volatile void __iomem *) addr);
305}
306
307static inline void outw(u16 b, unsigned long addr)
308{
309 writew(b, (volatile void __iomem *) addr);
310}
311
312static inline void outl(u32 b, unsigned long addr)
313{
314 writel(b, (volatile void __iomem *) addr);
315}
316
317static inline void insb(unsigned long addr, void *buffer, int count)
318{
319 if (count) {
320 u8 *buf = buffer;
321 do {
322 u8 x = inb(addr);
323 *buf++ = x;
324 } while (--count);
325 }
326}
327
328static inline void insw(unsigned long addr, void *buffer, int count)
329{
330 if (count) {
331 u16 *buf = buffer;
332 do {
333 u16 x = inw(addr);
334 *buf++ = x;
335 } while (--count);
336 }
337}
338
339static inline void insl(unsigned long addr, void *buffer, int count)
340{
341 if (count) {
342 u32 *buf = buffer;
343 do {
344 u32 x = inl(addr);
345 *buf++ = x;
346 } while (--count);
347 }
348}
349
350static inline void outsb(unsigned long addr, const void *buffer, int count)
351{
352 if (count) {
353 const u8 *buf = buffer;
354 do {
355 outb(*buf++, addr);
356 } while (--count);
357 }
358}
359
360static inline void outsw(unsigned long addr, const void *buffer, int count)
361{
362 if (count) {
363 const u16 *buf = buffer;
364 do {
365 outw(*buf++, addr);
366 } while (--count);
367 }
368}
369
370static inline void outsl(unsigned long addr, const void *buffer, int count)
371{
372 if (count) {
373 const u32 *buf = buffer;
374 do {
375 outl(*buf++, addr);
376 } while (--count);
377 }
378}
379
380extern void __iomem *ioport_map(unsigned long port, unsigned int len);
381extern void ioport_unmap(void __iomem *addr);
382
383#else
384
284/* 385/*
285 * The Tile architecture does not support IOPORT, even with PCI. 386 * The TilePro architecture does not support IOPORT, even with PCI.
286 * Unfortunately we can't yet simply not declare these methods, 387 * Unfortunately we can't yet simply not declare these methods,
287 * since some generic code that compiles into the kernel, but 388 * since some generic code that compiles into the kernel, but
288 * we never run, uses them unconditionally. 389 * we never run, uses them unconditionally.
@@ -290,7 +391,12 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
290 391
291static inline long ioport_panic(void) 392static inline long ioport_panic(void)
292{ 393{
394#ifdef __tilegx__
395 panic("PCI IO space support is disabled. Configure the kernel with"
396 " CONFIG_TILE_PCI_IO to enable it");
397#else
293 panic("inb/outb and friends do not exist on tile"); 398 panic("inb/outb and friends do not exist on tile");
399#endif
294 return 0; 400 return 0;
295} 401}
296 402
@@ -335,13 +441,6 @@ static inline void outl(u32 b, unsigned long addr)
335 ioport_panic(); 441 ioport_panic();
336} 442}
337 443
338#define inb_p(addr) inb(addr)
339#define inw_p(addr) inw(addr)
340#define inl_p(addr) inl(addr)
341#define outb_p(x, addr) outb((x), (addr))
342#define outw_p(x, addr) outw((x), (addr))
343#define outl_p(x, addr) outl((x), (addr))
344
345static inline void insb(unsigned long addr, void *buffer, int count) 444static inline void insb(unsigned long addr, void *buffer, int count)
346{ 445{
347 ioport_panic(); 446 ioport_panic();
@@ -372,6 +471,15 @@ static inline void outsl(unsigned long addr, const void *buffer, int count)
372 ioport_panic(); 471 ioport_panic();
373} 472}
374 473
474#endif /* CHIP_HAS_MMIO() && defined(CONFIG_TILE_PCI_IO) */
475
476#define inb_p(addr) inb(addr)
477#define inw_p(addr) inw(addr)
478#define inl_p(addr) inl(addr)
479#define outb_p(x, addr) outb((x), (addr))
480#define outw_p(x, addr) outw((x), (addr))
481#define outl_p(x, addr) outl((x), (addr))
482
375#define ioread16be(addr) be16_to_cpu(ioread16(addr)) 483#define ioread16be(addr) be16_to_cpu(ioread16(addr))
376#define ioread32be(addr) be32_to_cpu(ioread32(addr)) 484#define ioread32be(addr) be32_to_cpu(ioread32(addr))
377#define iowrite16be(v, addr) iowrite16(be16_to_cpu(v), (addr)) 485#define iowrite16be(v, addr) iowrite16(be16_to_cpu(v), (addr))
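With CONFIG_TILE_PCI_IO the port accessors become ordinary MMIO loads and stores against the mapped I/O window, so the portable ioport_map() idiom works unchanged. A typical driver sketch (hypothetical device with a 16-byte register window):

    static int example_reset(unsigned long port)
    {
            void __iomem *regs = ioport_map(port, 16);
            u8 status;

            if (!regs)
                    return -ENOMEM;
            iowrite8(0x01, regs);           /* equivalent of outb(0x01, port) */
            status = ioread8(regs + 1);     /* equivalent of inb(port + 1) */
            ioport_unmap(regs);
            return status;
    }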
diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h
index c96f9bbb760d..71af5747874d 100644
--- a/arch/tile/include/asm/irqflags.h
+++ b/arch/tile/include/asm/irqflags.h
@@ -124,6 +124,12 @@
124DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask); 124DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
125#define INITIAL_INTERRUPTS_ENABLED (1ULL << INT_MEM_ERROR) 125#define INITIAL_INTERRUPTS_ENABLED (1ULL << INT_MEM_ERROR)
126 126
127#ifdef CONFIG_DEBUG_PREEMPT
128/* Due to inclusion issues, we can't rely on <linux/smp.h> here. */
129extern unsigned int debug_smp_processor_id(void);
130# define smp_processor_id() debug_smp_processor_id()
131#endif
132
127/* Disable interrupts. */ 133/* Disable interrupts. */
128#define arch_local_irq_disable() \ 134#define arch_local_irq_disable() \
129 interrupt_mask_set_mask(LINUX_MASKABLE_INTERRUPTS) 135 interrupt_mask_set_mask(LINUX_MASKABLE_INTERRUPTS)
@@ -132,9 +138,18 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
132#define arch_local_irq_disable_all() \ 138#define arch_local_irq_disable_all() \
133 interrupt_mask_set_mask(-1ULL) 139 interrupt_mask_set_mask(-1ULL)
134 140
141/*
142 * Read the set of maskable interrupts.
143 * We avoid the preemption warning here via __this_cpu_ptr since even
144 * if irqs are already enabled, it's harmless to read the wrong cpu's
145 * enabled mask.
146 */
147#define arch_local_irqs_enabled() \
148 (*__this_cpu_ptr(&interrupts_enabled_mask))
149
135/* Re-enable all maskable interrupts. */ 150/* Re-enable all maskable interrupts. */
136#define arch_local_irq_enable() \ 151#define arch_local_irq_enable() \
137 interrupt_mask_reset_mask(__get_cpu_var(interrupts_enabled_mask)) 152 interrupt_mask_reset_mask(arch_local_irqs_enabled())
138 153
139/* Disable or enable interrupts based on flag argument. */ 154/* Disable or enable interrupts based on flag argument. */
140#define arch_local_irq_restore(disabled) do { \ 155#define arch_local_irq_restore(disabled) do { \
@@ -161,7 +176,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
161 176
162/* Prevent the given interrupt from being enabled next time we enable irqs. */ 177/* Prevent the given interrupt from being enabled next time we enable irqs. */
163#define arch_local_irq_mask(interrupt) \ 178#define arch_local_irq_mask(interrupt) \
164 (__get_cpu_var(interrupts_enabled_mask) &= ~(1ULL << (interrupt))) 179 this_cpu_and(interrupts_enabled_mask, ~(1ULL << (interrupt)))
165 180
166/* Prevent the given interrupt from being enabled immediately. */ 181/* Prevent the given interrupt from being enabled immediately. */
167#define arch_local_irq_mask_now(interrupt) do { \ 182#define arch_local_irq_mask_now(interrupt) do { \
@@ -171,7 +186,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
171 186
172/* Allow the given interrupt to be enabled next time we enable irqs. */ 187/* Allow the given interrupt to be enabled next time we enable irqs. */
173#define arch_local_irq_unmask(interrupt) \ 188#define arch_local_irq_unmask(interrupt) \
174 (__get_cpu_var(interrupts_enabled_mask) |= (1ULL << (interrupt))) 189 this_cpu_or(interrupts_enabled_mask, (1ULL << (interrupt)))
175 190
176/* Allow the given interrupt to be enabled immediately, if !irqs_disabled. */ 191/* Allow the given interrupt to be enabled immediately, if !irqs_disabled. */
177#define arch_local_irq_unmask_now(interrupt) do { \ 192#define arch_local_irq_unmask_now(interrupt) do { \
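The point of switching to this_cpu_and()/this_cpu_or() is that the per-cpu read-modify-write happens in one preemption-safe step; the old __get_cpu_var() form could be migrated to another cpu between computing the address and storing through it. The shape of the change in isolation (hypothetical variable name):

    #include <linux/percpu.h>

    DEFINE_PER_CPU(unsigned long long, example_mask);

    static void mask_bit(int bit)
    {
            /* racy if preempted:
             *   __get_cpu_var(example_mask) &= ~(1ULL << bit);
             */
            this_cpu_and(example_mask, ~(1ULL << bit));
    }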
diff --git a/arch/tile/include/asm/hw_irq.h b/arch/tile/include/asm/kdebug.h
index 4fac5fbf333e..5bbbfa904c2d 100644
--- a/arch/tile/include/asm/hw_irq.h
+++ b/arch/tile/include/asm/kdebug.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2010 Tilera Corporation. All Rights Reserved. 2 * Copyright 2012 Tilera Corporation. All Rights Reserved.
3 * 3 *
4 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License 5 * modify it under the terms of the GNU General Public License
@@ -12,7 +12,17 @@
12 * more details. 12 * more details.
13 */ 13 */
14 14
15#ifndef _ASM_TILE_HW_IRQ_H 15#ifndef _ASM_TILE_KDEBUG_H
16#define _ASM_TILE_HW_IRQ_H 16#define _ASM_TILE_KDEBUG_H
17 17
18#endif /* _ASM_TILE_HW_IRQ_H */ 18#include <linux/notifier.h>
19
20enum die_val {
21 DIE_OOPS = 1,
22 DIE_BREAK,
23 DIE_SSTEPBP,
24 DIE_PAGE_FAULT,
25 DIE_COMPILED_BPT
26};
27
28#endif /* _ASM_TILE_KDEBUG_H */
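These die_val codes travel over the standard die notifier chain, so a debugging module can subscribe to them with register_die_notifier(). A minimal sketch:

    #include <linux/kdebug.h>
    #include <linux/notifier.h>

    static int example_die(struct notifier_block *nb,
                           unsigned long val, void *data)
    {
            if (val == DIE_OOPS)
                    pr_alert("example: oops notification\n");
            return NOTIFY_DONE;
    }

    static struct notifier_block example_die_nb = {
            .notifier_call = example_die,
    };

    /* module init: register_die_notifier(&example_die_nb);
     * module exit: unregister_die_notifier(&example_die_nb); */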
diff --git a/arch/tile/include/asm/kgdb.h b/arch/tile/include/asm/kgdb.h
new file mode 100644
index 000000000000..280c181cf0db
--- /dev/null
+++ b/arch/tile/include/asm/kgdb.h
@@ -0,0 +1,71 @@
1/*
2 * Copyright 2013 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 *
14 * TILE-Gx KGDB support.
15 */
16
17#ifndef __TILE_KGDB_H__
18#define __TILE_KGDB_H__
19
20#include <linux/kdebug.h>
21#include <arch/opcode.h>
22
23#define GDB_SIZEOF_REG sizeof(unsigned long)
24
25/*
26 * TILE-Gx gdb expects the following register layout:
27 * 56 GPRs (R0 - R52, TP, SP, LR), 8 special GPRs (networks and ZERO),
28 * plus the PC and the faultnum.
29 *
30 * Even though the kernel does not use the 8 special GPRs, they need to
31 * be present in the packet so that the host-side gdb can process the
32 * registers correctly.
33 */
34#define DBG_MAX_REG_NUM (56+8+2)
35#define NUMREGBYTES (DBG_MAX_REG_NUM * GDB_SIZEOF_REG)
36
37/*
38 * BUFMAX defines the maximum number of characters in inbound/outbound
39 * buffers at least NUMREGBYTES*2 are needed for register packets,
40 * Longer buffer is needed to list all threads.
41 */
42#define BUFMAX 2048
43
44#define BREAK_INSTR_SIZE TILEGX_BUNDLE_SIZE_IN_BYTES
45
46/*
47 * Require a cache flush when setting/clearing a software breakpoint or writing memory.
48 */
49#define CACHE_FLUSH_IS_SAFE 1
50
51/*
52 * The compiled-in breakpoint instruction can be used to "break" into
53 * the debugger via magic system request key (sysrq-G).
54 */
55static tile_bundle_bits compiled_bpt = TILEGX_BPT_BUNDLE | DIE_COMPILED_BPT;
56
57enum tilegx_regnum {
58 TILEGX_PC_REGNUM = TREG_LAST_GPR + 9,
59 TILEGX_FAULTNUM_REGNUM,
60};
61
62/*
63 * Generate a breakpoint exception to "break" into the debugger.
64 */
65static inline void arch_kgdb_breakpoint(void)
66{
67 asm volatile (".quad %0\n\t"
68 ::""(compiled_bpt));
69}
70
71#endif /* __TILE_KGDB_H__ */
diff --git a/arch/tile/include/asm/kprobes.h b/arch/tile/include/asm/kprobes.h
new file mode 100644
index 000000000000..d8f9a83943b1
--- /dev/null
+++ b/arch/tile/include/asm/kprobes.h
@@ -0,0 +1,79 @@
1/*
2 * arch/tile/include/asm/kprobes.h
3 *
4 * Copyright 2012 Tilera Corporation. All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation, version 2.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for
14 * more details.
15 */
16
17#ifndef _ASM_TILE_KPROBES_H
18#define _ASM_TILE_KPROBES_H
19
20#include <linux/types.h>
21#include <linux/ptrace.h>
22#include <linux/percpu.h>
23
24#include <arch/opcode.h>
25
26#define __ARCH_WANT_KPROBES_INSN_SLOT
27#define MAX_INSN_SIZE 2
28
29#define kretprobe_blacklist_size 0
30
31typedef tile_bundle_bits kprobe_opcode_t;
32
33#define flush_insn_slot(p) \
34 flush_icache_range((unsigned long)p->addr, \
35 (unsigned long)p->addr + \
36 (MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
37
38struct kprobe;
39
40/* Architecture specific copy of original instruction. */
41struct arch_specific_insn {
42 kprobe_opcode_t *insn;
43};
44
45struct prev_kprobe {
46 struct kprobe *kp;
47 unsigned long status;
48 unsigned long saved_pc;
49};
50
51#define MAX_JPROBES_STACK_SIZE 128
52#define MAX_JPROBES_STACK_ADDR \
53 (((unsigned long)current_thread_info()) + THREAD_SIZE - 32 \
54 - sizeof(struct pt_regs))
55
56#define MIN_JPROBES_STACK_SIZE(ADDR) \
57 ((((ADDR) + MAX_JPROBES_STACK_SIZE) > MAX_JPROBES_STACK_ADDR) \
58 ? MAX_JPROBES_STACK_ADDR - (ADDR) \
59 : MAX_JPROBES_STACK_SIZE)
60
61/* per-cpu kprobe control block. */
62struct kprobe_ctlblk {
63 unsigned long kprobe_status;
64 unsigned long kprobe_saved_pc;
65 unsigned long jprobe_saved_sp;
66 struct prev_kprobe prev_kprobe;
67 struct pt_regs jprobe_saved_regs;
68 char jprobes_stack[MAX_JPROBES_STACK_SIZE];
69};
70
71extern tile_bundle_bits breakpoint2_insn;
72extern tile_bundle_bits breakpoint_insn;
73
74void arch_remove_kprobe(struct kprobe *);
75
76extern int kprobe_exceptions_notify(struct notifier_block *self,
77 unsigned long val, void *data);
78
79#endif /* _ASM_TILE_KPROBES_H */
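The control block above is filled in by the generic kprobes core on each hit; from a module, all of this is driven through register_kprobe(). A minimal sketch (the probed symbol is just an example):

    #include <linux/kprobes.h>

    static int example_pre(struct kprobe *p, struct pt_regs *regs)
    {
            pr_info("kprobe hit at %p\n", p->addr);
            return 0;       /* continue: single-step the copied bundle */
    }

    static struct kprobe example_kp = {
            .symbol_name = "do_fork",       /* hypothetical target */
            .pre_handler = example_pre,
    };

    /* module init: register_kprobe(&example_kp);
     * module exit: unregister_kprobe(&example_kp); */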
diff --git a/arch/tile/include/asm/mmu.h b/arch/tile/include/asm/mmu.h
index e2c789096795..0cab1182bde1 100644
--- a/arch/tile/include/asm/mmu.h
+++ b/arch/tile/include/asm/mmu.h
@@ -22,6 +22,7 @@ struct mm_context {
22 * semaphore but atomically, but it is conservatively set. 22 * semaphore but atomically, but it is conservatively set.
23 */ 23 */
24 unsigned long priority_cached; 24 unsigned long priority_cached;
25 unsigned long vdso_base;
25}; 26};
26 27
27typedef struct mm_context mm_context_t; 28typedef struct mm_context mm_context_t;
diff --git a/arch/tile/include/asm/mmu_context.h b/arch/tile/include/asm/mmu_context.h
index 37f0b741dee7..4734215e2ad4 100644
--- a/arch/tile/include/asm/mmu_context.h
+++ b/arch/tile/include/asm/mmu_context.h
@@ -45,7 +45,7 @@ static inline void __install_page_table(pgd_t *pgdir, int asid, pgprot_t prot)
45 45
46static inline void install_page_table(pgd_t *pgdir, int asid) 46static inline void install_page_table(pgd_t *pgdir, int asid)
47{ 47{
48 pte_t *ptep = virt_to_pte(NULL, (unsigned long)pgdir); 48 pte_t *ptep = virt_to_kpte((unsigned long)pgdir);
49 __install_page_table(pgdir, asid, *ptep); 49 __install_page_table(pgdir, asid, *ptep);
50} 50}
51 51
diff --git a/arch/tile/include/asm/mmzone.h b/arch/tile/include/asm/mmzone.h
index 9d3dbce8f953..804f1098b6cd 100644
--- a/arch/tile/include/asm/mmzone.h
+++ b/arch/tile/include/asm/mmzone.h
@@ -42,7 +42,7 @@ static inline int pfn_to_nid(unsigned long pfn)
42 42
43#define kern_addr_valid(kaddr) virt_addr_valid((void *)kaddr) 43#define kern_addr_valid(kaddr) virt_addr_valid((void *)kaddr)
44 44
45static inline int pfn_valid(int pfn) 45static inline int pfn_valid(unsigned long pfn)
46{ 46{
47 int nid = pfn_to_nid(pfn); 47 int nid = pfn_to_nid(pfn);
48 48
diff --git a/arch/tile/include/asm/page.h b/arch/tile/include/asm/page.h
index dd033a4fd627..6346888f7bdc 100644
--- a/arch/tile/include/asm/page.h
+++ b/arch/tile/include/asm/page.h
@@ -39,6 +39,12 @@
39#define HPAGE_MASK (~(HPAGE_SIZE - 1)) 39#define HPAGE_MASK (~(HPAGE_SIZE - 1))
40 40
41/* 41/*
42 * We do define AT_SYSINFO_EHDR to support vDSO,
43 * but don't use the gate mechanism.
44 */
45#define __HAVE_ARCH_GATE_AREA 1
46
47/*
42 * If the Kconfig doesn't specify, set a maximum zone order that 48 * If the Kconfig doesn't specify, set a maximum zone order that
43 * is enough so that we can create huge pages from small pages given 49 * is enough so that we can create huge pages from small pages given
44 * the respective sizes of the two page types. See <linux/mmzone.h>. 50 * the respective sizes of the two page types. See <linux/mmzone.h>.
@@ -142,8 +148,12 @@ static inline __attribute_const__ int get_order(unsigned long size)
142#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA 148#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
143#endif 149#endif
144 150
151/* Allow overriding how much VA or PA the kernel will use. */
152#define MAX_PA_WIDTH CHIP_PA_WIDTH()
153#define MAX_VA_WIDTH CHIP_VA_WIDTH()
154
145/* Each memory controller has PAs distinct in their high bits. */ 155/* Each memory controller has PAs distinct in their high bits. */
146#define NR_PA_HIGHBIT_SHIFT (CHIP_PA_WIDTH() - CHIP_LOG_NUM_MSHIMS()) 156#define NR_PA_HIGHBIT_SHIFT (MAX_PA_WIDTH - CHIP_LOG_NUM_MSHIMS())
147#define NR_PA_HIGHBIT_VALUES (1 << CHIP_LOG_NUM_MSHIMS()) 157#define NR_PA_HIGHBIT_VALUES (1 << CHIP_LOG_NUM_MSHIMS())
148#define __pa_to_highbits(pa) ((phys_addr_t)(pa) >> NR_PA_HIGHBIT_SHIFT) 158#define __pa_to_highbits(pa) ((phys_addr_t)(pa) >> NR_PA_HIGHBIT_SHIFT)
149#define __pfn_to_highbits(pfn) ((pfn) >> (NR_PA_HIGHBIT_SHIFT - PAGE_SHIFT)) 159#define __pfn_to_highbits(pfn) ((pfn) >> (NR_PA_HIGHBIT_SHIFT - PAGE_SHIFT))
@@ -154,7 +164,7 @@ static inline __attribute_const__ int get_order(unsigned long size)
154 * We reserve the lower half of memory for user-space programs, and the 164 * We reserve the lower half of memory for user-space programs, and the
155 * upper half for system code. We re-map all of physical memory in the 165 * upper half for system code. We re-map all of physical memory in the
156 * upper half, which takes a quarter of our VA space. Then we have 166 * upper half, which takes a quarter of our VA space. Then we have
157 * the vmalloc regions. The supervisor code lives at 0xfffffff700000000, 167 * the vmalloc regions. The supervisor code lives at the highest address,
158 * with the hypervisor above that. 168 * with the hypervisor above that.
159 * 169 *
160 * Loadable kernel modules are placed immediately after the static 170 * Loadable kernel modules are placed immediately after the static
@@ -166,26 +176,19 @@ static inline __attribute_const__ int get_order(unsigned long size)
166 * Similarly, for now we don't play any struct page mapping games. 176 * Similarly, for now we don't play any struct page mapping games.
167 */ 177 */
168 178
169#if CHIP_PA_WIDTH() + 2 > CHIP_VA_WIDTH() 179#if MAX_PA_WIDTH + 2 > MAX_VA_WIDTH
170# error Too much PA to map with the VA available! 180# error Too much PA to map with the VA available!
171#endif 181#endif
172#define HALF_VA_SPACE (_AC(1, UL) << (CHIP_VA_WIDTH() - 1))
173 182
174#define MEM_LOW_END (HALF_VA_SPACE - 1) /* low half */ 183#define PAGE_OFFSET (-(_AC(1, UL) << (MAX_VA_WIDTH - 1)))
175#define MEM_HIGH_START (-HALF_VA_SPACE) /* high half */ 184#define KERNEL_HIGH_VADDR _AC(0xfffffff800000000, UL) /* high 32GB */
176#define PAGE_OFFSET MEM_HIGH_START 185#define FIXADDR_BASE (KERNEL_HIGH_VADDR - 0x400000000) /* 4 GB */
177#define FIXADDR_BASE _AC(0xfffffff400000000, UL) /* 4 GB */ 186#define FIXADDR_TOP (KERNEL_HIGH_VADDR - 0x300000000) /* 4 GB */
178#define FIXADDR_TOP _AC(0xfffffff500000000, UL) /* 4 GB */
179#define _VMALLOC_START FIXADDR_TOP 187#define _VMALLOC_START FIXADDR_TOP
180#define HUGE_VMAP_BASE _AC(0xfffffff600000000, UL) /* 4 GB */ 188#define HUGE_VMAP_BASE (KERNEL_HIGH_VADDR - 0x200000000) /* 4 GB */
181#define MEM_SV_START _AC(0xfffffff700000000, UL) /* 256 MB */ 189#define MEM_SV_START (KERNEL_HIGH_VADDR - 0x100000000) /* 256 MB */
182#define MEM_SV_INTRPT MEM_SV_START 190#define MEM_MODULE_START (MEM_SV_START + (256*1024*1024)) /* 256 MB */
183#define MEM_MODULE_START _AC(0xfffffff710000000, UL) /* 256 MB */
184#define MEM_MODULE_END (MEM_MODULE_START + (256*1024*1024)) 191#define MEM_MODULE_END (MEM_MODULE_START + (256*1024*1024))
185#define MEM_HV_START _AC(0xfffffff800000000, UL) /* 32 GB */
186
187/* Highest DTLB address we will use */
188#define KERNEL_HIGH_VADDR MEM_SV_START
189 192
190#else /* !__tilegx__ */ 193#else /* !__tilegx__ */
191 194
@@ -207,25 +210,18 @@ static inline __attribute_const__ int get_order(unsigned long size)
207 * values, and after that, we show "typical" values, since the actual 210 * values, and after that, we show "typical" values, since the actual
208 * addresses depend on kernel #defines. 211 * addresses depend on kernel #defines.
209 * 212 *
210 * MEM_HV_INTRPT 0xfe000000 213 * MEM_HV_START 0xfe000000
211 * MEM_SV_INTRPT (kernel code) 0xfd000000 214 * MEM_SV_START (kernel code) 0xfd000000
212 * MEM_USER_INTRPT (user vector) 0xfc000000 215 * MEM_USER_INTRPT (user vector) 0xfc000000
213 * FIX_KMAP_xxx 0xf8000000 (via NR_CPUS * KM_TYPE_NR) 216 * FIX_KMAP_xxx 0xfa000000 (via NR_CPUS * KM_TYPE_NR)
214 * PKMAP_BASE 0xf7000000 (via LAST_PKMAP) 217 * PKMAP_BASE 0xf9000000 (via LAST_PKMAP)
215 * HUGE_VMAP 0xf3000000 (via CONFIG_NR_HUGE_VMAPS) 218 * VMALLOC_START 0xf7000000 (via VMALLOC_RESERVE)
216 * VMALLOC_START 0xf0000000 (via __VMALLOC_RESERVE)
217 * mapped LOWMEM 0xc0000000 219 * mapped LOWMEM 0xc0000000
218 */ 220 */
219 221
220#define MEM_USER_INTRPT _AC(0xfc000000, UL) 222#define MEM_USER_INTRPT _AC(0xfc000000, UL)
221#if CONFIG_KERNEL_PL == 1 223#define MEM_SV_START _AC(0xfd000000, UL)
222#define MEM_SV_INTRPT _AC(0xfd000000, UL) 224#define MEM_HV_START _AC(0xfe000000, UL)
223#define MEM_HV_INTRPT _AC(0xfe000000, UL)
224#else
225#define MEM_GUEST_INTRPT _AC(0xfd000000, UL)
226#define MEM_SV_INTRPT _AC(0xfe000000, UL)
227#define MEM_HV_INTRPT _AC(0xff000000, UL)
228#endif
229 225
230#define INTRPT_SIZE 0x4000 226#define INTRPT_SIZE 0x4000
231 227
@@ -246,7 +242,7 @@ static inline __attribute_const__ int get_order(unsigned long size)
246 242
247#endif /* __tilegx__ */ 243#endif /* __tilegx__ */
248 244
249#ifndef __ASSEMBLY__ 245#if !defined(__ASSEMBLY__) && !defined(VDSO_BUILD)
250 246
251#ifdef CONFIG_HIGHMEM 247#ifdef CONFIG_HIGHMEM
252 248
@@ -332,6 +328,7 @@ static inline int pfn_valid(unsigned long pfn)
332 328
333struct mm_struct; 329struct mm_struct;
334extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr); 330extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
331extern pte_t *virt_to_kpte(unsigned long kaddr);
335 332
336#endif /* !__ASSEMBLY__ */ 333#endif /* !__ASSEMBLY__ */
337 334
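The symbolic tilegx layout reproduces the removed hard-coded map exactly. Plugging in a VA width of 42 bits (an assumption for this worked example) recovers the old constants:

    PAGE_OFFSET       = -(1UL << 41)                    = 0xfffffe0000000000
    KERNEL_HIGH_VADDR =                                   0xfffffff800000000
    FIXADDR_BASE      = KERNEL_HIGH_VADDR - 0x400000000 = 0xfffffff400000000
    FIXADDR_TOP       = KERNEL_HIGH_VADDR - 0x300000000 = 0xfffffff500000000
    HUGE_VMAP_BASE    = KERNEL_HIGH_VADDR - 0x200000000 = 0xfffffff600000000
    MEM_SV_START      = KERNEL_HIGH_VADDR - 0x100000000 = 0xfffffff700000000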
diff --git a/arch/tile/include/asm/pci.h b/arch/tile/include/asm/pci.h
index 54a924208d3c..dfedd7ac7298 100644
--- a/arch/tile/include/asm/pci.h
+++ b/arch/tile/include/asm/pci.h
@@ -17,7 +17,6 @@
17 17
18#include <linux/dma-mapping.h> 18#include <linux/dma-mapping.h>
19#include <linux/pci.h> 19#include <linux/pci.h>
20#include <linux/numa.h>
21#include <asm-generic/pci_iomap.h> 20#include <asm-generic/pci_iomap.h>
22 21
23#ifndef __tilegx__ 22#ifndef __tilegx__
@@ -29,7 +28,6 @@ struct pci_controller {
29 int index; /* PCI domain number */ 28 int index; /* PCI domain number */
30 struct pci_bus *root_bus; 29 struct pci_bus *root_bus;
31 30
32 int first_busno;
33 int last_busno; 31 int last_busno;
34 32
35 int hv_cfg_fd[2]; /* config{0,1} fds for this PCIe controller */ 33 int hv_cfg_fd[2]; /* config{0,1} fds for this PCIe controller */
@@ -124,6 +122,11 @@ static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) {}
124 * the CPA plus TILE_PCI_MEM_MAP_BASE_OFFSET. To support 32-bit 122 * the CPA plus TILE_PCI_MEM_MAP_BASE_OFFSET. To support 32-bit
125 * devices, we create a separate map region that handles the low 123 * devices, we create a separate map region that handles the low
126 * 4GB. 124 * 4GB.
125 *
126 * This design lets us avoid the "PCI hole" problem where the host bridge
127 * won't pass DMA traffic with target addresses that happen to fall within the
128 * BAR space. This enables us to use all the physical memory for DMA, instead
129 * of wasting the same amount of physical memory as the BAR window size.
127 */ 130 */
128#define TILE_PCI_MEM_MAP_BASE_OFFSET (1ULL << CHIP_PA_WIDTH()) 131#define TILE_PCI_MEM_MAP_BASE_OFFSET (1ULL << CHIP_PA_WIDTH())
129 132
@@ -145,6 +148,10 @@ struct pci_controller {
145 148
146 int pio_mem_index; /* PIO region index for memory access */ 149 int pio_mem_index; /* PIO region index for memory access */
147 150
151#ifdef CONFIG_TILE_PCI_IO
152 int pio_io_index; /* PIO region index for I/O space access */
153#endif
154
148 /* 155 /*
149 * Mem-Map regions for all the memory controllers so that Linux can 156 * Mem-Map regions for all the memory controllers so that Linux can
150 * map all of its physical memory space to the PCI bus. 157 * map all of its physical memory space to the PCI bus.
@@ -154,6 +161,10 @@ struct pci_controller {
154 int index; /* PCI domain number */ 161 int index; /* PCI domain number */
155 struct pci_bus *root_bus; 162 struct pci_bus *root_bus;
156 163
164 /* PCI I/O space resource for this controller. */
165 struct resource io_space;
166 char io_space_name[32];
167
157 /* PCI memory space resource for this controller. */ 168 /* PCI memory space resource for this controller. */
158 struct resource mem_space; 169 struct resource mem_space;
159 char mem_space_name[32]; 170 char mem_space_name[32];
@@ -166,13 +177,11 @@ struct pci_controller {
166 177
167 /* Table that maps the INTx numbers to Linux irq numbers. */ 178 /* Table that maps the INTx numbers to Linux irq numbers. */
168 int irq_intx_table[4]; 179 int irq_intx_table[4];
169
170 /* Address ranges that are routed to this controller/bridge. */
171 struct resource mem_resources[3];
172}; 180};
173 181
174extern struct pci_controller pci_controllers[TILEGX_NUM_TRIO * TILEGX_TRIO_PCIES]; 182extern struct pci_controller pci_controllers[TILEGX_NUM_TRIO * TILEGX_TRIO_PCIES];
175extern gxio_trio_context_t trio_contexts[TILEGX_NUM_TRIO]; 183extern gxio_trio_context_t trio_contexts[TILEGX_NUM_TRIO];
184extern int num_trio_shims;
176 185
177extern void pci_iounmap(struct pci_dev *dev, void __iomem *); 186extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
178 187
@@ -211,7 +220,8 @@ static inline int pcibios_assign_all_busses(void)
211} 220}
212 221
213#define PCIBIOS_MIN_MEM 0 222#define PCIBIOS_MIN_MEM 0
214#define PCIBIOS_MIN_IO 0 223/* Minimum PCI I/O address, starting at the page boundary. */
224#define PCIBIOS_MIN_IO PAGE_SIZE
215 225
216/* Use any cpu for PCI. */ 226/* Use any cpu for PCI. */
217#define cpumask_of_pcibus(bus) cpu_online_mask 227#define cpumask_of_pcibus(bus) cpu_online_mask
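To make the mem-map arithmetic described in the pci.h comment concrete: a minimal sketch, assuming a driver already has a CPU physical address (CPA) in hand. tile_cpa_to_bus64() is a hypothetical helper for illustration, not part of the patch.

/*
 * Illustrative only: with the mem-map regions programmed as described
 * above, the bus address a 64-bit-capable device should DMA to is the
 * CPA offset above the BAR space by TILE_PCI_MEM_MAP_BASE_OFFSET.
 */
#include <linux/types.h>
#include <asm/pci.h>

static inline u64 tile_cpa_to_bus64(u64 cpa)
{
	return cpa + TILE_PCI_MEM_MAP_BASE_OFFSET;
}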
diff --git a/arch/tile/include/asm/pgtable_32.h b/arch/tile/include/asm/pgtable_32.h
index 4ce4a7a99c24..63142ab3b3dd 100644
--- a/arch/tile/include/asm/pgtable_32.h
+++ b/arch/tile/include/asm/pgtable_32.h
@@ -84,10 +84,12 @@ extern unsigned long VMALLOC_RESERVE /* = CONFIG_VMALLOC_RESERVE */;
84/* We have no pmd or pud since we are strictly a two-level page table */ 84/* We have no pmd or pud since we are strictly a two-level page table */
85#include <asm-generic/pgtable-nopmd.h> 85#include <asm-generic/pgtable-nopmd.h>
86 86
87static inline int pud_huge_page(pud_t pud) { return 0; }
88
87/* We don't define any pgds for these addresses. */ 89/* We don't define any pgds for these addresses. */
88static inline int pgd_addr_invalid(unsigned long addr) 90static inline int pgd_addr_invalid(unsigned long addr)
89{ 91{
90 return addr >= MEM_HV_INTRPT; 92 return addr >= MEM_HV_START;
91} 93}
92 94
93/* 95/*
diff --git a/arch/tile/include/asm/pgtable_64.h b/arch/tile/include/asm/pgtable_64.h
index 2492fa5478e7..3421177f7370 100644
--- a/arch/tile/include/asm/pgtable_64.h
+++ b/arch/tile/include/asm/pgtable_64.h
@@ -63,6 +63,15 @@
63/* We have no pud since we are a three-level page table. */ 63/* We have no pud since we are a three-level page table. */
64#include <asm-generic/pgtable-nopud.h> 64#include <asm-generic/pgtable-nopud.h>
65 65
66/*
67 * pmds are the same as pgds and ptes, so converting is a no-op.
68 */
69#define pmd_pte(pmd) (pmd)
70#define pmdp_ptep(pmdp) (pmdp)
71#define pte_pmd(pte) (pte)
72
73#define pud_pte(pud) ((pud).pgd)
74
66static inline int pud_none(pud_t pud) 75static inline int pud_none(pud_t pud)
67{ 76{
68 return pud_val(pud) == 0; 77 return pud_val(pud) == 0;
@@ -73,6 +82,11 @@ static inline int pud_present(pud_t pud)
73 return pud_val(pud) & _PAGE_PRESENT; 82 return pud_val(pud) & _PAGE_PRESENT;
74} 83}
75 84
85static inline int pud_huge_page(pud_t pud)
86{
87 return pud_val(pud) & _PAGE_HUGE_PAGE;
88}
89
76#define pmd_ERROR(e) \ 90#define pmd_ERROR(e) \
77 pr_err("%s:%d: bad pmd 0x%016llx.\n", __FILE__, __LINE__, pmd_val(e)) 91 pr_err("%s:%d: bad pmd 0x%016llx.\n", __FILE__, __LINE__, pmd_val(e))
78 92
@@ -89,6 +103,9 @@ static inline int pud_bad(pud_t pud)
89/* Return the page-table frame number (ptfn) that a pud_t points at. */ 103/* Return the page-table frame number (ptfn) that a pud_t points at. */
90#define pud_ptfn(pud) hv_pte_get_ptfn((pud).pgd) 104#define pud_ptfn(pud) hv_pte_get_ptfn((pud).pgd)
91 105
106/* Return the page frame number (pfn) that a pud_t points at. */
107#define pud_pfn(pud) pte_pfn(pud_pte(pud))
108
92/* 109/*
93 * A given kernel pud_t maps to a kernel pmd_t table at a specific 110 * A given kernel pud_t maps to a kernel pmd_t table at a specific
94 * virtual address. Since kernel pmd_t tables can be aligned at 111 * virtual address. Since kernel pmd_t tables can be aligned at
@@ -123,8 +140,7 @@ static inline unsigned long pgd_addr_normalize(unsigned long addr)
123/* We don't define any pgds for these addresses. */ 140/* We don't define any pgds for these addresses. */
124static inline int pgd_addr_invalid(unsigned long addr) 141static inline int pgd_addr_invalid(unsigned long addr)
125{ 142{
126 return addr >= MEM_HV_START || 143 return addr >= KERNEL_HIGH_VADDR || addr != pgd_addr_normalize(addr);
127 (addr > MEM_LOW_END && addr < MEM_HIGH_START);
128} 144}
129 145
130/* 146/*
@@ -152,13 +168,6 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
152 return hv_pte(__insn_exch(&ptep->val, 0UL)); 168 return hv_pte(__insn_exch(&ptep->val, 0UL));
153} 169}
154 170
155/*
156 * pmds are the same as pgds and ptes, so converting is a no-op.
157 */
158#define pmd_pte(pmd) (pmd)
159#define pmdp_ptep(pmdp) (pmdp)
160#define pte_pmd(pte) (pte)
161
162#endif /* __ASSEMBLY__ */ 171#endif /* __ASSEMBLY__ */
163 172
164#endif /* _ASM_TILE_PGTABLE_64_H */ 173#endif /* _ASM_TILE_PGTABLE_64_H */
diff --git a/arch/tile/include/asm/processor.h b/arch/tile/include/asm/processor.h
index b3f104953da2..42323636c459 100644
--- a/arch/tile/include/asm/processor.h
+++ b/arch/tile/include/asm/processor.h
@@ -15,6 +15,8 @@
15#ifndef _ASM_TILE_PROCESSOR_H 15#ifndef _ASM_TILE_PROCESSOR_H
16#define _ASM_TILE_PROCESSOR_H 16#define _ASM_TILE_PROCESSOR_H
17 17
18#include <arch/chip.h>
19
18#ifndef __ASSEMBLY__ 20#ifndef __ASSEMBLY__
19 21
20/* 22/*
@@ -25,7 +27,6 @@
25#include <asm/ptrace.h> 27#include <asm/ptrace.h>
26#include <asm/percpu.h> 28#include <asm/percpu.h>
27 29
28#include <arch/chip.h>
29#include <arch/spr_def.h> 30#include <arch/spr_def.h>
30 31
31struct task_struct; 32struct task_struct;
@@ -110,18 +111,16 @@ struct thread_struct {
110 unsigned long long interrupt_mask; 111 unsigned long long interrupt_mask;
111 /* User interrupt-control 0 state */ 112 /* User interrupt-control 0 state */
112 unsigned long intctrl_0; 113 unsigned long intctrl_0;
113#if CHIP_HAS_PROC_STATUS_SPR() 114 /* Is this task currently doing a backtrace? */
115 bool in_backtrace;
114 /* Any other miscellaneous processor state bits */ 116 /* Any other miscellaneous processor state bits */
115 unsigned long proc_status; 117 unsigned long proc_status;
116#endif
117#if !CHIP_HAS_FIXED_INTVEC_BASE() 118#if !CHIP_HAS_FIXED_INTVEC_BASE()
118 /* Interrupt base for PL0 interrupts */ 119 /* Interrupt base for PL0 interrupts */
119 unsigned long interrupt_vector_base; 120 unsigned long interrupt_vector_base;
120#endif 121#endif
121#if CHIP_HAS_TILE_RTF_HWM()
122 /* Tile cache retry fifo high-water mark */ 122 /* Tile cache retry fifo high-water mark */
123 unsigned long tile_rtf_hwm; 123 unsigned long tile_rtf_hwm;
124#endif
125#if CHIP_HAS_DSTREAM_PF() 124#if CHIP_HAS_DSTREAM_PF()
126 /* Data stream prefetch control */ 125 /* Data stream prefetch control */
127 unsigned long dstream_pf; 126 unsigned long dstream_pf;
@@ -134,21 +133,16 @@ struct thread_struct {
134 /* Async DMA TLB fault information */ 133 /* Async DMA TLB fault information */
135 struct async_tlb dma_async_tlb; 134 struct async_tlb dma_async_tlb;
136#endif 135#endif
137#if CHIP_HAS_SN_PROC()
138 /* Was static network processor when we were switched out? */
139 int sn_proc_running;
140 /* Async SNI TLB fault information */
141 struct async_tlb sn_async_tlb;
142#endif
143}; 136};
144 137
145#endif /* !__ASSEMBLY__ */ 138#endif /* !__ASSEMBLY__ */
146 139
147/* 140/*
148 * Start with "sp" this many bytes below the top of the kernel stack. 141 * Start with "sp" this many bytes below the top of the kernel stack.
149 * This preserves the invariant that a called function may write to *sp. 142 * This allows us to be cache-aware when handling the initial save
143 * of the pt_regs value to the stack.
150 */ 144 */
151#define STACK_TOP_DELTA 8 145#define STACK_TOP_DELTA 64
152 146
153/* 147/*
154 * When entering the kernel via a fault, start with the top of the 148 * When entering the kernel via a fault, start with the top of the
@@ -164,7 +158,7 @@ struct thread_struct {
164#ifndef __ASSEMBLY__ 158#ifndef __ASSEMBLY__
165 159
166#ifdef __tilegx__ 160#ifdef __tilegx__
167#define TASK_SIZE_MAX (MEM_LOW_END + 1) 161#define TASK_SIZE_MAX (_AC(1, UL) << (MAX_VA_WIDTH - 1))
168#else 162#else
169#define TASK_SIZE_MAX PAGE_OFFSET 163#define TASK_SIZE_MAX PAGE_OFFSET
170#endif 164#endif
@@ -178,10 +172,10 @@ struct thread_struct {
178#define TASK_SIZE TASK_SIZE_MAX 172#define TASK_SIZE TASK_SIZE_MAX
179#endif 173#endif
180 174
181/* We provide a minimal "vdso" a la x86; just the sigreturn code for now. */ 175#define VDSO_BASE ((unsigned long)current->active_mm->context.vdso_base)
182#define VDSO_BASE (TASK_SIZE - PAGE_SIZE) 176#define VDSO_SYM(x) (VDSO_BASE + (unsigned long)(x))
183 177
184#define STACK_TOP VDSO_BASE 178#define STACK_TOP TASK_SIZE
185 179
186/* STACK_TOP_MAX is used temporarily in execve and should not check COMPAT. */ 180/* STACK_TOP_MAX is used temporarily in execve and should not check COMPAT. */
187#define STACK_TOP_MAX TASK_SIZE_MAX 181#define STACK_TOP_MAX TASK_SIZE_MAX
@@ -232,21 +226,28 @@ extern int do_work_pending(struct pt_regs *regs, u32 flags);
232unsigned long get_wchan(struct task_struct *p); 226unsigned long get_wchan(struct task_struct *p);
233 227
234/* Return initial ksp value for given task. */ 228/* Return initial ksp value for given task. */
235#define task_ksp0(task) ((unsigned long)(task)->stack + THREAD_SIZE) 229#define task_ksp0(task) \
230 ((unsigned long)(task)->stack + THREAD_SIZE - STACK_TOP_DELTA)
236 231
237/* Return some info about the user process TASK. */ 232/* Return some info about the user process TASK. */
238#define KSTK_TOP(task) (task_ksp0(task) - STACK_TOP_DELTA)
239#define task_pt_regs(task) \ 233#define task_pt_regs(task) \
240 ((struct pt_regs *)(task_ksp0(task) - KSTK_PTREGS_GAP) - 1) 234 ((struct pt_regs *)(task_ksp0(task) - KSTK_PTREGS_GAP) - 1)
241#define current_pt_regs() \ 235#define current_pt_regs() \
242 ((struct pt_regs *)((stack_pointer | (THREAD_SIZE - 1)) - \ 236 ((struct pt_regs *)((stack_pointer | (THREAD_SIZE - 1)) - \
243 (KSTK_PTREGS_GAP - 1)) - 1) 237 STACK_TOP_DELTA - (KSTK_PTREGS_GAP - 1)) - 1)
244#define task_sp(task) (task_pt_regs(task)->sp) 238#define task_sp(task) (task_pt_regs(task)->sp)
245#define task_pc(task) (task_pt_regs(task)->pc) 239#define task_pc(task) (task_pt_regs(task)->pc)
246/* Aliases for pc and sp (used in fs/proc/array.c) */ 240/* Aliases for pc and sp (used in fs/proc/array.c) */
247#define KSTK_EIP(task) task_pc(task) 241#define KSTK_EIP(task) task_pc(task)
248#define KSTK_ESP(task) task_sp(task) 242#define KSTK_ESP(task) task_sp(task)
249 243
244/* Fine-grained unaligned JIT support */
245#define GET_UNALIGN_CTL(tsk, adr) get_unalign_ctl((tsk), (adr))
246#define SET_UNALIGN_CTL(tsk, val) set_unalign_ctl((tsk), (val))
247
248extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr);
249extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);
250
250/* Standard format for printing registers and other word-size data. */ 251/* Standard format for printing registers and other word-size data. */
251#ifdef __tilegx__ 252#ifdef __tilegx__
252# define REGFMT "0x%016lx" 253# define REGFMT "0x%016lx"
@@ -275,7 +276,6 @@ extern char chip_model[64];
275/* Data on which physical memory controller corresponds to which NUMA node. */ 276/* Data on which physical memory controller corresponds to which NUMA node. */
276extern int node_controller[]; 277extern int node_controller[];
277 278
278#if CHIP_HAS_CBOX_HOME_MAP()
279/* Does the heap allocator return hash-for-home pages by default? */ 279/* Does the heap allocator return hash-for-home pages by default? */
280extern int hash_default; 280extern int hash_default;
281 281
@@ -285,11 +285,6 @@ extern int kstack_hash;
285/* Does MAP_ANONYMOUS return hash-for-home pages by default? */ 285/* Does MAP_ANONYMOUS return hash-for-home pages by default? */
286#define uheap_hash hash_default 286#define uheap_hash hash_default
287 287
288#else
289#define hash_default 0
290#define kstack_hash 0
291#define uheap_hash 0
292#endif
293 288
294/* Are we using huge pages in the TLB for kernel data? */ 289/* Are we using huge pages in the TLB for kernel data? */
295extern int kdata_huge; 290extern int kdata_huge;
@@ -337,7 +332,6 @@ extern int kdata_huge;
337 332
338/* 333/*
339 * Provide symbolic constants for PLs. 334 * Provide symbolic constants for PLs.
340 * Note that assembly code assumes that USER_PL is zero.
341 */ 335 */
342#define USER_PL 0 336#define USER_PL 0
343#if CONFIG_KERNEL_PL == 2 337#if CONFIG_KERNEL_PL == 2
@@ -346,20 +340,38 @@ extern int kdata_huge;
346#define KERNEL_PL CONFIG_KERNEL_PL 340#define KERNEL_PL CONFIG_KERNEL_PL
347 341
348/* SYSTEM_SAVE_K_0 holds the current cpu number ORed with ksp0. */ 342/* SYSTEM_SAVE_K_0 holds the current cpu number ORed with ksp0. */
349#define CPU_LOG_MASK_VALUE 12 343#ifdef __tilegx__
350#define CPU_MASK_VALUE ((1 << CPU_LOG_MASK_VALUE) - 1) 344#define CPU_SHIFT 48
351#if CONFIG_NR_CPUS > CPU_MASK_VALUE 345#if CHIP_VA_WIDTH() > CPU_SHIFT
352# error Too many cpus! 346# error Too many VA bits!
353#endif 347#endif
348#define MAX_CPU_ID ((1 << (64 - CPU_SHIFT)) - 1)
349#define raw_smp_processor_id() \
350 ((int)(__insn_mfspr(SPR_SYSTEM_SAVE_K_0) >> CPU_SHIFT))
351#define get_current_ksp0() \
352 ((unsigned long)(((long)__insn_mfspr(SPR_SYSTEM_SAVE_K_0) << \
353 (64 - CPU_SHIFT)) >> (64 - CPU_SHIFT)))
354#define next_current_ksp0(task) ({ \
355 unsigned long __ksp0 = task_ksp0(task) & ((1UL << CPU_SHIFT) - 1); \
356 unsigned long __cpu = (long)raw_smp_processor_id() << CPU_SHIFT; \
357 __ksp0 | __cpu; \
358})
359#else
360#define LOG2_NR_CPU_IDS 6
361#define MAX_CPU_ID ((1 << LOG2_NR_CPU_IDS) - 1)
354#define raw_smp_processor_id() \ 362#define raw_smp_processor_id() \
355 ((int)__insn_mfspr(SPR_SYSTEM_SAVE_K_0) & CPU_MASK_VALUE) 363 ((int)__insn_mfspr(SPR_SYSTEM_SAVE_K_0) & MAX_CPU_ID)
356#define get_current_ksp0() \ 364#define get_current_ksp0() \
357 (__insn_mfspr(SPR_SYSTEM_SAVE_K_0) & ~CPU_MASK_VALUE) 365 (__insn_mfspr(SPR_SYSTEM_SAVE_K_0) & ~MAX_CPU_ID)
358#define next_current_ksp0(task) ({ \ 366#define next_current_ksp0(task) ({ \
359 unsigned long __ksp0 = task_ksp0(task); \ 367 unsigned long __ksp0 = task_ksp0(task); \
360 int __cpu = raw_smp_processor_id(); \ 368 int __cpu = raw_smp_processor_id(); \
361 BUG_ON(__ksp0 & CPU_MASK_VALUE); \ 369 BUG_ON(__ksp0 & MAX_CPU_ID); \
362 __ksp0 | __cpu; \ 370 __ksp0 | __cpu; \
363}) 371})
372#endif
373#if CONFIG_NR_CPUS > (MAX_CPU_ID + 1)
374# error Too many cpus!
375#endif
364 376
365#endif /* _ASM_TILE_PROCESSOR_H */ 377#endif /* _ASM_TILE_PROCESSOR_H */
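To make the new tilegx SYSTEM_SAVE_K_0 layout concrete, here is a small illustrative sketch of the pack/unpack arithmetic the macros above perform; the ssk0_* helpers are hypothetical names, mirroring next_current_ksp0() and get_current_ksp0().

/*
 * Illustrative only: cpu number lives in bits 48..63, and the
 * sign-extended ksp0 value lives in bits 0..47 (CPU_SHIFT == 48).
 */
#define SSK0_CPU_SHIFT 48	/* matches CPU_SHIFT above */

static inline unsigned long ssk0_pack(unsigned long ksp0, int cpu)
{
	return (ksp0 & ((1UL << SSK0_CPU_SHIFT) - 1)) |
	       ((unsigned long)cpu << SSK0_CPU_SHIFT);
}

static inline int ssk0_cpu(unsigned long val)
{
	return (int)(val >> SSK0_CPU_SHIFT);
}

static inline unsigned long ssk0_ksp0(unsigned long val)
{
	/* Shift up, then arithmetic-shift back down to sign-extend bit 47. */
	return (unsigned long)(((long)val << (64 - SSK0_CPU_SHIFT)) >>
			       (64 - SSK0_CPU_SHIFT));
}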
diff --git a/arch/tile/include/asm/ptrace.h b/arch/tile/include/asm/ptrace.h
index fd412260aff7..b9620c077abc 100644
--- a/arch/tile/include/asm/ptrace.h
+++ b/arch/tile/include/asm/ptrace.h
@@ -33,12 +33,13 @@ typedef unsigned long pt_reg_t;
33 33
34#ifndef __ASSEMBLY__ 34#ifndef __ASSEMBLY__
35 35
36#define regs_return_value(regs) ((regs)->regs[0])
36#define instruction_pointer(regs) ((regs)->pc) 37#define instruction_pointer(regs) ((regs)->pc)
37#define profile_pc(regs) instruction_pointer(regs) 38#define profile_pc(regs) instruction_pointer(regs)
38#define user_stack_pointer(regs) ((regs)->sp) 39#define user_stack_pointer(regs) ((regs)->sp)
39 40
40/* Does the process account for user or for system time? */ 41/* Does the process account for user or for system time? */
41#define user_mode(regs) (EX1_PL((regs)->ex1) == USER_PL) 42#define user_mode(regs) (EX1_PL((regs)->ex1) < KERNEL_PL)
42 43
43/* Fill in a struct pt_regs with the current kernel registers. */ 44/* Fill in a struct pt_regs with the current kernel registers. */
44struct pt_regs *get_pt_regs(struct pt_regs *); 45struct pt_regs *get_pt_regs(struct pt_regs *);
@@ -79,8 +80,7 @@ extern void single_step_execve(void);
79 80
80struct task_struct; 81struct task_struct;
81 82
82extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, 83extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs);
83 int error_code);
84 84
85#ifdef __tilegx__ 85#ifdef __tilegx__
86/* We need this since sigval_t has a user pointer in it, for GETSIGINFO etc. */ 86/* We need this since sigval_t has a user pointer in it, for GETSIGINFO etc. */
diff --git a/arch/tile/include/asm/sections.h b/arch/tile/include/asm/sections.h
index 7d8a935a9238..5d5d3b739a6b 100644
--- a/arch/tile/include/asm/sections.h
+++ b/arch/tile/include/asm/sections.h
@@ -25,10 +25,16 @@ extern char _sinitdata[], _einitdata[];
25/* Write-once data is writable only till the end of initialization. */ 25/* Write-once data is writable only till the end of initialization. */
26extern char __w1data_begin[], __w1data_end[]; 26extern char __w1data_begin[], __w1data_end[];
27 27
28extern char vdso_start[], vdso_end[];
29#ifdef CONFIG_COMPAT
30extern char vdso32_start[], vdso32_end[];
31#endif
28 32
29/* Not exactly sections, but PC comparison points in the code. */ 33/* Not exactly sections, but PC comparison points in the code. */
30extern char __rt_sigreturn[], __rt_sigreturn_end[]; 34extern char __rt_sigreturn[], __rt_sigreturn_end[];
31#ifndef __tilegx__ 35#ifdef __tilegx__
36extern char __start_unalign_asm_code[], __end_unalign_asm_code[];
37#else
32extern char sys_cmpxchg[], __sys_cmpxchg_end[]; 38extern char sys_cmpxchg[], __sys_cmpxchg_end[];
33extern char __sys_cmpxchg_grab_lock[]; 39extern char __sys_cmpxchg_grab_lock[];
34extern char __start_atomic_asm_code[], __end_atomic_asm_code[]; 40extern char __start_atomic_asm_code[], __end_atomic_asm_code[];
diff --git a/arch/tile/include/asm/setup.h b/arch/tile/include/asm/setup.h
index d048888c5d9a..e98909033e5b 100644
--- a/arch/tile/include/asm/setup.h
+++ b/arch/tile/include/asm/setup.h
@@ -24,9 +24,8 @@
24 */ 24 */
25#define MAXMEM_PFN PFN_DOWN(MAXMEM) 25#define MAXMEM_PFN PFN_DOWN(MAXMEM)
26 26
27int tile_console_write(const char *buf, int count);
27void early_panic(const char *fmt, ...); 28void early_panic(const char *fmt, ...);
28void warn_early_printk(void);
29void __init disable_early_printk(void);
30 29
31/* Init-time routine to do tile-specific per-cpu setup. */ 30/* Init-time routine to do tile-specific per-cpu setup. */
32void setup_cpu(int boot); 31void setup_cpu(int boot);
diff --git a/arch/tile/include/asm/smp.h b/arch/tile/include/asm/smp.h
index 1aa759aeb5b3..9a326b64f7ae 100644
--- a/arch/tile/include/asm/smp.h
+++ b/arch/tile/include/asm/smp.h
@@ -101,10 +101,8 @@ void print_disabled_cpus(void);
101extern struct cpumask cpu_lotar_map; 101extern struct cpumask cpu_lotar_map;
102#define cpu_is_valid_lotar(cpu) cpumask_test_cpu((cpu), &cpu_lotar_map) 102#define cpu_is_valid_lotar(cpu) cpumask_test_cpu((cpu), &cpu_lotar_map)
103 103
104#if CHIP_HAS_CBOX_HOME_MAP()
105/* Which processors are used for hash-for-home mapping */ 104/* Which processors are used for hash-for-home mapping */
106extern struct cpumask hash_for_home_map; 105extern struct cpumask hash_for_home_map;
107#endif
108 106
109/* Which cpus can have their cache flushed by hv_flush_remote(). */ 107/* Which cpus can have their cache flushed by hv_flush_remote(). */
110extern struct cpumask cpu_cacheable_map; 108extern struct cpumask cpu_cacheable_map;
diff --git a/arch/tile/include/asm/spinlock_64.h b/arch/tile/include/asm/spinlock_64.h
index 5f8b6a095fd8..9a12b9c7e5d3 100644
--- a/arch/tile/include/asm/spinlock_64.h
+++ b/arch/tile/include/asm/spinlock_64.h
@@ -27,7 +27,7 @@
27 * Return the "current" portion of a ticket lock value, 27 * Return the "current" portion of a ticket lock value,
28 * i.e. the number that currently owns the lock. 28 * i.e. the number that currently owns the lock.
29 */ 29 */
30static inline int arch_spin_current(u32 val) 30static inline u32 arch_spin_current(u32 val)
31{ 31{
32 return val >> __ARCH_SPIN_CURRENT_SHIFT; 32 return val >> __ARCH_SPIN_CURRENT_SHIFT;
33} 33}
@@ -36,7 +36,7 @@ static inline int arch_spin_current(u32 val)
36 * Return the "next" portion of a ticket lock value, 36 * Return the "next" portion of a ticket lock value,
37 * i.e. the number that the next task to try to acquire the lock will get. 37 * i.e. the number that the next task to try to acquire the lock will get.
38 */ 38 */
39static inline int arch_spin_next(u32 val) 39static inline u32 arch_spin_next(u32 val)
40{ 40{
41 return val & __ARCH_SPIN_NEXT_MASK; 41 return val & __ARCH_SPIN_NEXT_MASK;
42} 42}
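For context, the two accessors above encode the usual ticket-lock invariant; a one-line illustrative check follows (example_spin_is_locked() is a hypothetical name, not from the patch).

/*
 * Illustrative only: the lock is held exactly when the owning ticket
 * differs from the next ticket to be handed out; equal means free.
 */
static inline int example_spin_is_locked(u32 val)
{
	return arch_spin_current(val) != arch_spin_next(val);
}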
diff --git a/arch/tile/include/asm/string.h b/arch/tile/include/asm/string.h
index 7535cf1a30e4..92b271bd9ebd 100644
--- a/arch/tile/include/asm/string.h
+++ b/arch/tile/include/asm/string.h
@@ -21,8 +21,10 @@
21#define __HAVE_ARCH_MEMMOVE 21#define __HAVE_ARCH_MEMMOVE
22#define __HAVE_ARCH_STRCHR 22#define __HAVE_ARCH_STRCHR
23#define __HAVE_ARCH_STRLEN 23#define __HAVE_ARCH_STRLEN
24#define __HAVE_ARCH_STRNLEN
24 25
25extern __kernel_size_t strlen(const char *); 26extern __kernel_size_t strlen(const char *);
27extern __kernel_size_t strnlen(const char *, __kernel_size_t);
26extern char *strchr(const char *s, int c); 28extern char *strchr(const char *s, int c);
27extern void *memchr(const void *s, int c, size_t n); 29extern void *memchr(const void *s, int c, size_t n);
28extern void *memset(void *, int, __kernel_size_t); 30extern void *memset(void *, int, __kernel_size_t);
diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h
index d1733dee98a2..b8aa6df3e102 100644
--- a/arch/tile/include/asm/thread_info.h
+++ b/arch/tile/include/asm/thread_info.h
@@ -39,6 +39,11 @@ struct thread_info {
39 struct restart_block restart_block; 39 struct restart_block restart_block;
40 struct single_step_state *step_state; /* single step state 40 struct single_step_state *step_state; /* single step state
41 (if non-zero) */ 41 (if non-zero) */
42 int align_ctl; /* controls unaligned access */
43#ifdef __tilegx__
44 unsigned long unalign_jit_tmp[4]; /* temp r0..r3 storage */
45 void __user *unalign_jit_base; /* unalign fixup JIT base */
46#endif
42}; 47};
43 48
44/* 49/*
@@ -56,6 +61,7 @@ struct thread_info {
56 .fn = do_no_restart_syscall, \ 61 .fn = do_no_restart_syscall, \
57 }, \ 62 }, \
58 .step_state = NULL, \ 63 .step_state = NULL, \
64 .align_ctl = 0, \
59} 65}
60 66
61#define init_thread_info (init_thread_union.thread_info) 67#define init_thread_info (init_thread_union.thread_info)
diff --git a/arch/tile/include/asm/topology.h b/arch/tile/include/asm/topology.h
index d5e86c9f74fd..d15c0d8d550f 100644
--- a/arch/tile/include/asm/topology.h
+++ b/arch/tile/include/asm/topology.h
@@ -89,9 +89,6 @@ static inline const struct cpumask *cpumask_of_node(int node)
89#define topology_core_id(cpu) (cpu) 89#define topology_core_id(cpu) (cpu)
90#define topology_core_cpumask(cpu) ((void)(cpu), cpu_online_mask) 90#define topology_core_cpumask(cpu) ((void)(cpu), cpu_online_mask)
91#define topology_thread_cpumask(cpu) cpumask_of(cpu) 91#define topology_thread_cpumask(cpu) cpumask_of(cpu)
92
93/* indicates that pointers to the topology struct cpumask maps are valid */
94#define arch_provides_topology_pointers yes
95#endif 92#endif
96 93
97#endif /* _ASM_TILE_TOPOLOGY_H */ 94#endif /* _ASM_TILE_TOPOLOGY_H */
diff --git a/arch/tile/include/asm/traps.h b/arch/tile/include/asm/traps.h
index e28c3df4176a..4b99a1c3aab2 100644
--- a/arch/tile/include/asm/traps.h
+++ b/arch/tile/include/asm/traps.h
@@ -15,12 +15,13 @@
15#ifndef _ASM_TILE_TRAPS_H 15#ifndef _ASM_TILE_TRAPS_H
16#define _ASM_TILE_TRAPS_H 16#define _ASM_TILE_TRAPS_H
17 17
18#ifndef __ASSEMBLY__
18#include <arch/chip.h> 19#include <arch/chip.h>
19 20
20/* mm/fault.c */ 21/* mm/fault.c */
21void do_page_fault(struct pt_regs *, int fault_num, 22void do_page_fault(struct pt_regs *, int fault_num,
22 unsigned long address, unsigned long write); 23 unsigned long address, unsigned long write);
23#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC() 24#if CHIP_HAS_TILE_DMA()
24void do_async_page_fault(struct pt_regs *); 25void do_async_page_fault(struct pt_regs *);
25#endif 26#endif
26 27
@@ -69,6 +70,16 @@ void gx_singlestep_handle(struct pt_regs *, int fault_num);
69 70
70/* kernel/intvec_64.S */ 71/* kernel/intvec_64.S */
71void fill_ra_stack(void); 72void fill_ra_stack(void);
73
74/* Handle unalign data fixup. */
75extern void do_unaligned(struct pt_regs *regs, int vecnum);
76#endif
77
78#endif /* __ASSEMBLY__ */
79
80#ifdef __tilegx__
81/* 128 byte JIT per unalign fixup. */
82#define UNALIGN_JIT_SHIFT 7
72#endif 83#endif
73 84
74#endif /* _ASM_TILE_TRAPS_H */ 85#endif /* _ASM_TILE_TRAPS_H */
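As a hedged illustration of the UNALIGN_JIT_SHIFT constant above: each unaligned-access fixup occupies a 128-byte JIT slot, so a slot address is just a shift and add. unalign_jit_slot() is a hypothetical helper for illustration.

/* Illustrative only: address of the JIT slot for fixup number n. */
static inline void __user *unalign_jit_slot(void __user *base, int n)
{
	return base + ((unsigned long)n << UNALIGN_JIT_SHIFT);
}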
diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
index e4d44bd7df27..b6cde3209b96 100644
--- a/arch/tile/include/asm/uaccess.h
+++ b/arch/tile/include/asm/uaccess.h
@@ -127,8 +127,10 @@ extern int fixup_exception(struct pt_regs *regs);
127 127
128#ifdef __LP64__ 128#ifdef __LP64__
129#define _ASM_PTR ".quad" 129#define _ASM_PTR ".quad"
130#define _ASM_ALIGN ".align 8"
130#else 131#else
131#define _ASM_PTR ".long" 132#define _ASM_PTR ".long"
133#define _ASM_ALIGN ".align 4"
132#endif 134#endif
133 135
134#define __get_user_asm(OP, x, ptr, ret) \ 136#define __get_user_asm(OP, x, ptr, ret) \
@@ -137,6 +139,7 @@ extern int fixup_exception(struct pt_regs *regs);
137 "0: { movei %1, 0; movei %0, %3 }\n" \ 139 "0: { movei %1, 0; movei %0, %3 }\n" \
138 "j 9f\n" \ 140 "j 9f\n" \
139 ".section __ex_table,\"a\"\n" \ 141 ".section __ex_table,\"a\"\n" \
142 _ASM_ALIGN "\n" \
140 _ASM_PTR " 1b, 0b\n" \ 143 _ASM_PTR " 1b, 0b\n" \
141 ".popsection\n" \ 144 ".popsection\n" \
142 "9:" \ 145 "9:" \
@@ -168,6 +171,7 @@ extern int fixup_exception(struct pt_regs *regs);
168 "0: { movei %1, 0; movei %2, 0 }\n" \ 171 "0: { movei %1, 0; movei %2, 0 }\n" \
169 "{ movei %0, %4; j 9f }\n" \ 172 "{ movei %0, %4; j 9f }\n" \
170 ".section __ex_table,\"a\"\n" \ 173 ".section __ex_table,\"a\"\n" \
174 ".align 4\n" \
171 ".word 1b, 0b\n" \ 175 ".word 1b, 0b\n" \
172 ".word 2b, 0b\n" \ 176 ".word 2b, 0b\n" \
173 ".popsection\n" \ 177 ".popsection\n" \
@@ -224,6 +228,7 @@ extern int __get_user_bad(void)
224 ".pushsection .fixup,\"ax\"\n" \ 228 ".pushsection .fixup,\"ax\"\n" \
225 "0: { movei %0, %3; j 9f }\n" \ 229 "0: { movei %0, %3; j 9f }\n" \
226 ".section __ex_table,\"a\"\n" \ 230 ".section __ex_table,\"a\"\n" \
231 _ASM_ALIGN "\n" \
227 _ASM_PTR " 1b, 0b\n" \ 232 _ASM_PTR " 1b, 0b\n" \
228 ".popsection\n" \ 233 ".popsection\n" \
229 "9:" \ 234 "9:" \
@@ -248,6 +253,7 @@ extern int __get_user_bad(void)
248 ".pushsection .fixup,\"ax\"\n" \ 253 ".pushsection .fixup,\"ax\"\n" \
249 "0: { movei %0, %4; j 9f }\n" \ 254 "0: { movei %0, %4; j 9f }\n" \
250 ".section __ex_table,\"a\"\n" \ 255 ".section __ex_table,\"a\"\n" \
256 ".align 4\n" \
251 ".word 1b, 0b\n" \ 257 ".word 1b, 0b\n" \
252 ".word 2b, 0b\n" \ 258 ".word 2b, 0b\n" \
253 ".popsection\n" \ 259 ".popsection\n" \
@@ -567,37 +573,6 @@ static inline unsigned long __must_check flush_user(
567} 573}
568 574
569/** 575/**
570 * inv_user: - Invalidate a block of memory in user space from cache.
571 * @mem: Destination address, in user space.
572 * @len: Number of bytes to invalidate.
573 *
574 * Returns number of bytes that could not be invalidated.
575 * On success, this will be zero.
576 *
577 * Note that on Tile64, the "inv" operation is in fact a
578 * "flush and invalidate", so cache write-backs will occur prior
579 * to the cache being marked invalid.
580 */
581extern unsigned long inv_user_asm(void __user *mem, unsigned long len);
582static inline unsigned long __must_check __inv_user(
583 void __user *mem, unsigned long len)
584{
585 int retval;
586
587 might_fault();
588 retval = inv_user_asm(mem, len);
589 mb_incoherent();
590 return retval;
591}
592static inline unsigned long __must_check inv_user(
593 void __user *mem, unsigned long len)
594{
595 if (access_ok(VERIFY_WRITE, mem, len))
596 return __inv_user(mem, len);
597 return len;
598}
599
600/**
601 * finv_user: - Flush-inval a block of memory in user space from cache. 576 * finv_user: - Flush-inval a block of memory in user space from cache.
602 * @mem: Destination address, in user space. 577 * @mem: Destination address, in user space.
603 * @len: Number of bytes to invalidate. 578 * @len: Number of bytes to invalidate.
diff --git a/arch/tile/include/asm/unaligned.h b/arch/tile/include/asm/unaligned.h
index 37dfbe598872..5a58a0d11449 100644
--- a/arch/tile/include/asm/unaligned.h
+++ b/arch/tile/include/asm/unaligned.h
@@ -15,11 +15,15 @@
15#ifndef _ASM_TILE_UNALIGNED_H 15#ifndef _ASM_TILE_UNALIGNED_H
16#define _ASM_TILE_UNALIGNED_H 16#define _ASM_TILE_UNALIGNED_H
17 17
18#include <linux/unaligned/le_struct.h>
19#include <linux/unaligned/be_byteshift.h>
20#include <linux/unaligned/generic.h>
21#define get_unaligned __get_unaligned_le
22#define put_unaligned __put_unaligned_le
 18/*
 19 * We could implement a faster get_unaligned_[be/le]64 using the ldna
 20 * instruction on tilegx; however, we would need to either copy all of
 21 * the other generic functions here (which is pretty ugly) or else
 22 * modify both the generic code and other arch code to allow arch-
 23 * specific unaligned data access functions. Given that these functions
 24 * are not often called, we'll stick with the generic version.
 25 */
 26#include <asm-generic/unaligned.h>
23 27
24/* 28/*
25 * Is the kernel doing fixups of unaligned accesses? If <0, no kernel 29 * Is the kernel doing fixups of unaligned accesses? If <0, no kernel
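For illustration, callers are unchanged by the switch to asm-generic: a minimal, hedged example of decoding a little-endian 64-bit value from an arbitrarily aligned pointer (decode_le64 is a hypothetical helper).

/* Example only: uses the generic accessor from <asm-generic/unaligned.h>. */
#include <linux/types.h>
#include <asm/unaligned.h>

static inline u64 decode_le64(const void *p)
{
	return get_unaligned_le64(p);
}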
diff --git a/arch/tile/include/asm/vdso.h b/arch/tile/include/asm/vdso.h
new file mode 100644
index 000000000000..9f6a78d665fa
--- /dev/null
+++ b/arch/tile/include/asm/vdso.h
@@ -0,0 +1,49 @@
1/*
2 * Copyright 2012 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15#ifndef __TILE_VDSO_H__
16#define __TILE_VDSO_H__
17
18#include <linux/types.h>
19
20/*
21 * Note about the vdso_data structure:
22 *
23 * NEVER USE THEM IN USERSPACE CODE DIRECTLY. The layout of the
24 * structure is supposed to be known only to the function in the vdso
25 * itself and may change without notice.
26 */
27
28struct vdso_data {
29 __u64 tz_update_count; /* Timezone atomicity ctr */
30 __u64 tb_update_count; /* Timebase atomicity ctr */
31 __u64 xtime_tod_stamp; /* TOD clock for xtime */
32 __u64 xtime_clock_sec; /* Kernel time second */
33 __u64 xtime_clock_nsec; /* Kernel time nanosecond */
34 __u64 wtom_clock_sec; /* Wall to monotonic clock second */
35 __u64 wtom_clock_nsec; /* Wall to monotonic clock nanosecond */
36 __u32 mult; /* Cycle to nanosecond multiplier */
37 __u32 shift; /* Cycle to nanosecond divisor (power of two) */
38 __u32 tz_minuteswest; /* Minutes west of Greenwich */
39 __u32 tz_dsttime; /* Type of dst correction */
40};
41
42extern struct vdso_data *vdso_data;
43
44/* __vdso_rt_sigreturn is defined with the addresses in the vdso page. */
45extern void __vdso_rt_sigreturn(void);
46
47extern int setup_vdso_pages(void);
48
49#endif /* __TILE_VDSO_H__ */
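As a sketch of how the update counters above are meant to be used, here is a hedged, illustrative seqlock-style reader. It is not the actual vDSO code, and a real reader also needs read barriers between the loads.

/*
 * Illustrative only: retry while the kernel is mid-update. An odd
 * tb_update_count, or a count that changes between the two reads,
 * means the snapshot was torn and must be retried.
 */
static inline __u64 vdso_read_xtime_sec(const struct vdso_data *vd)
{
	__u64 count, sec;

	do {
		count = vd->tb_update_count;
		sec = vd->xtime_clock_sec;
	} while ((count & 1) || count != vd->tb_update_count);

	return sec;
}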
diff --git a/arch/tile/include/gxio/iorpc_mpipe.h b/arch/tile/include/gxio/iorpc_mpipe.h
index 9d50fce1b1a7..fdd07f88cfd7 100644
--- a/arch/tile/include/gxio/iorpc_mpipe.h
+++ b/arch/tile/include/gxio/iorpc_mpipe.h
@@ -44,10 +44,13 @@
44#define GXIO_MPIPE_OP_REGISTER_CLIENT_MEMORY IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1210) 44#define GXIO_MPIPE_OP_REGISTER_CLIENT_MEMORY IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1210)
45#define GXIO_MPIPE_OP_LINK_OPEN_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1211) 45#define GXIO_MPIPE_OP_LINK_OPEN_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1211)
46#define GXIO_MPIPE_OP_LINK_CLOSE_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1212) 46#define GXIO_MPIPE_OP_LINK_CLOSE_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1212)
47#define GXIO_MPIPE_OP_LINK_SET_ATTR_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1213)
47 48
48#define GXIO_MPIPE_OP_GET_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x121e) 49#define GXIO_MPIPE_OP_GET_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x121e)
49#define GXIO_MPIPE_OP_SET_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x121f) 50#define GXIO_MPIPE_OP_SET_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x121f)
50#define GXIO_MPIPE_OP_ADJUST_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1220) 51#define GXIO_MPIPE_OP_ADJUST_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1220)
52#define GXIO_MPIPE_OP_CONFIG_EDMA_RING_BLKS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1221)
53#define GXIO_MPIPE_OP_ADJUST_TIMESTAMP_FREQ IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1222)
51#define GXIO_MPIPE_OP_ARM_POLLFD IORPC_OPCODE(IORPC_FORMAT_KERNEL_POLLFD, 0x9000) 54#define GXIO_MPIPE_OP_ARM_POLLFD IORPC_OPCODE(IORPC_FORMAT_KERNEL_POLLFD, 0x9000)
52#define GXIO_MPIPE_OP_CLOSE_POLLFD IORPC_OPCODE(IORPC_FORMAT_KERNEL_POLLFD, 0x9001) 55#define GXIO_MPIPE_OP_CLOSE_POLLFD IORPC_OPCODE(IORPC_FORMAT_KERNEL_POLLFD, 0x9001)
53#define GXIO_MPIPE_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000) 56#define GXIO_MPIPE_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000)
@@ -114,6 +117,8 @@ int gxio_mpipe_link_open_aux(gxio_mpipe_context_t * context,
114 117
115int gxio_mpipe_link_close_aux(gxio_mpipe_context_t * context, int mac); 118int gxio_mpipe_link_close_aux(gxio_mpipe_context_t * context, int mac);
116 119
120int gxio_mpipe_link_set_attr_aux(gxio_mpipe_context_t * context, int mac,
121 uint32_t attr, int64_t val);
117 122
118int gxio_mpipe_get_timestamp_aux(gxio_mpipe_context_t * context, uint64_t * sec, 123int gxio_mpipe_get_timestamp_aux(gxio_mpipe_context_t * context, uint64_t * sec,
119 uint64_t * nsec, uint64_t * cycles); 124 uint64_t * nsec, uint64_t * cycles);
@@ -124,6 +129,9 @@ int gxio_mpipe_set_timestamp_aux(gxio_mpipe_context_t * context, uint64_t sec,
124int gxio_mpipe_adjust_timestamp_aux(gxio_mpipe_context_t * context, 129int gxio_mpipe_adjust_timestamp_aux(gxio_mpipe_context_t * context,
125 int64_t nsec); 130 int64_t nsec);
126 131
132int gxio_mpipe_adjust_timestamp_freq(gxio_mpipe_context_t * context,
133 int32_t ppb);
134
127int gxio_mpipe_arm_pollfd(gxio_mpipe_context_t * context, int pollfd_cookie); 135int gxio_mpipe_arm_pollfd(gxio_mpipe_context_t * context, int pollfd_cookie);
128 136
129int gxio_mpipe_close_pollfd(gxio_mpipe_context_t * context, int pollfd_cookie); 137int gxio_mpipe_close_pollfd(gxio_mpipe_context_t * context, int pollfd_cookie);
diff --git a/arch/tile/include/gxio/iorpc_mpipe_info.h b/arch/tile/include/gxio/iorpc_mpipe_info.h
index 0bcf3f71ce8b..476c5e5ca22c 100644
--- a/arch/tile/include/gxio/iorpc_mpipe_info.h
+++ b/arch/tile/include/gxio/iorpc_mpipe_info.h
@@ -27,11 +27,15 @@
27#include <asm/pgtable.h> 27#include <asm/pgtable.h>
28 28
29 29
30#define GXIO_MPIPE_INFO_OP_INSTANCE_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1250)
30#define GXIO_MPIPE_INFO_OP_ENUMERATE_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1251) 31#define GXIO_MPIPE_INFO_OP_ENUMERATE_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1251)
31#define GXIO_MPIPE_INFO_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000) 32#define GXIO_MPIPE_INFO_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000)
32#define GXIO_MPIPE_INFO_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001) 33#define GXIO_MPIPE_INFO_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001)
33 34
34 35
36int gxio_mpipe_info_instance_aux(gxio_mpipe_info_context_t * context,
37 _gxio_mpipe_link_name_t name);
38
35int gxio_mpipe_info_enumerate_aux(gxio_mpipe_info_context_t * context, 39int gxio_mpipe_info_enumerate_aux(gxio_mpipe_info_context_t * context,
36 unsigned int idx, 40 unsigned int idx,
37 _gxio_mpipe_link_name_t * name, 41 _gxio_mpipe_link_name_t * name,
diff --git a/arch/tile/include/gxio/iorpc_trio.h b/arch/tile/include/gxio/iorpc_trio.h
index 58105c31228b..d95b96fd6c93 100644
--- a/arch/tile/include/gxio/iorpc_trio.h
+++ b/arch/tile/include/gxio/iorpc_trio.h
@@ -30,6 +30,7 @@
30 30
31#define GXIO_TRIO_OP_ALLOC_MEMORY_MAPS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1404) 31#define GXIO_TRIO_OP_ALLOC_MEMORY_MAPS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1404)
32 32
33#define GXIO_TRIO_OP_ALLOC_SCATTER_QUEUES IORPC_OPCODE(IORPC_FORMAT_NONE, 0x140e)
33#define GXIO_TRIO_OP_ALLOC_PIO_REGIONS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1412) 34#define GXIO_TRIO_OP_ALLOC_PIO_REGIONS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1412)
34 35
35#define GXIO_TRIO_OP_INIT_PIO_REGION_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1414) 36#define GXIO_TRIO_OP_INIT_PIO_REGION_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1414)
@@ -54,6 +55,10 @@ int gxio_trio_alloc_memory_maps(gxio_trio_context_t * context,
54 unsigned int flags); 55 unsigned int flags);
55 56
56 57
58int gxio_trio_alloc_scatter_queues(gxio_trio_context_t * context,
59 unsigned int count, unsigned int first,
60 unsigned int flags);
61
57int gxio_trio_alloc_pio_regions(gxio_trio_context_t * context, 62int gxio_trio_alloc_pio_regions(gxio_trio_context_t * context,
58 unsigned int count, unsigned int first, 63 unsigned int count, unsigned int first,
59 unsigned int flags); 64 unsigned int flags);
diff --git a/arch/tile/include/gxio/iorpc_uart.h b/arch/tile/include/gxio/iorpc_uart.h
new file mode 100644
index 000000000000..55429d48ea56
--- /dev/null
+++ b/arch/tile/include/gxio/iorpc_uart.h
@@ -0,0 +1,40 @@
1/*
2 * Copyright 2013 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15/* This file is machine-generated; DO NOT EDIT! */
16#ifndef __GXIO_UART_LINUX_RPC_H__
17#define __GXIO_UART_LINUX_RPC_H__
18
19#include <hv/iorpc.h>
20
21#include <hv/drv_uart_intf.h>
22#include <gxio/uart.h>
23#include <gxio/kiorpc.h>
24#include <linux/string.h>
25#include <linux/module.h>
26#include <asm/pgtable.h>
27
28#define GXIO_UART_OP_CFG_INTERRUPT IORPC_OPCODE(IORPC_FORMAT_KERNEL_INTERRUPT, 0x1900)
29#define GXIO_UART_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000)
30#define GXIO_UART_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001)
31
32int gxio_uart_cfg_interrupt(gxio_uart_context_t *context, int inter_x,
33 int inter_y, int inter_ipi, int inter_event);
34
35int gxio_uart_get_mmio_base(gxio_uart_context_t *context, HV_PTE *base);
36
37int gxio_uart_check_mmio_offset(gxio_uart_context_t *context,
38 unsigned long offset, unsigned long size);
39
40#endif /* !__GXIO_UART_LINUX_RPC_H__ */
diff --git a/arch/tile/include/gxio/mpipe.h b/arch/tile/include/gxio/mpipe.h
index b74f470ed11e..e37cf4f0cffd 100644
--- a/arch/tile/include/gxio/mpipe.h
+++ b/arch/tile/include/gxio/mpipe.h
@@ -220,6 +220,13 @@ typedef MPIPE_PDESC_t gxio_mpipe_idesc_t;
220 */ 220 */
221typedef MPIPE_EDMA_DESC_t gxio_mpipe_edesc_t; 221typedef MPIPE_EDMA_DESC_t gxio_mpipe_edesc_t;
222 222
223/*
224 * Max # of mpipe instances. 2 currently.
225 */
226#define GXIO_MPIPE_INSTANCE_MAX HV_MPIPE_INSTANCE_MAX
227
228#define NR_MPIPE_MAX GXIO_MPIPE_INSTANCE_MAX
229
223/* Get the "va" field from an "idesc". 230/* Get the "va" field from an "idesc".
224 * 231 *
225 * This is the address at which the ingress hardware copied the first 232 * This is the address at which the ingress hardware copied the first
@@ -311,6 +318,9 @@ typedef struct {
311 /* File descriptor for calling up to Linux (and thus the HV). */ 318 /* File descriptor for calling up to Linux (and thus the HV). */
312 int fd; 319 int fd;
313 320
321 /* Corresponding mpipe instance #. */
322 int instance;
323
314 /* The VA at which configuration registers are mapped. */ 324 /* The VA at which configuration registers are mapped. */
315 char *mmio_cfg_base; 325 char *mmio_cfg_base;
316 326
@@ -810,7 +820,7 @@ extern int gxio_mpipe_alloc_edma_rings(gxio_mpipe_context_t *context,
810/* Initialize an eDMA ring, using the given memory and size. 820/* Initialize an eDMA ring, using the given memory and size.
811 * 821 *
812 * @param context An initialized mPIPE context. 822 * @param context An initialized mPIPE context.
813 * @param ring The eDMA ring index. 823 * @param ering The eDMA ring index.
814 * @param channel The channel to use. This must be one of the channels 824 * @param channel The channel to use. This must be one of the channels
815 * associated with the context's set of open links. 825 * associated with the context's set of open links.
816 * @param mem A physically contiguous region of memory to be filled 826 * @param mem A physically contiguous region of memory to be filled
@@ -823,10 +833,37 @@ extern int gxio_mpipe_alloc_edma_rings(gxio_mpipe_context_t *context,
823 * ::GXIO_ERR_INVAL_MEMORY_SIZE on failure. 833 * ::GXIO_ERR_INVAL_MEMORY_SIZE on failure.
824 */ 834 */
825extern int gxio_mpipe_init_edma_ring(gxio_mpipe_context_t *context, 835extern int gxio_mpipe_init_edma_ring(gxio_mpipe_context_t *context,
826 unsigned int ring, unsigned int channel, 836 unsigned int ering, unsigned int channel,
827 void *mem, size_t mem_size, 837 void *mem, size_t mem_size,
828 unsigned int mem_flags); 838 unsigned int mem_flags);
829 839
840/* Set the "max_blks", "min_snf_blks", and "db" fields of
841 * ::MPIPE_EDMA_RG_INIT_DAT_THRESH_t for a given edma ring.
842 *
843 * The global pool of dynamic blocks will be automatically adjusted.
844 *
845 * This function should not be called after any egress has been done
846 * on the edma ring.
847 *
848 * Most applications should just use gxio_mpipe_equeue_set_snf_size().
849 *
850 * @param context An initialized mPIPE context.
851 * @param ering The eDMA ring index.
852 * @param max_blks The number of blocks to dedicate to the ring
853 * (normally min_snf_blks + 1). Must be greater than min_snf_blks.
854 * @param min_snf_blks The number of blocks which must be stored
855 * prior to starting to send the packet (normally 12).
856 * @param db Whether to allow use of dynamic blocks by the ring
857 * (normally 1).
858 *
859 * @return 0 on success, negative on error.
860 */
861extern int gxio_mpipe_config_edma_ring_blks(gxio_mpipe_context_t *context,
862 unsigned int ering,
863 unsigned int max_blks,
864 unsigned int min_snf_blks,
865 unsigned int db);
866
830/***************************************************************** 867/*****************************************************************
831 * Classifier Program * 868 * Classifier Program *
832 ******************************************************************/ 869 ******************************************************************/
@@ -1288,15 +1325,39 @@ typedef struct {
1288 /* The log2() of the number of entries. */ 1325 /* The log2() of the number of entries. */
1289 unsigned long log2_num_entries; 1326 unsigned long log2_num_entries;
1290 1327
1328 /* The context. */
1329 gxio_mpipe_context_t *context;
1330
1331 /* The ering. */
1332 unsigned int ering;
1333
1334 /* The channel. */
1335 unsigned int channel;
1336
1291} gxio_mpipe_equeue_t; 1337} gxio_mpipe_equeue_t;
1292 1338
1293/* Initialize an "equeue". 1339/* Initialize an "equeue".
1294 * 1340 *
1295 * Takes the equeue plus the same args as gxio_mpipe_init_edma_ring(). 1341 * This function uses gxio_mpipe_init_edma_ring() to initialize the
1342 * underlying edma_ring using the provided arguments.
1343 *
1344 * @param equeue An egress queue to be initialized.
1345 * @param context An initialized mPIPE context.
1346 * @param ering The eDMA ring index.
1347 * @param channel The channel to use. This must be one of the channels
1348 * associated with the context's set of open links.
1349 * @param mem A physically contiguous region of memory to be filled
1350 * with a ring of ::gxio_mpipe_edesc_t structures.
1351 * @param mem_size Number of bytes in the ring. Must be 512, 2048,
1352 * 8192 or 65536, times 16 (i.e. sizeof(gxio_mpipe_edesc_t)).
1353 * @param mem_flags ::gxio_mpipe_mem_flags_e memory flags.
1354 *
1355 * @return 0 on success, ::GXIO_MPIPE_ERR_BAD_EDMA_RING or
1356 * ::GXIO_ERR_INVAL_MEMORY_SIZE on failure.
1296 */ 1357 */
1297extern int gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue, 1358extern int gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue,
1298 gxio_mpipe_context_t *context, 1359 gxio_mpipe_context_t *context,
1299 unsigned int edma_ring_id, 1360 unsigned int ering,
1300 unsigned int channel, 1361 unsigned int channel,
1301 void *mem, unsigned int mem_size, 1362 void *mem, unsigned int mem_size,
1302 unsigned int mem_flags); 1363 unsigned int mem_flags);
@@ -1494,6 +1555,37 @@ static inline int gxio_mpipe_equeue_is_complete(gxio_mpipe_equeue_t *equeue,
1494 completion_slot, update); 1555 completion_slot, update);
1495} 1556}
1496 1557
1558/* Set the snf (store and forward) size for an equeue.
1559 *
1560 * The snf size for an equeue defaults to 1536, and encodes the size
1561 * of the largest packet for which egress is guaranteed to avoid
1562 * transmission underruns and/or corrupt checksums under heavy load.
1563 *
1564 * The snf size affects a global resource pool which cannot support,
1565 * for example, all 24 equeues each requesting an snf size of 8K.
1566 *
1567 * To ensure that jumbo packets can be egressed properly, the snf size
1568 * should be set to the size of the largest possible packet, which
1569 * will usually be limited by the size of the app's largest buffer.
1570 *
1571 * This is a convenience wrapper around
1572 * gxio_mpipe_config_edma_ring_blks().
1573 *
1574 * This function should not be called after any egress has been done
1575 * on the equeue.
1576 *
1577 * @param equeue An egress queue initialized via gxio_mpipe_equeue_init().
1578 * @param size The snf size, in bytes.
1579 * @return Zero on success, negative error otherwise.
1580 */
1581static inline int gxio_mpipe_equeue_set_snf_size(gxio_mpipe_equeue_t *equeue,
1582 size_t size)
1583{
1584 int blks = (size + 127) / 128;
1585 return gxio_mpipe_config_edma_ring_blks(equeue->context, equeue->ering,
1586 blks + 1, blks, 1);
1587}
1588
1497/***************************************************************** 1589/*****************************************************************
1498 * Link Management * 1590 * Link Management *
1499 ******************************************************************/ 1591 ******************************************************************/
@@ -1634,6 +1726,24 @@ typedef struct {
1634 uint8_t mac; 1726 uint8_t mac;
1635} gxio_mpipe_link_t; 1727} gxio_mpipe_link_t;
1636 1728
1729/* Translate a link name to the instance number of the mPIPE shim which is
1730 * connected to that link. This call does not verify whether the link is
1731 * currently available, and does not reserve any link resources;
1732 * gxio_mpipe_link_open() must be called to perform those functions.
1733 *
1734 * Typically applications will call this function to translate a link name
1735 * to an mPIPE instance number; call gxio_mpipe_init(), passing it that
1736 * instance number, to initialize the mPIPE shim; and then call
1737 * gxio_mpipe_link_open(), passing it the same link name plus the mPIPE
1738 * context, to configure the link.
1739 *
1740 * @param link_name Name of the link; see @ref gxio_mpipe_link_names.
1741 * @return The mPIPE instance number which is associated with the named
1742 * link, or a negative error code (::GXIO_ERR_NO_DEVICE) if the link does
1743 * not exist.
1744 */
1745extern int gxio_mpipe_link_instance(const char *link_name);
1746
1637/* Retrieve one of this system's legal link names, and its MAC address. 1747/* Retrieve one of this system's legal link names, and its MAC address.
1638 * 1748 *
1639 * @param index Link name index. If a system supports N legal link names, 1749 * @param index Link name index. If a system supports N legal link names,
@@ -1697,6 +1807,17 @@ static inline int gxio_mpipe_link_channel(gxio_mpipe_link_t *link)
1697 return link->channel; 1807 return link->channel;
1698} 1808}
1699 1809
1810/* Set a link attribute.
1811 *
1812 * @param link A properly initialized link state object.
1813 * @param attr An attribute from the set of @ref gxio_mpipe_link_attrs.
1814 * @param val New value of the attribute.
1815 * @return 0 if the attribute was successfully set, or a negative error
1816 * code.
1817 */
1818extern int gxio_mpipe_link_set_attr(gxio_mpipe_link_t *link, uint32_t attr,
1819 int64_t val);
1820
1700/////////////////////////////////////////////////////////////////// 1821///////////////////////////////////////////////////////////////////
1701// Timestamp // 1822// Timestamp //
1702/////////////////////////////////////////////////////////////////// 1823///////////////////////////////////////////////////////////////////
@@ -1733,4 +1854,18 @@ extern int gxio_mpipe_set_timestamp(gxio_mpipe_context_t *context,
1733extern int gxio_mpipe_adjust_timestamp(gxio_mpipe_context_t *context, 1854extern int gxio_mpipe_adjust_timestamp(gxio_mpipe_context_t *context,
1734 int64_t delta); 1855 int64_t delta);
1735 1856
1857/** Adjust the mPIPE timestamp clock frequency.
1858 *
1859 * @param context An initialized mPIPE context.
1860 * @param ppb A 32-bit signed PPB (Parts Per Billion) value to adjust.
1861 * The absolute value of ppb must be less than or equal to 1000000000.
1862 * Values less than about 30000 will generally cause a GXIO_ERR_INVAL
1863 * return due to the granularity of the hardware that converts reference
1864 * clock cycles into seconds and nanoseconds.
1865 * @return If the call was successful, zero; otherwise, a negative error
1866 * code.
1867 */
1868extern int gxio_mpipe_adjust_timestamp_freq(gxio_mpipe_context_t* context,
1869 int32_t ppb);
1870
1736#endif /* !_GXIO_MPIPE_H_ */ 1871#endif /* !_GXIO_MPIPE_H_ */
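A hedged sketch of the open flow described in the gxio_mpipe_link_instance() comment above, assuming the usual gxio_mpipe_init() and gxio_mpipe_link_open() entry points from this API; error handling is abbreviated and open_link() is a hypothetical wrapper.

#include <gxio/mpipe.h>

static int open_link(gxio_mpipe_context_t *context,
		     gxio_mpipe_link_t *link, const char *name)
{
	/* Translate the link name to its mPIPE instance number... */
	int instance = gxio_mpipe_link_instance(name);
	if (instance < 0)
		return instance;	/* e.g. GXIO_ERR_NO_DEVICE */

	/* ...initialize the shim that link is attached to... */
	int rc = gxio_mpipe_init(context, instance);
	if (rc < 0)
		return rc;

	/* ...then reserve and configure the link itself. */
	return gxio_mpipe_link_open(link, context, name, 0);
}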
diff --git a/arch/tile/include/gxio/uart.h b/arch/tile/include/gxio/uart.h
new file mode 100644
index 000000000000..438ee7e46c7b
--- /dev/null
+++ b/arch/tile/include/gxio/uart.h
@@ -0,0 +1,105 @@
1/*
2 * Copyright 2013 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15#ifndef _GXIO_UART_H_
16#define _GXIO_UART_H_
17
18#include "common.h"
19
20#include <hv/drv_uart_intf.h>
21#include <hv/iorpc.h>
22
23/*
24 *
25 * An API for manipulating the UART interface.
26 */
27
28/*
29 *
30 * The Rshim allows access to the processor's UART interface.
31 */
32
33/* A context object used to manage UART resources. */
34typedef struct {
35
36 /* File descriptor for calling up to the hypervisor. */
37 int fd;
38
39 /* The VA at which our MMIO registers are mapped. */
40 char *mmio_base;
41
42} gxio_uart_context_t;
43
44/* Request UART interrupts.
45 *
46 * Request that interrupts be delivered to a tile when the UART's
47 * Receive FIFO is written, or the Write FIFO is read.
48 *
49 * @param context Pointer to a properly initialized gxio_uart_context_t.
50 * @param bind_cpu_x X coordinate of CPU to which interrupt will be delivered.
51 * @param bind_cpu_y Y coordinate of CPU to which interrupt will be delivered.
52 * @param bind_interrupt IPI interrupt number.
53 * @param bind_event Sub-interrupt event bit number; a negative value can
54 * disable the interrupt.
55 * @return Zero if all of the requested UART events were successfully
56 * configured to interrupt.
57 */
58extern int gxio_uart_cfg_interrupt(gxio_uart_context_t *context,
59 int bind_cpu_x,
60 int bind_cpu_y,
61 int bind_interrupt, int bind_event);
62
63/* Initialize a UART context.
64 *
65 * A properly initialized context must be obtained before any of the other
66 * gxio_uart routines may be used.
67 *
68 * @param context Pointer to a gxio_uart_context_t, which will be initialized
69 * by this routine, if it succeeds.
70 * @param uart_index Index of the UART to use.
71 * @return Zero if the context was successfully initialized, else a
72 * GXIO_ERR_xxx error code.
73 */
74extern int gxio_uart_init(gxio_uart_context_t *context, int uart_index);
75
76/* Destroy a UART context.
77 *
78 * Once destroyed, a context may not be used with any gxio_uart routines
79 * other than gxio_uart_init(). After this routine returns, no further
80 * interrupts requested on this context will be delivered. The state and
81 * configuration of the pins which had been attached to this context are
82 * unchanged by this operation.
83 *
84 * @param context Pointer to a gxio_uart_context_t.
85 * @return Zero if the context was successfully destroyed, else a
86 * GXIO_ERR_xxx error code.
87 */
88extern int gxio_uart_destroy(gxio_uart_context_t *context);
89
90/* Write UART register.
91 * @param context Pointer to a gxio_uart_context_t.
92 * @param offset UART register offset.
93 * @param word Data to be written to the UART register.
94 */
95extern void gxio_uart_write(gxio_uart_context_t *context, uint64_t offset,
96 uint64_t word);
97
98/* Read UART register.
99 * @param context Pointer to a gxio_uart_context_t.
100 * @param offset UART register offset.
101 * @return Data read from UART register.
102 */
103extern uint64_t gxio_uart_read(gxio_uart_context_t *context, uint64_t offset);
104
105#endif /* _GXIO_UART_H_ */
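
A minimal usage sketch of the gxio_uart API declared above, assuming the context lives on the caller's stack; UART_TRANSMIT_DATA is an illustrative register offset presumed to come from <arch/uart.h>, not confirmed by this patch:

	#include <gxio/uart.h>

	/* Sketch: bring up UART 0, transmit one byte, tear down.
	 * UART_TRANSMIT_DATA is an assumed offset, for illustration only. */
	static int uart_hello(void)
	{
		gxio_uart_context_t ctx;
		int rc = gxio_uart_init(&ctx, 0);	/* UART index 0 */
		if (rc != 0)
			return rc;			/* GXIO_ERR_xxx code */
		gxio_uart_write(&ctx, UART_TRANSMIT_DATA, 'A');
		return gxio_uart_destroy(&ctx);
	}
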
diff --git a/arch/tile/include/hv/drv_mpipe_intf.h b/arch/tile/include/hv/drv_mpipe_intf.h
index 6cdae3bf046e..c97e416dd963 100644
--- a/arch/tile/include/hv/drv_mpipe_intf.h
+++ b/arch/tile/include/hv/drv_mpipe_intf.h
@@ -23,6 +23,9 @@
23#include <arch/mpipe_constants.h> 23#include <arch/mpipe_constants.h>
24 24
25 25
26/** Number of mPIPE instances supported */
27#define HV_MPIPE_INSTANCE_MAX (2)
28
26/** Number of buffer stacks (32). */ 29/** Number of buffer stacks (32). */
27#define HV_MPIPE_NUM_BUFFER_STACKS \ 30#define HV_MPIPE_NUM_BUFFER_STACKS \
28 (MPIPE_MMIO_INIT_DAT_GX36_1__BUFFER_STACK_MASK_WIDTH) 31 (MPIPE_MMIO_INIT_DAT_GX36_1__BUFFER_STACK_MASK_WIDTH)
diff --git a/arch/tile/include/hv/drv_trio_intf.h b/arch/tile/include/hv/drv_trio_intf.h
index ef9f3f52ee27..237e04dee66c 100644
--- a/arch/tile/include/hv/drv_trio_intf.h
+++ b/arch/tile/include/hv/drv_trio_intf.h
@@ -64,8 +64,9 @@ struct pcie_port_property
64 * will not consider it an error if the link comes up as a x8 link. */ 64 * will not consider it an error if the link comes up as a x8 link. */
65 uint8_t allow_x8: 1; 65 uint8_t allow_x8: 1;
66 66
67 /** Reserved. */ 67 /** If true, this link is connected to a device which may or may not
68 uint8_t reserved: 1; 68 * be present. */
69 uint8_t removable: 1;
69 70
70}; 71};
71 72
@@ -167,6 +168,9 @@ pcie_stream_intr_config_sel_t;
167struct pcie_trio_ports_property 168struct pcie_trio_ports_property
168{ 169{
169 struct pcie_port_property ports[TILEGX_TRIO_PCIES]; 170 struct pcie_port_property ports[TILEGX_TRIO_PCIES];
171
172 /** Set if this TRIO belongs to a Gx72 device. */
173 uint8_t is_gx72;
170}; 174};
171 175
172/* Flags indicating traffic class. */ 176/* Flags indicating traffic class. */
diff --git a/arch/tile/include/hv/drv_uart_intf.h b/arch/tile/include/hv/drv_uart_intf.h
new file mode 100644
index 000000000000..f5379e2404fd
--- /dev/null
+++ b/arch/tile/include/hv/drv_uart_intf.h
@@ -0,0 +1,33 @@
1/*
2 * Copyright 2013 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15/**
16 * Interface definitions for the UART driver.
17 */
18
19#ifndef _SYS_HV_DRV_UART_INTF_H
20#define _SYS_HV_DRV_UART_INTF_H
21
22#include <arch/uart.h>
23
24/** Number of UART ports supported. */
25#define TILEGX_UART_NR 2
26
27/** The mmap file offset (PA) of the UART MMIO region. */
28#define HV_UART_MMIO_OFFSET 0
29
30/** The maximum size of the UART's MMIO region (64 KB). */
31#define HV_UART_MMIO_SIZE (1UL << 16)
32
33#endif /* _SYS_HV_DRV_UART_INTF_H */
diff --git a/arch/tile/include/hv/hypervisor.h b/arch/tile/include/hv/hypervisor.h
index 837dca5328c2..dfcdeb61ba34 100644
--- a/arch/tile/include/hv/hypervisor.h
+++ b/arch/tile/include/hv/hypervisor.h
@@ -318,8 +318,11 @@
318/** hv_set_pte_super_shift */ 318/** hv_set_pte_super_shift */
319#define HV_DISPATCH_SET_PTE_SUPER_SHIFT 57 319#define HV_DISPATCH_SET_PTE_SUPER_SHIFT 57
320 320
321/** hv_console_set_ipi */
322#define HV_DISPATCH_CONSOLE_SET_IPI 63
323
321/** One more than the largest dispatch value */ 324/** One more than the largest dispatch value */
322#define _HV_DISPATCH_END 58 325#define _HV_DISPATCH_END 64
323 326
324 327
325#ifndef __ASSEMBLER__ 328#ifndef __ASSEMBLER__
@@ -541,14 +544,24 @@ typedef enum {
541 HV_CONFSTR_CPUMOD_REV = 18, 544 HV_CONFSTR_CPUMOD_REV = 18,
542 545
543 /** Human-readable CPU module description. */ 546 /** Human-readable CPU module description. */
544 HV_CONFSTR_CPUMOD_DESC = 19 547 HV_CONFSTR_CPUMOD_DESC = 19,
548
549 /** Per-tile hypervisor statistics. When this identifier is specified,
550 * the hv_confstr call takes two extra arguments. The first is the
551 * HV_XY_TO_LOTAR of the target tile's coordinates. The second is
552 * a flag word. The only current flag is the lowest bit, which means
553 * "zero out the stats instead of retrieving them"; in this case the
554 * buffer and buffer length are ignored. */
555 HV_CONFSTR_HV_STATS = 20
545 556
546} HV_ConfstrQuery; 557} HV_ConfstrQuery;
547 558
548/** Query a configuration string from the hypervisor. 559/** Query a configuration string from the hypervisor.
549 * 560 *
550 * @param query Identifier for the specific string to be retrieved 561 * @param query Identifier for the specific string to be retrieved
551 * (HV_CONFSTR_xxx). 562 * (HV_CONFSTR_xxx). Some strings may require or permit extra
563 * arguments to be appended which select specific objects to be
564 * described; see the string descriptions above.
552 * @param buf Buffer in which to place the string. 565 * @param buf Buffer in which to place the string.
553 * @param len Length of the buffer. 566 * @param len Length of the buffer.
554 * @return If query is valid, then the length of the corresponding string, 567 * @return If query is valid, then the length of the corresponding string,
@@ -556,21 +569,16 @@ typedef enum {
556 * was truncated. If query is invalid, HV_EINVAL. If the specified 569 * was truncated. If query is invalid, HV_EINVAL. If the specified
557 * buffer is not writable by the client, HV_EFAULT. 570 * buffer is not writable by the client, HV_EFAULT.
558 */ 571 */
559int hv_confstr(HV_ConfstrQuery query, HV_VirtAddr buf, int len); 572int hv_confstr(HV_ConfstrQuery query, HV_VirtAddr buf, int len, ...);
560 573
561/** Tile coordinate */ 574/** Tile coordinate */
562typedef struct 575typedef struct
563{ 576{
564#ifndef __BIG_ENDIAN__
565 /** X coordinate, relative to supervisor's top-left coordinate */ 577 /** X coordinate, relative to supervisor's top-left coordinate */
566 int x; 578 int x;
567 579
568 /** Y coordinate, relative to supervisor's top-left coordinate */ 580 /** Y coordinate, relative to supervisor's top-left coordinate */
569 int y; 581 int y;
570#else
571 int y;
572 int x;
573#endif
574} HV_Coord; 582} HV_Coord;
575 583
576 584
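
A hedged illustration of the new varargs form of hv_confstr() described above: for HV_CONFSTR_HV_STATS the caller appends the target tile's lotar and a flags word (bit 0 set means "zero the stats"). HV_XY_TO_LOTAR is referenced by the description and assumed to come from the hypervisor headers; the cast and pr_info() are illustrative:

	/* Sketch: read, then clear, the per-tile hv stats for tile (0, 0). */
	static void dump_and_clear_hv_stats(void)
	{
		char buf[1024];
		int len = hv_confstr(HV_CONFSTR_HV_STATS, (HV_VirtAddr)buf,
				     sizeof(buf), HV_XY_TO_LOTAR(0, 0), 0);
		if (len > 0)
			pr_info("hv stats: %.*s\n", len, buf);
		/* Flags bit 0 set: zero the stats; buf and len are ignored. */
		hv_confstr(HV_CONFSTR_HV_STATS, (HV_VirtAddr)buf, 0,
			   HV_XY_TO_LOTAR(0, 0), 1);
	}
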
@@ -585,6 +593,30 @@ typedef struct
585 */ 593 */
586int hv_get_ipi_pte(HV_Coord tile, int pl, HV_PTE* pte); 594int hv_get_ipi_pte(HV_Coord tile, int pl, HV_PTE* pte);
587 595
596/** Configure the console interrupt.
597 *
598 * When the console client interrupt is enabled, the hypervisor will
599 * deliver the specified IPI to the client in the following situations:
600 *
601 * - The console has at least one character available for input.
602 *
603 * - The console can accept new characters for output, and the last call
604 * to hv_console_write() did not write all of the characters requested
605 * by the client.
606 *
607 * Note that in some system configurations, the console interrupt will
608 * not be available; clients should be prepared for this routine to fail
609 * and to fall back to periodic console polling in that case.
610 *
611 * @param ipi Index of the IPI register which will receive the interrupt.
612 * @param event IPI event number for console interrupt. If less than 0,
613 * disable the console IPI interrupt.
614 * @param coord Tile to be targeted for console interrupt.
615 * @return 0 on success; otherwise HV_EINVAL if a parameter is illegal,
616 * or HV_ENOTSUP if console interrupts are not available.
617 */
618int hv_console_set_ipi(int ipi, int event, HV_Coord coord);
619
588#else /* !CHIP_HAS_IPI() */ 620#else /* !CHIP_HAS_IPI() */
589 621
590/** A set of interrupts. */ 622/** A set of interrupts. */
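
A sketch of the fallback pattern that the hv_console_set_ipi() comment prescribes; the IPI/event numbers and the use_console_polling flag are assumed driver-local choices, not part of this patch:

	HV_Coord tile = { .x = 0, .y = 0 };

	/* Ask for console events on IPI register 0, sub-event 1. */
	if (hv_console_set_ipi(0, 1, tile) != 0) {
		/* e.g. HV_ENOTSUP: no console interrupt in this
		 * configuration, so poll the console periodically. */
		use_console_polling = 1;	/* assumed driver flag */
	}
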
@@ -1092,13 +1124,8 @@ HV_VirtAddrRange hv_inquire_virtual(int idx);
1092/** A range of ASID values. */ 1124/** A range of ASID values. */
1093typedef struct 1125typedef struct
1094{ 1126{
1095#ifndef __BIG_ENDIAN__
1096 HV_ASID start; /**< First ASID in the range. */ 1127 HV_ASID start; /**< First ASID in the range. */
1097 unsigned int size; /**< Number of ASIDs. Zero for an invalid range. */ 1128 unsigned int size; /**< Number of ASIDs. Zero for an invalid range. */
1098#else
1099 unsigned int size; /**< Number of ASIDs. Zero for an invalid range. */
1100 HV_ASID start; /**< First ASID in the range. */
1101#endif
1102} HV_ASIDRange; 1129} HV_ASIDRange;
1103 1130
1104/** Returns information about a range of ASIDs. 1131/** Returns information about a range of ASIDs.
@@ -1422,7 +1449,6 @@ typedef enum
1422/** Message recipient. */ 1449/** Message recipient. */
1423typedef struct 1450typedef struct
1424{ 1451{
1425#ifndef __BIG_ENDIAN__
1426 /** X coordinate, relative to supervisor's top-left coordinate */ 1452 /** X coordinate, relative to supervisor's top-left coordinate */
1427 unsigned int x:11; 1453 unsigned int x:11;
1428 1454
@@ -1431,11 +1457,6 @@ typedef struct
1431 1457
1432 /** Status of this recipient */ 1458 /** Status of this recipient */
1433 HV_Recip_State state:10; 1459 HV_Recip_State state:10;
1434#else //__BIG_ENDIAN__
1435 HV_Recip_State state:10;
1436 unsigned int y:11;
1437 unsigned int x:11;
1438#endif
1439} HV_Recipient; 1460} HV_Recipient;
1440 1461
1441/** Send a message to a set of recipients. 1462/** Send a message to a set of recipients.
diff --git a/arch/tile/include/uapi/arch/Kbuild b/arch/tile/include/uapi/arch/Kbuild
index 4ebc34f4768d..97dfbecec6b6 100644
--- a/arch/tile/include/uapi/arch/Kbuild
+++ b/arch/tile/include/uapi/arch/Kbuild
@@ -1,7 +1,6 @@
1# UAPI Header export list 1# UAPI Header export list
2header-y += abi.h 2header-y += abi.h
3header-y += chip.h 3header-y += chip.h
4header-y += chip_tile64.h
5header-y += chip_tilegx.h 4header-y += chip_tilegx.h
6header-y += chip_tilepro.h 5header-y += chip_tilepro.h
7header-y += icache.h 6header-y += icache.h
diff --git a/arch/tile/include/uapi/arch/chip.h b/arch/tile/include/uapi/arch/chip.h
index 926d3db0e91e..4c91f90b9369 100644
--- a/arch/tile/include/uapi/arch/chip.h
+++ b/arch/tile/include/uapi/arch/chip.h
@@ -12,9 +12,7 @@
12 * more details. 12 * more details.
13 */ 13 */
14 14
15#if __tile_chip__ == 0 15#if __tile_chip__ == 1
16#include <arch/chip_tile64.h>
17#elif __tile_chip__ == 1
18#include <arch/chip_tilepro.h> 16#include <arch/chip_tilepro.h>
19#elif defined(__tilegx__) 17#elif defined(__tilegx__)
20#include <arch/chip_tilegx.h> 18#include <arch/chip_tilegx.h>
diff --git a/arch/tile/include/uapi/arch/chip_tile64.h b/arch/tile/include/uapi/arch/chip_tile64.h
deleted file mode 100644
index 261aaba092d4..000000000000
--- a/arch/tile/include/uapi/arch/chip_tile64.h
+++ /dev/null
@@ -1,258 +0,0 @@
1/*
2 * Copyright 2010 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15/*
16 * @file
17 * Global header file.
18 * This header file specifies defines for TILE64.
19 */
20
21#ifndef __ARCH_CHIP_H__
22#define __ARCH_CHIP_H__
23
24/** Specify chip version.
25 * When possible, prefer the CHIP_xxx symbols below for future-proofing.
26 * This is intended for cross-compiling; native compilation should
27 * use the predefined __tile_chip__ symbol.
28 */
29#define TILE_CHIP 0
30
31/** Specify chip revision.
32 * This provides for the case of a respin of a particular chip type;
33 * the normal value for this symbol is "0".
34 * This is intended for cross-compiling; native compilation should
35 * use the predefined __tile_chip_rev__ symbol.
36 */
37#define TILE_CHIP_REV 0
38
39/** The name of this architecture. */
40#define CHIP_ARCH_NAME "tile64"
41
42/** The ELF e_machine type for binaries for this chip. */
43#define CHIP_ELF_TYPE() EM_TILE64
44
45/** The alternate ELF e_machine type for binaries for this chip. */
46#define CHIP_COMPAT_ELF_TYPE() 0x2506
47
48/** What is the native word size of the machine? */
49#define CHIP_WORD_SIZE() 32
50
51/** How many bits of a virtual address are used. Extra bits must be
52 * the sign extension of the low bits.
53 */
54#define CHIP_VA_WIDTH() 32
55
56/** How many bits are in a physical address? */
57#define CHIP_PA_WIDTH() 36
58
59/** Size of the L2 cache, in bytes. */
60#define CHIP_L2_CACHE_SIZE() 65536
61
62/** Log size of an L2 cache line in bytes. */
63#define CHIP_L2_LOG_LINE_SIZE() 6
64
65/** Size of an L2 cache line, in bytes. */
66#define CHIP_L2_LINE_SIZE() (1 << CHIP_L2_LOG_LINE_SIZE())
67
68/** Associativity of the L2 cache. */
69#define CHIP_L2_ASSOC() 2
70
71/** Size of the L1 data cache, in bytes. */
72#define CHIP_L1D_CACHE_SIZE() 8192
73
74/** Log size of an L1 data cache line in bytes. */
75#define CHIP_L1D_LOG_LINE_SIZE() 4
76
77/** Size of an L1 data cache line, in bytes. */
78#define CHIP_L1D_LINE_SIZE() (1 << CHIP_L1D_LOG_LINE_SIZE())
79
80/** Associativity of the L1 data cache. */
81#define CHIP_L1D_ASSOC() 2
82
83/** Size of the L1 instruction cache, in bytes. */
84#define CHIP_L1I_CACHE_SIZE() 8192
85
86/** Log size of an L1 instruction cache line in bytes. */
87#define CHIP_L1I_LOG_LINE_SIZE() 6
88
89/** Size of an L1 instruction cache line, in bytes. */
90#define CHIP_L1I_LINE_SIZE() (1 << CHIP_L1I_LOG_LINE_SIZE())
91
92/** Associativity of the L1 instruction cache. */
93#define CHIP_L1I_ASSOC() 1
94
95/** Stride with which flush instructions must be issued. */
96#define CHIP_FLUSH_STRIDE() CHIP_L2_LINE_SIZE()
97
98/** Stride with which inv instructions must be issued. */
99#define CHIP_INV_STRIDE() CHIP_L1D_LINE_SIZE()
100
101/** Stride with which finv instructions must be issued. */
102#define CHIP_FINV_STRIDE() CHIP_L1D_LINE_SIZE()
103
104/** Can the local cache coherently cache data that is homed elsewhere? */
105#define CHIP_HAS_COHERENT_LOCAL_CACHE() 0
106
107/** How many simultaneous outstanding victims can the L2 cache have? */
108#define CHIP_MAX_OUTSTANDING_VICTIMS() 2
109
110/** Does the TLB support the NC and NOALLOC bits? */
111#define CHIP_HAS_NC_AND_NOALLOC_BITS() 0
112
113/** Does the chip support hash-for-home caching? */
114#define CHIP_HAS_CBOX_HOME_MAP() 0
115
116/** Number of entries in the chip's home map tables. */
117/* #define CHIP_CBOX_HOME_MAP_SIZE() -- does not apply to chip 0 */
118
119/** Do uncacheable requests miss in the cache regardless of whether
120 * there is matching data? */
121#define CHIP_HAS_ENFORCED_UNCACHEABLE_REQUESTS() 0
122
123/** Does the mf instruction wait for victims? */
124#define CHIP_HAS_MF_WAITS_FOR_VICTIMS() 1
125
126/** Does the chip have an "inv" instruction that doesn't also flush? */
127#define CHIP_HAS_INV() 0
128
129/** Does the chip have a "wh64" instruction? */
130#define CHIP_HAS_WH64() 0
131
132/** Does this chip have a 'dword_align' instruction? */
133#define CHIP_HAS_DWORD_ALIGN() 0
134
135/** Number of performance counters. */
136#define CHIP_PERFORMANCE_COUNTERS() 2
137
138/** Does this chip have auxiliary performance counters? */
139#define CHIP_HAS_AUX_PERF_COUNTERS() 0
140
141/** Is the CBOX_MSR1 SPR supported? */
142#define CHIP_HAS_CBOX_MSR1() 0
143
144/** Is the TILE_RTF_HWM SPR supported? */
145#define CHIP_HAS_TILE_RTF_HWM() 0
146
147/** Is the TILE_WRITE_PENDING SPR supported? */
148#define CHIP_HAS_TILE_WRITE_PENDING() 0
149
150/** Is the PROC_STATUS SPR supported? */
151#define CHIP_HAS_PROC_STATUS_SPR() 0
152
153/** Is the DSTREAM_PF SPR supported? */
154#define CHIP_HAS_DSTREAM_PF() 0
155
156/** Log of the number of mshims we have. */
157#define CHIP_LOG_NUM_MSHIMS() 2
158
159/** Are the bases of the interrupt vector areas fixed? */
160#define CHIP_HAS_FIXED_INTVEC_BASE() 1
161
162/** Are the interrupt masks split up into 2 SPRs? */
163#define CHIP_HAS_SPLIT_INTR_MASK() 1
164
165/** Is the cycle count split up into 2 SPRs? */
166#define CHIP_HAS_SPLIT_CYCLE() 1
167
168/** Does the chip have a static network? */
169#define CHIP_HAS_SN() 1
170
171/** Does the chip have a static network processor? */
172#define CHIP_HAS_SN_PROC() 1
173
174/** Size of the L1 static network processor instruction cache, in bytes. */
175#define CHIP_L1SNI_CACHE_SIZE() 2048
176
177/** Does the chip have DMA support in each tile? */
178#define CHIP_HAS_TILE_DMA() 1
179
180/** Does the chip have the second revision of the directly accessible
181 * dynamic networks? This encapsulates a number of characteristics,
182 * including the absence of the catch-all, the absence of inline message
183 * tags, the absence of support for network context-switching, and so on.
184 */
185#define CHIP_HAS_REV1_XDN() 0
186
187/** Does the chip have cmpexch and similar (fetchadd, exch, etc.)? */
188#define CHIP_HAS_CMPEXCH() 0
189
190/** Does the chip have memory-mapped I/O support? */
191#define CHIP_HAS_MMIO() 0
192
193/** Does the chip have post-completion interrupts? */
194#define CHIP_HAS_POST_COMPLETION_INTERRUPTS() 0
195
196/** Does the chip have native single step support? */
197#define CHIP_HAS_SINGLE_STEP() 0
198
199#ifndef __OPEN_SOURCE__ /* features only relevant to hypervisor-level code */
200
201/** How many entries are present in the instruction TLB? */
202#define CHIP_ITLB_ENTRIES() 8
203
204/** How many entries are present in the data TLB? */
205#define CHIP_DTLB_ENTRIES() 16
206
207/** How many MAF entries does the XAUI shim have? */
208#define CHIP_XAUI_MAF_ENTRIES() 16
209
210/** Does the memory shim have a source-id table? */
211#define CHIP_HAS_MSHIM_SRCID_TABLE() 1
212
213/** Does the L1 instruction cache clear on reset? */
214#define CHIP_HAS_L1I_CLEAR_ON_RESET() 0
215
216/** Does the chip come out of reset with valid coordinates on all tiles?
217 * Note that if defined, this also implies that the upper left is 1,1.
218 */
219#define CHIP_HAS_VALID_TILE_COORD_RESET() 0
220
221/** Does the chip have unified packet formats? */
222#define CHIP_HAS_UNIFIED_PACKET_FORMATS() 0
223
224/** Does the chip support write reordering? */
225#define CHIP_HAS_WRITE_REORDERING() 0
226
227/** Does the chip support Y-X routing as well as X-Y? */
228#define CHIP_HAS_Y_X_ROUTING() 0
229
230/** Is INTCTRL_3 managed with the correct MPL? */
231#define CHIP_HAS_INTCTRL_3_STATUS_FIX() 0
232
233/** Is it possible to configure the chip to be big-endian? */
234#define CHIP_HAS_BIG_ENDIAN_CONFIG() 0
235
236/** Is the CACHE_RED_WAY_OVERRIDDEN SPR supported? */
237#define CHIP_HAS_CACHE_RED_WAY_OVERRIDDEN() 0
238
239/** Is the DIAG_TRACE_WAY SPR supported? */
240#define CHIP_HAS_DIAG_TRACE_WAY() 0
241
242/** Is the MEM_STRIPE_CONFIG SPR supported? */
243#define CHIP_HAS_MEM_STRIPE_CONFIG() 0
244
245/** Are the TLB_PERF SPRs supported? */
246#define CHIP_HAS_TLB_PERF() 0
247
248/** Is the VDN_SNOOP_SHIM_CTL SPR supported? */
249#define CHIP_HAS_VDN_SNOOP_SHIM_CTL() 0
250
251/** Does the chip support rev1 DMA packets? */
252#define CHIP_HAS_REV1_DMA_PACKETS() 0
253
254/** Does the chip have an IPI shim? */
255#define CHIP_HAS_IPI() 0
256
257#endif /* !__OPEN_SOURCE__ */
258#endif /* __ARCH_CHIP_H__ */
diff --git a/arch/tile/include/uapi/arch/opcode_tilegx.h b/arch/tile/include/uapi/arch/opcode_tilegx.h
index c14d02c81600..d76ff2db745e 100644
--- a/arch/tile/include/uapi/arch/opcode_tilegx.h
+++ b/arch/tile/include/uapi/arch/opcode_tilegx.h
@@ -61,6 +61,7 @@ typedef tilegx_bundle_bits tile_bundle_bits;
61#define TILE_BUNDLE_ALIGNMENT_IN_BYTES TILEGX_BUNDLE_ALIGNMENT_IN_BYTES 61#define TILE_BUNDLE_ALIGNMENT_IN_BYTES TILEGX_BUNDLE_ALIGNMENT_IN_BYTES
62#define TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES \ 62#define TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES \
63 TILEGX_LOG2_BUNDLE_ALIGNMENT_IN_BYTES 63 TILEGX_LOG2_BUNDLE_ALIGNMENT_IN_BYTES
64#define TILE_BPT_BUNDLE TILEGX_BPT_BUNDLE
64 65
65/* 64-bit pattern for a { bpt ; nop } bundle. */ 66/* 64-bit pattern for a { bpt ; nop } bundle. */
66#define TILEGX_BPT_BUNDLE 0x286a44ae51485000ULL 67#define TILEGX_BPT_BUNDLE 0x286a44ae51485000ULL
diff --git a/arch/tile/include/uapi/arch/opcode_tilepro.h b/arch/tile/include/uapi/arch/opcode_tilepro.h
index 71b763b8ce83..4451cff1a861 100644
--- a/arch/tile/include/uapi/arch/opcode_tilepro.h
+++ b/arch/tile/include/uapi/arch/opcode_tilepro.h
@@ -71,6 +71,7 @@ typedef tilepro_bundle_bits tile_bundle_bits;
71#define TILE_BUNDLE_ALIGNMENT_IN_BYTES TILEPRO_BUNDLE_ALIGNMENT_IN_BYTES 71#define TILE_BUNDLE_ALIGNMENT_IN_BYTES TILEPRO_BUNDLE_ALIGNMENT_IN_BYTES
72#define TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES \ 72#define TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES \
73 TILEPRO_LOG2_BUNDLE_ALIGNMENT_IN_BYTES 73 TILEPRO_LOG2_BUNDLE_ALIGNMENT_IN_BYTES
74#define TILE_BPT_BUNDLE TILEPRO_BPT_BUNDLE
74 75
75/* 64-bit pattern for a { bpt ; nop } bundle. */ 76/* 64-bit pattern for a { bpt ; nop } bundle. */
76#define TILEPRO_BPT_BUNDLE 0x400b3cae70166000ULL 77#define TILEPRO_BPT_BUNDLE 0x400b3cae70166000ULL
diff --git a/arch/tile/include/uapi/arch/spr_def_32.h b/arch/tile/include/uapi/arch/spr_def_32.h
index c689446e6284..78daa3146d25 100644
--- a/arch/tile/include/uapi/arch/spr_def_32.h
+++ b/arch/tile/include/uapi/arch/spr_def_32.h
@@ -200,8 +200,6 @@
200#define SPR_SIM_CONTROL 0x4e0c 200#define SPR_SIM_CONTROL 0x4e0c
201#define SPR_SNCTL 0x0805 201#define SPR_SNCTL 0x0805
202#define SPR_SNCTL__FRZFABRIC_MASK 0x1 202#define SPR_SNCTL__FRZFABRIC_MASK 0x1
203#define SPR_SNCTL__FRZPROC_MASK 0x2
204#define SPR_SNPC 0x080b
205#define SPR_SNSTATIC 0x080c 203#define SPR_SNSTATIC 0x080c
206#define SPR_SYSTEM_SAVE_0_0 0x4b00 204#define SPR_SYSTEM_SAVE_0_0 0x4b00
207#define SPR_SYSTEM_SAVE_0_1 0x4b01 205#define SPR_SYSTEM_SAVE_0_1 0x4b01
diff --git a/arch/tile/include/uapi/asm/auxvec.h b/arch/tile/include/uapi/asm/auxvec.h
index 1d393edb0641..c93e92709f14 100644
--- a/arch/tile/include/uapi/asm/auxvec.h
+++ b/arch/tile/include/uapi/asm/auxvec.h
@@ -15,6 +15,7 @@
15#ifndef _ASM_TILE_AUXVEC_H 15#ifndef _ASM_TILE_AUXVEC_H
16#define _ASM_TILE_AUXVEC_H 16#define _ASM_TILE_AUXVEC_H
17 17
18/* No extensions to auxvec */ 18/* The vDSO location. */
19#define AT_SYSINFO_EHDR 33
19 20
20#endif /* _ASM_TILE_AUXVEC_H */ 21#endif /* _ASM_TILE_AUXVEC_H */
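
With AT_SYSINFO_EHDR now exported in the aux vector, userspace can locate the vDSO in the standard way; a glibc-based sketch:

	#include <sys/auxv.h>
	#include <elf.h>

	/* The kernel passes the vDSO ELF header address in the aux vector;
	 * getauxval() returns 0 if the entry is absent. */
	static const void *find_vdso(void)
	{
		return (const void *)getauxval(AT_SYSINFO_EHDR);
	}
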
diff --git a/arch/tile/include/uapi/asm/cachectl.h b/arch/tile/include/uapi/asm/cachectl.h
index af4c9f9154d1..572ddcad2090 100644
--- a/arch/tile/include/uapi/asm/cachectl.h
+++ b/arch/tile/include/uapi/asm/cachectl.h
@@ -29,8 +29,8 @@
29 * to honor the arguments at some point.) 29 * to honor the arguments at some point.)
30 * 30 *
31 * Flush and invalidation of memory can normally be performed with the 31 * Flush and invalidation of memory can normally be performed with the
32 * __insn_flush(), __insn_inv(), and __insn_finv() instructions from 32 * __insn_flush() and __insn_finv() instructions from userspace.
33 * userspace. The DCACHE option to the system call allows userspace 33 * The DCACHE option to the system call allows userspace
34 * to flush the entire L1+L2 data cache from the core. In this case, 34 * to flush the entire L1+L2 data cache from the core. In this case,
35 * the address and length arguments are not used. The DCACHE flush is 35 * the address and length arguments are not used. The DCACHE flush is
36 * restricted to the current core, not all cores in the address space. 36 * restricted to the current core, not all cores in the address space.
diff --git a/arch/tile/kernel/Makefile b/arch/tile/kernel/Makefile
index 5334be8e2538..27a2bf39dae8 100644
--- a/arch/tile/kernel/Makefile
+++ b/arch/tile/kernel/Makefile
@@ -3,11 +3,17 @@
3# 3#
4 4
5extra-y := vmlinux.lds head_$(BITS).o 5extra-y := vmlinux.lds head_$(BITS).o
6obj-y := backtrace.o entry.o irq.o messaging.o \ 6obj-y := backtrace.o entry.o hvglue.o irq.o messaging.o \
7 pci-dma.o proc.o process.o ptrace.o reboot.o \ 7 pci-dma.o proc.o process.o ptrace.o reboot.o \
8 setup.o signal.o single_step.o stack.o sys.o sysfs.o time.o traps.o \ 8 setup.o signal.o single_step.o stack.o sys.o \
9 sysfs.o time.o traps.o unaligned.o vdso.o \
9 intvec_$(BITS).o regs_$(BITS).o tile-desc_$(BITS).o 10 intvec_$(BITS).o regs_$(BITS).o tile-desc_$(BITS).o
10 11
12ifdef CONFIG_FUNCTION_TRACER
13CFLAGS_REMOVE_ftrace.o = -pg
14CFLAGS_REMOVE_early_printk.o = -pg
15endif
16
11obj-$(CONFIG_HARDWALL) += hardwall.o 17obj-$(CONFIG_HARDWALL) += hardwall.o
12obj-$(CONFIG_COMPAT) += compat.o compat_signal.o 18obj-$(CONFIG_COMPAT) += compat.o compat_signal.o
13obj-$(CONFIG_SMP) += smpboot.o smp.o tlb.o 19obj-$(CONFIG_SMP) += smpboot.o smp.o tlb.o
@@ -20,3 +26,9 @@ else
20obj-$(CONFIG_PCI) += pci.o 26obj-$(CONFIG_PCI) += pci.o
21endif 27endif
22obj-$(CONFIG_TILE_USB) += usb.o 28obj-$(CONFIG_TILE_USB) += usb.o
29obj-$(CONFIG_TILE_HVGLUE_TRACE) += hvglue_trace.o
30obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o mcount_64.o
31obj-$(CONFIG_KPROBES) += kprobes.o
32obj-$(CONFIG_KGDB) += kgdb.o
33
34obj-y += vdso/
diff --git a/arch/tile/kernel/asm-offsets.c b/arch/tile/kernel/asm-offsets.c
index 01ddf19cc36d..375e7c321eef 100644
--- a/arch/tile/kernel/asm-offsets.c
+++ b/arch/tile/kernel/asm-offsets.c
@@ -14,13 +14,6 @@
14 * Generates definitions from c-type structures used by assembly sources. 14 * Generates definitions from c-type structures used by assembly sources.
15 */ 15 */
16 16
17#include <linux/kbuild.h>
18#include <linux/thread_info.h>
19#include <linux/sched.h>
20#include <linux/hardirq.h>
21#include <linux/ptrace.h>
22#include <hv/hypervisor.h>
23
24/* Check for compatible compiler early in the build. */ 17/* Check for compatible compiler early in the build. */
25#ifdef CONFIG_TILEGX 18#ifdef CONFIG_TILEGX
26# ifndef __tilegx__ 19# ifndef __tilegx__
@@ -31,46 +24,61 @@
31# endif 24# endif
32#else 25#else
33# ifdef __tilegx__ 26# ifdef __tilegx__
34# error Can not build TILEPro/TILE64 configurations with tilegx compiler 27# error Can not build TILEPro configurations with tilegx compiler
35# endif 28# endif
36#endif 29#endif
37 30
31#include <linux/kbuild.h>
32#include <linux/thread_info.h>
33#include <linux/sched.h>
34#include <linux/hardirq.h>
35#include <linux/ptrace.h>
36#include <hv/hypervisor.h>
37
38void foo(void) 38void foo(void)
39{ 39{
40 DEFINE(SINGLESTEP_STATE_BUFFER_OFFSET, \ 40 DEFINE(SINGLESTEP_STATE_BUFFER_OFFSET,
41 offsetof(struct single_step_state, buffer)); 41 offsetof(struct single_step_state, buffer));
42 DEFINE(SINGLESTEP_STATE_FLAGS_OFFSET, \ 42 DEFINE(SINGLESTEP_STATE_FLAGS_OFFSET,
43 offsetof(struct single_step_state, flags)); 43 offsetof(struct single_step_state, flags));
44 DEFINE(SINGLESTEP_STATE_ORIG_PC_OFFSET, \ 44 DEFINE(SINGLESTEP_STATE_ORIG_PC_OFFSET,
45 offsetof(struct single_step_state, orig_pc)); 45 offsetof(struct single_step_state, orig_pc));
46 DEFINE(SINGLESTEP_STATE_NEXT_PC_OFFSET, \ 46 DEFINE(SINGLESTEP_STATE_NEXT_PC_OFFSET,
47 offsetof(struct single_step_state, next_pc)); 47 offsetof(struct single_step_state, next_pc));
48 DEFINE(SINGLESTEP_STATE_BRANCH_NEXT_PC_OFFSET, \ 48 DEFINE(SINGLESTEP_STATE_BRANCH_NEXT_PC_OFFSET,
49 offsetof(struct single_step_state, branch_next_pc)); 49 offsetof(struct single_step_state, branch_next_pc));
50 DEFINE(SINGLESTEP_STATE_UPDATE_VALUE_OFFSET, \ 50 DEFINE(SINGLESTEP_STATE_UPDATE_VALUE_OFFSET,
51 offsetof(struct single_step_state, update_value)); 51 offsetof(struct single_step_state, update_value));
52 52
53 DEFINE(THREAD_INFO_TASK_OFFSET, \ 53 DEFINE(THREAD_INFO_TASK_OFFSET,
54 offsetof(struct thread_info, task)); 54 offsetof(struct thread_info, task));
55 DEFINE(THREAD_INFO_FLAGS_OFFSET, \ 55 DEFINE(THREAD_INFO_FLAGS_OFFSET,
56 offsetof(struct thread_info, flags)); 56 offsetof(struct thread_info, flags));
57 DEFINE(THREAD_INFO_STATUS_OFFSET, \ 57 DEFINE(THREAD_INFO_STATUS_OFFSET,
58 offsetof(struct thread_info, status)); 58 offsetof(struct thread_info, status));
59 DEFINE(THREAD_INFO_HOMECACHE_CPU_OFFSET, \ 59 DEFINE(THREAD_INFO_HOMECACHE_CPU_OFFSET,
60 offsetof(struct thread_info, homecache_cpu)); 60 offsetof(struct thread_info, homecache_cpu));
61 DEFINE(THREAD_INFO_STEP_STATE_OFFSET, \ 61 DEFINE(THREAD_INFO_PREEMPT_COUNT_OFFSET,
62 offsetof(struct thread_info, preempt_count));
63 DEFINE(THREAD_INFO_STEP_STATE_OFFSET,
62 offsetof(struct thread_info, step_state)); 64 offsetof(struct thread_info, step_state));
65#ifdef __tilegx__
66 DEFINE(THREAD_INFO_UNALIGN_JIT_BASE_OFFSET,
67 offsetof(struct thread_info, unalign_jit_base));
68 DEFINE(THREAD_INFO_UNALIGN_JIT_TMP_OFFSET,
69 offsetof(struct thread_info, unalign_jit_tmp));
70#endif
63 71
64 DEFINE(TASK_STRUCT_THREAD_KSP_OFFSET, 72 DEFINE(TASK_STRUCT_THREAD_KSP_OFFSET,
65 offsetof(struct task_struct, thread.ksp)); 73 offsetof(struct task_struct, thread.ksp));
66 DEFINE(TASK_STRUCT_THREAD_PC_OFFSET, 74 DEFINE(TASK_STRUCT_THREAD_PC_OFFSET,
67 offsetof(struct task_struct, thread.pc)); 75 offsetof(struct task_struct, thread.pc));
68 76
69 DEFINE(HV_TOPOLOGY_WIDTH_OFFSET, \ 77 DEFINE(HV_TOPOLOGY_WIDTH_OFFSET,
70 offsetof(HV_Topology, width)); 78 offsetof(HV_Topology, width));
71 DEFINE(HV_TOPOLOGY_HEIGHT_OFFSET, \ 79 DEFINE(HV_TOPOLOGY_HEIGHT_OFFSET,
72 offsetof(HV_Topology, height)); 80 offsetof(HV_Topology, height));
73 81
74 DEFINE(IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET, \ 82 DEFINE(IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET,
75 offsetof(irq_cpustat_t, irq_syscall_count)); 83 offsetof(irq_cpustat_t, irq_syscall_count));
76} 84}
diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c
index d0a052e725be..85e00b2f39bf 100644
--- a/arch/tile/kernel/compat_signal.c
+++ b/arch/tile/kernel/compat_signal.c
@@ -32,6 +32,7 @@
32#include <asm/ucontext.h> 32#include <asm/ucontext.h>
33#include <asm/sigframe.h> 33#include <asm/sigframe.h>
34#include <asm/syscalls.h> 34#include <asm/syscalls.h>
35#include <asm/vdso.h>
35#include <arch/interrupts.h> 36#include <arch/interrupts.h>
36 37
37struct compat_ucontext { 38struct compat_ucontext {
@@ -227,7 +228,7 @@ int compat_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
227 if (err) 228 if (err)
228 goto give_sigsegv; 229 goto give_sigsegv;
229 230
230 restorer = VDSO_BASE; 231 restorer = VDSO_SYM(&__vdso_rt_sigreturn);
231 if (ka->sa.sa_flags & SA_RESTORER) 232 if (ka->sa.sa_flags & SA_RESTORER)
232 restorer = ptr_to_compat_reg(ka->sa.sa_restorer); 233 restorer = ptr_to_compat_reg(ka->sa.sa_restorer);
233 234
diff --git a/arch/tile/kernel/early_printk.c b/arch/tile/kernel/early_printk.c
index 34d72a151bf3..b608e00e7f6d 100644
--- a/arch/tile/kernel/early_printk.c
+++ b/arch/tile/kernel/early_printk.c
@@ -23,19 +23,24 @@
23 23
24static void early_hv_write(struct console *con, const char *s, unsigned n) 24static void early_hv_write(struct console *con, const char *s, unsigned n)
25{ 25{
26 hv_console_write((HV_VirtAddr) s, n); 26 tile_console_write(s, n);
27
28 /*
29 * Convert NL to NLCR (close enough to CRNL) during early boot.
30 * We assume newlines are at the ends of strings, which turns out
31 * to be good enough for early boot console output.
32 */
33 if (n && s[n-1] == '\n')
34 tile_console_write("\r", 1);
27} 35}
28 36
29static struct console early_hv_console = { 37static struct console early_hv_console = {
30 .name = "earlyhv", 38 .name = "earlyhv",
31 .write = early_hv_write, 39 .write = early_hv_write,
32 .flags = CON_PRINTBUFFER, 40 .flags = CON_PRINTBUFFER | CON_BOOT,
33 .index = -1, 41 .index = -1,
34}; 42};
35 43
36/* Direct interface for emergencies */
37static int early_console_complete;
38
39void early_panic(const char *fmt, ...) 44void early_panic(const char *fmt, ...)
40{ 45{
41 va_list ap; 46 va_list ap;
@@ -43,51 +48,21 @@ void early_panic(const char *fmt, ...)
43 va_start(ap, fmt); 48 va_start(ap, fmt);
44 early_printk("Kernel panic - not syncing: "); 49 early_printk("Kernel panic - not syncing: ");
45 early_vprintk(fmt, ap); 50 early_vprintk(fmt, ap);
46 early_console->write(early_console, "\n", 1); 51 early_printk("\n");
47 va_end(ap); 52 va_end(ap);
48 dump_stack(); 53 dump_stack();
49 hv_halt(); 54 hv_halt();
50} 55}
51 56
52static int __initdata keep_early;
53
54static int __init setup_early_printk(char *str) 57static int __init setup_early_printk(char *str)
55{ 58{
56 if (early_console) 59 if (early_console)
57 return 1; 60 return 1;
58 61
59 if (str != NULL && strncmp(str, "keep", 4) == 0)
60 keep_early = 1;
61
62 early_console = &early_hv_console; 62 early_console = &early_hv_console;
63 register_console(early_console); 63 register_console(early_console);
64 64
65 return 0; 65 return 0;
66} 66}
67 67
68void __init disable_early_printk(void)
69{
70 early_console_complete = 1;
71 if (!early_console)
72 return;
73 if (!keep_early) {
74 early_printk("disabling early console\n");
75 unregister_console(early_console);
76 early_console = NULL;
77 } else {
78 early_printk("keeping early console\n");
79 }
80}
81
82void warn_early_printk(void)
83{
84 if (early_console_complete || early_console)
85 return;
86 early_printk("\
87Machine shutting down before console output is fully initialized.\n\
88You may wish to reboot and add the option 'earlyprintk' to your\n\
89boot command line to see any diagnostic early console output.\n\
90");
91}
92
93early_param("earlyprintk", setup_early_printk); 68early_param("earlyprintk", setup_early_printk);
diff --git a/arch/tile/kernel/entry.S b/arch/tile/kernel/entry.S
index f116cb0bce20..3d9175992a20 100644
--- a/arch/tile/kernel/entry.S
+++ b/arch/tile/kernel/entry.S
@@ -27,22 +27,6 @@ STD_ENTRY(current_text_addr)
27 { move r0, lr; jrp lr } 27 { move r0, lr; jrp lr }
28 STD_ENDPROC(current_text_addr) 28 STD_ENDPROC(current_text_addr)
29 29
30/*
31 * We don't run this function directly, but instead copy it to a page
32 * we map into every user process. See vdso_setup().
33 *
34 * Note that libc has a copy of this function that it uses to compare
35 * against the PC when a stack backtrace ends, so if this code is
36 * changed, the libc implementation(s) should also be updated.
37 */
38 .pushsection .data
39ENTRY(__rt_sigreturn)
40 moveli TREG_SYSCALL_NR_NAME,__NR_rt_sigreturn
41 swint1
42 ENDPROC(__rt_sigreturn)
43 ENTRY(__rt_sigreturn_end)
44 .popsection
45
46STD_ENTRY(dump_stack) 30STD_ENTRY(dump_stack)
47 { move r2, lr; lnk r1 } 31 { move r2, lr; lnk r1 }
48 { move r4, r52; addli r1, r1, dump_stack - . } 32 { move r4, r52; addli r1, r1, dump_stack - . }
diff --git a/arch/tile/kernel/ftrace.c b/arch/tile/kernel/ftrace.c
new file mode 100644
index 000000000000..f1c452092eeb
--- /dev/null
+++ b/arch/tile/kernel/ftrace.c
@@ -0,0 +1,246 @@
1/*
2 * Copyright 2012 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 *
14 * TILE-Gx specific ftrace support
15 */
16
17#include <linux/ftrace.h>
18#include <linux/uaccess.h>
19
20#include <asm/cacheflush.h>
21#include <asm/ftrace.h>
22#include <asm/sections.h>
23
24#include <arch/opcode.h>
25
26#ifdef CONFIG_DYNAMIC_FTRACE
27
28static inline tilegx_bundle_bits NOP(void)
29{
30 return create_UnaryOpcodeExtension_X0(FNOP_UNARY_OPCODE_X0) |
31 create_RRROpcodeExtension_X0(UNARY_RRR_0_OPCODE_X0) |
32 create_Opcode_X0(RRR_0_OPCODE_X0) |
33 create_UnaryOpcodeExtension_X1(NOP_UNARY_OPCODE_X1) |
34 create_RRROpcodeExtension_X1(UNARY_RRR_0_OPCODE_X1) |
35 create_Opcode_X1(RRR_0_OPCODE_X1);
36}
37
38static int machine_stopped __read_mostly;
39
40int ftrace_arch_code_modify_prepare(void)
41{
42 machine_stopped = 1;
43 return 0;
44}
45
46int ftrace_arch_code_modify_post_process(void)
47{
48 flush_icache_range(0, CHIP_L1I_CACHE_SIZE());
49 machine_stopped = 0;
50 return 0;
51}
52
53/*
54 * Put { move r10, lr; jal ftrace_caller } in a bundle; this lets the dynamic
55 * tracer add just one cycle of overhead to every kernel function when disabled.
56 */
57static unsigned long ftrace_gen_branch(unsigned long pc, unsigned long addr,
58 bool link)
59{
60 tilegx_bundle_bits opcode_x0, opcode_x1;
61 long pcrel_by_instr = (addr - pc) >> TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES;
62
63 if (link) {
64 /* opcode: jal addr */
65 opcode_x1 =
66 create_Opcode_X1(JUMP_OPCODE_X1) |
67 create_JumpOpcodeExtension_X1(JAL_JUMP_OPCODE_X1) |
68 create_JumpOff_X1(pcrel_by_instr);
69 } else {
70 /* opcode: j addr */
71 opcode_x1 =
72 create_Opcode_X1(JUMP_OPCODE_X1) |
73 create_JumpOpcodeExtension_X1(J_JUMP_OPCODE_X1) |
74 create_JumpOff_X1(pcrel_by_instr);
75 }
76
77 if (addr == FTRACE_ADDR) {
78 /* opcode: or r10, lr, zero */
79 opcode_x0 =
80 create_Dest_X0(10) |
81 create_SrcA_X0(TREG_LR) |
82 create_SrcB_X0(TREG_ZERO) |
83 create_RRROpcodeExtension_X0(OR_RRR_0_OPCODE_X0) |
84 create_Opcode_X0(RRR_0_OPCODE_X0);
85 } else {
86 /* opcode: fnop */
87 opcode_x0 =
88 create_UnaryOpcodeExtension_X0(FNOP_UNARY_OPCODE_X0) |
89 create_RRROpcodeExtension_X0(UNARY_RRR_0_OPCODE_X0) |
90 create_Opcode_X0(RRR_0_OPCODE_X0);
91 }
92
93 return opcode_x1 | opcode_x0;
94}
95
96static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
97{
98 return NOP();
99}
100
101static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
102{
103 return ftrace_gen_branch(pc, addr, true);
104}
105
106static int ftrace_modify_code(unsigned long pc, unsigned long old,
107 unsigned long new)
108{
109 unsigned long pc_wr;
110
111 /* Check that the address is in kernel text space or module space. */
112 if (!kernel_text_address(pc))
113 return -EINVAL;
114
115 /* Operate on writable kernel text mapping. */
116 pc_wr = pc - MEM_SV_START + PAGE_OFFSET;
117
118 if (probe_kernel_write((void *)pc_wr, &new, MCOUNT_INSN_SIZE))
119 return -EPERM;
120
121 smp_wmb();
122
123 if (!machine_stopped && num_online_cpus() > 1)
124 flush_icache_range(pc, pc + MCOUNT_INSN_SIZE);
125
126 return 0;
127}
128
129int ftrace_update_ftrace_func(ftrace_func_t func)
130{
131 unsigned long pc, old;
132 unsigned long new;
133 int ret;
134
135 pc = (unsigned long)&ftrace_call;
136 memcpy(&old, &ftrace_call, MCOUNT_INSN_SIZE);
137 new = ftrace_call_replace(pc, (unsigned long)func);
138
139 ret = ftrace_modify_code(pc, old, new);
140
141 return ret;
142}
143
144int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
145{
146 unsigned long new, old;
147 unsigned long ip = rec->ip;
148
149 old = ftrace_nop_replace(rec);
150 new = ftrace_call_replace(ip, addr);
151
152 return ftrace_modify_code(rec->ip, old, new);
153}
154
155int ftrace_make_nop(struct module *mod,
156 struct dyn_ftrace *rec, unsigned long addr)
157{
158 unsigned long ip = rec->ip;
159 unsigned long old;
160 unsigned long new;
161 int ret;
162
163 old = ftrace_call_replace(ip, addr);
164 new = ftrace_nop_replace(rec);
165 ret = ftrace_modify_code(ip, old, new);
166
167 return ret;
168}
169
170int __init ftrace_dyn_arch_init(void *data)
171{
172 *(unsigned long *)data = 0;
173
174 return 0;
175}
176#endif /* CONFIG_DYNAMIC_FTRACE */
177
178#ifdef CONFIG_FUNCTION_GRAPH_TRACER
179void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
180 unsigned long frame_pointer)
181{
182 unsigned long return_hooker = (unsigned long) &return_to_handler;
183 struct ftrace_graph_ent trace;
184 unsigned long old;
185 int err;
186
187 if (unlikely(atomic_read(&current->tracing_graph_pause)))
188 return;
189
190 old = *parent;
191 *parent = return_hooker;
192
193 err = ftrace_push_return_trace(old, self_addr, &trace.depth,
194 frame_pointer);
195 if (err == -EBUSY) {
196 *parent = old;
197 return;
198 }
199
200 trace.func = self_addr;
201
202 /* Only trace if the calling function expects to */
203 if (!ftrace_graph_entry(&trace)) {
204 current->curr_ret_stack--;
205 *parent = old;
206 }
207}
208
209#ifdef CONFIG_DYNAMIC_FTRACE
210extern unsigned long ftrace_graph_call;
211
212static int __ftrace_modify_caller(unsigned long *callsite,
213 void (*func) (void), bool enable)
214{
215 unsigned long caller_fn = (unsigned long) func;
216 unsigned long pc = (unsigned long) callsite;
217 unsigned long branch = ftrace_gen_branch(pc, caller_fn, false);
218 unsigned long nop = NOP();
219 unsigned long old = enable ? nop : branch;
220 unsigned long new = enable ? branch : nop;
221
222 return ftrace_modify_code(pc, old, new);
223}
224
225static int ftrace_modify_graph_caller(bool enable)
226{
227 int ret;
228
229 ret = __ftrace_modify_caller(&ftrace_graph_call,
230 ftrace_graph_caller,
231 enable);
232
233 return ret;
234}
235
236int ftrace_enable_ftrace_graph_caller(void)
237{
238 return ftrace_modify_graph_caller(true);
239}
240
241int ftrace_disable_ftrace_graph_caller(void)
242{
243 return ftrace_modify_graph_caller(false);
244}
245#endif /* CONFIG_DYNAMIC_FTRACE */
246#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/tile/kernel/hardwall.c b/arch/tile/kernel/hardwall.c
index 38ac189d9575..df27a1fd94a3 100644
--- a/arch/tile/kernel/hardwall.c
+++ b/arch/tile/kernel/hardwall.c
@@ -272,9 +272,9 @@ static void hardwall_setup_func(void *info)
272 struct hardwall_info *r = info; 272 struct hardwall_info *r = info;
273 struct hardwall_type *hwt = r->type; 273 struct hardwall_type *hwt = r->type;
274 274
275 int cpu = smp_processor_id(); 275 int cpu = smp_processor_id(); /* on_each_cpu disables preemption */
276 int x = cpu % smp_width; 276 int x = cpu_x(cpu);
277 int y = cpu / smp_width; 277 int y = cpu_y(cpu);
278 int bits = 0; 278 int bits = 0;
279 if (x == r->ulhc_x) 279 if (x == r->ulhc_x)
280 bits |= W_PROTECT; 280 bits |= W_PROTECT;
@@ -317,6 +317,7 @@ static void hardwall_protect_rectangle(struct hardwall_info *r)
317 on_each_cpu_mask(&rect_cpus, hardwall_setup_func, r, 1); 317 on_each_cpu_mask(&rect_cpus, hardwall_setup_func, r, 1);
318} 318}
319 319
320/* Entered from INT_xDN_FIREWALL interrupt vector with irqs disabled. */
320void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num) 321void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
321{ 322{
322 struct hardwall_info *rect; 323 struct hardwall_info *rect;
@@ -325,7 +326,6 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
325 struct siginfo info; 326 struct siginfo info;
326 int cpu = smp_processor_id(); 327 int cpu = smp_processor_id();
327 int found_processes; 328 int found_processes;
328 unsigned long flags;
329 struct pt_regs *old_regs = set_irq_regs(regs); 329 struct pt_regs *old_regs = set_irq_regs(regs);
330 330
331 irq_enter(); 331 irq_enter();
@@ -346,7 +346,7 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
346 BUG_ON(hwt->disabled); 346 BUG_ON(hwt->disabled);
347 347
348 /* This tile trapped a network access; find the rectangle. */ 348 /* This tile trapped a network access; find the rectangle. */
349 spin_lock_irqsave(&hwt->lock, flags); 349 spin_lock(&hwt->lock);
350 list_for_each_entry(rect, &hwt->list, list) { 350 list_for_each_entry(rect, &hwt->list, list) {
351 if (cpumask_test_cpu(cpu, &rect->cpumask)) 351 if (cpumask_test_cpu(cpu, &rect->cpumask))
352 break; 352 break;
@@ -401,7 +401,7 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
401 pr_notice("hardwall: no associated processes!\n"); 401 pr_notice("hardwall: no associated processes!\n");
402 402
403 done: 403 done:
404 spin_unlock_irqrestore(&hwt->lock, flags); 404 spin_unlock(&hwt->lock);
405 405
406 /* 406 /*
407 * We have to disable firewall interrupts now, or else when we 407 * We have to disable firewall interrupts now, or else when we
@@ -540,6 +540,14 @@ static struct hardwall_info *hardwall_create(struct hardwall_type *hwt,
540 } 540 }
541 } 541 }
542 542
543 /*
544 * Eliminate cpus that are not part of this Linux client.
545 * Note that this allows for configurations that we might not want to
546 * support, such as one client on every even cpu, another client on
547 * every odd cpu.
548 */
549 cpumask_and(&info->cpumask, &info->cpumask, cpu_online_mask);
550
543 /* Confirm it doesn't overlap and add it to the list. */ 551 /* Confirm it doesn't overlap and add it to the list. */
544 spin_lock_irqsave(&hwt->lock, flags); 552 spin_lock_irqsave(&hwt->lock, flags);
545 list_for_each_entry(iter, &hwt->list, list) { 553 list_for_each_entry(iter, &hwt->list, list) {
@@ -612,7 +620,7 @@ static int hardwall_activate(struct hardwall_info *info)
612 620
613/* 621/*
614 * Deactivate a task's hardwall. Must hold lock for hardwall_type. 622 * Deactivate a task's hardwall. Must hold lock for hardwall_type.
615 * This method may be called from free_task(), so we don't want to 623 * This method may be called from exit_thread(), so we don't want to
616 * rely on too many fields of struct task_struct still being valid. 624 * rely on too many fields of struct task_struct still being valid.
617 * We assume the cpus_allowed, pid, and comm fields are still valid. 625 * We assume the cpus_allowed, pid, and comm fields are still valid.
618 */ 626 */
@@ -653,7 +661,7 @@ static int hardwall_deactivate(struct hardwall_type *hwt,
653 return -EINVAL; 661 return -EINVAL;
654 662
655 printk(KERN_DEBUG "Pid %d (%s) deactivated for %s hardwall: cpu %d\n", 663 printk(KERN_DEBUG "Pid %d (%s) deactivated for %s hardwall: cpu %d\n",
656 task->pid, task->comm, hwt->name, smp_processor_id()); 664 task->pid, task->comm, hwt->name, raw_smp_processor_id());
657 return 0; 665 return 0;
658} 666}
659 667
@@ -795,8 +803,8 @@ static void reset_xdn_network_state(struct hardwall_type *hwt)
795 /* Reset UDN coordinates to their standard value */ 803 /* Reset UDN coordinates to their standard value */
796 { 804 {
797 unsigned int cpu = smp_processor_id(); 805 unsigned int cpu = smp_processor_id();
798 unsigned int x = cpu % smp_width; 806 unsigned int x = cpu_x(cpu);
799 unsigned int y = cpu / smp_width; 807 unsigned int y = cpu_y(cpu);
800 __insn_mtspr(SPR_UDN_TILE_COORD, (x << 18) | (y << 7)); 808 __insn_mtspr(SPR_UDN_TILE_COORD, (x << 18) | (y << 7));
801 } 809 }
802 810
diff --git a/arch/tile/kernel/head_32.S b/arch/tile/kernel/head_32.S
index ac115307e5e4..8d5b40ff2922 100644
--- a/arch/tile/kernel/head_32.S
+++ b/arch/tile/kernel/head_32.S
@@ -39,12 +39,12 @@ ENTRY(_start)
39 } 39 }
40 { 40 {
41 moveli r0, _HV_VERSION_OLD_HV_INIT 41 moveli r0, _HV_VERSION_OLD_HV_INIT
42 jal hv_init 42 jal _hv_init
43 } 43 }
44 /* Get a reasonable default ASID in r0 */ 44 /* Get a reasonable default ASID in r0 */
45 { 45 {
46 move r0, zero 46 move r0, zero
47 jal hv_inquire_asid 47 jal _hv_inquire_asid
48 } 48 }
49 /* Install the default page table */ 49 /* Install the default page table */
50 { 50 {
@@ -64,7 +64,7 @@ ENTRY(_start)
64 auli r0, r0, ha16(swapper_pg_dir - PAGE_OFFSET) 64 auli r0, r0, ha16(swapper_pg_dir - PAGE_OFFSET)
65 } 65 }
66 { 66 {
67 inv r6 67 finv r6
68 move r1, zero /* high 32 bits of CPA is zero */ 68 move r1, zero /* high 32 bits of CPA is zero */
69 } 69 }
70 { 70 {
@@ -73,12 +73,12 @@ ENTRY(_start)
73 } 73 }
74 { 74 {
75 auli lr, lr, ha16(1f) 75 auli lr, lr, ha16(1f)
76 j hv_install_context 76 j _hv_install_context
77 } 77 }
781: 781:
79 79
80 /* Get our processor number and save it away in SAVE_K_0. */ 80 /* Get our processor number and save it away in SAVE_K_0. */
81 jal hv_inquire_topology 81 jal _hv_inquire_topology
82 mulll_uu r4, r1, r2 /* r1 == y, r2 == width */ 82 mulll_uu r4, r1, r2 /* r1 == y, r2 == width */
83 add r4, r4, r0 /* r0 == x, so r4 == cpu == y*width + x */ 83 add r4, r4, r0 /* r0 == x, so r4 == cpu == y*width + x */
84 84
@@ -86,7 +86,7 @@ ENTRY(_start)
86 /* 86 /*
87 * Load up our per-cpu offset. When the first (master) tile 87 * Load up our per-cpu offset. When the first (master) tile
88 * boots, this value is still zero, so we will load boot_pc 88 * boots, this value is still zero, so we will load boot_pc
89 * with start_kernel, and boot_sp with init_stack + THREAD_SIZE. 89 * with start_kernel, and boot_sp at the top of init_stack.
90 * The master tile initializes the per-cpu offset array, so that 90 * The master tile initializes the per-cpu offset array, so that
91 * when subsequent (secondary) tiles boot, they will instead load 91 * when subsequent (secondary) tiles boot, they will instead load
92 * from their per-cpu versions of boot_sp and boot_pc. 92 * from their per-cpu versions of boot_sp and boot_pc.
@@ -126,7 +126,6 @@ ENTRY(_start)
126 lw sp, r1 126 lw sp, r1
127 or r4, sp, r4 127 or r4, sp, r4
128 mtspr SPR_SYSTEM_SAVE_K_0, r4 /* save ksp0 + cpu */ 128 mtspr SPR_SYSTEM_SAVE_K_0, r4 /* save ksp0 + cpu */
129 addi sp, sp, -STACK_TOP_DELTA
130 { 129 {
131 move lr, zero /* stop backtraces in the called function */ 130 move lr, zero /* stop backtraces in the called function */
132 jr r0 131 jr r0
@@ -163,8 +162,8 @@ ENTRY(swapper_pg_dir)
163 .set addr, addr + PGDIR_SIZE 162 .set addr, addr + PGDIR_SIZE
164 .endr 163 .endr
165 164
166 /* The true text VAs are mapped as VA = PA + MEM_SV_INTRPT */ 165 /* The true text VAs are mapped as VA = PA + MEM_SV_START */
167 PTE MEM_SV_INTRPT, 0, (1 << (HV_PTE_INDEX_READABLE - 32)) | \ 166 PTE MEM_SV_START, 0, (1 << (HV_PTE_INDEX_READABLE - 32)) | \
168 (1 << (HV_PTE_INDEX_EXECUTABLE - 32)) 167 (1 << (HV_PTE_INDEX_EXECUTABLE - 32))
169 .org swapper_pg_dir + PGDIR_SIZE 168 .org swapper_pg_dir + PGDIR_SIZE
170 END(swapper_pg_dir) 169 END(swapper_pg_dir)
diff --git a/arch/tile/kernel/head_64.S b/arch/tile/kernel/head_64.S
index 6093964fa5c7..bd0e12f283f3 100644
--- a/arch/tile/kernel/head_64.S
+++ b/arch/tile/kernel/head_64.S
@@ -25,6 +25,15 @@
25#include <arch/chip.h> 25#include <arch/chip.h>
26#include <arch/spr_def.h> 26#include <arch/spr_def.h>
27 27
28/* Extract two 32-bit bit values that were read into one register. */
29#ifdef __BIG_ENDIAN__
30#define GET_FIRST_INT(rd, rs) shrsi rd, rs, 32
31#define GET_SECOND_INT(rd, rs) addxi rd, rs, 0
32#else
33#define GET_FIRST_INT(rd, rs) addxi rd, rs, 0
34#define GET_SECOND_INT(rd, rs) shrsi rd, rs, 32
35#endif
36
28/* 37/*
29 * This module contains the entry code for kernel images. It performs the 38 * This module contains the entry code for kernel images. It performs the
30 * minimal setup needed to call the generic C routines. 39 * minimal setup needed to call the generic C routines.
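
The assembly macros above select the low or high 32-bit half of a register depending on byte order; a purely illustrative C restatement of the same extraction:

	#include <stdint.h>

	/* Two 32-bit values read into one 64-bit register: the "first" int
	 * is the low half on little-endian, the high half on big-endian. */
	static inline int32_t get_first_int(uint64_t rs)
	{
	#ifdef __BIG_ENDIAN__
		return (int32_t)(rs >> 32);	/* shrsi rd, rs, 32 */
	#else
		return (int32_t)rs;		/* addxi rd, rs, 0 */
	#endif
	}

	static inline int32_t get_second_int(uint64_t rs)
	{
	#ifdef __BIG_ENDIAN__
		return (int32_t)rs;
	#else
		return (int32_t)(rs >> 32);
	#endif
	}
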
@@ -46,11 +55,11 @@ ENTRY(_start)
46 movei r2, TILE_CHIP_REV 55 movei r2, TILE_CHIP_REV
47 movei r3, KERNEL_PL 56 movei r3, KERNEL_PL
48 } 57 }
49 jal hv_init 58 jal _hv_init
50 /* Get a reasonable default ASID in r0 */ 59 /* Get a reasonable default ASID in r0 */
51 { 60 {
52 move r0, zero 61 move r0, zero
53 jal hv_inquire_asid 62 jal _hv_inquire_asid
54 } 63 }
55 64
56 /* 65 /*
@@ -61,7 +70,7 @@ ENTRY(_start)
61 * other CPUs should see a properly-constructed page table. 70 * other CPUs should see a properly-constructed page table.
62 */ 71 */
63 { 72 {
64 v4int_l r2, zero, r0 /* ASID for hv_install_context */ 73 GET_FIRST_INT(r2, r0) /* ASID for hv_install_context */
65 moveli r4, hw1_last(swapper_pgprot - PAGE_OFFSET) 74 moveli r4, hw1_last(swapper_pgprot - PAGE_OFFSET)
66 } 75 }
67 { 76 {
@@ -77,7 +86,7 @@ ENTRY(_start)
77 { 86 {
78 /* After initializing swapper_pgprot, HV_PTE_GLOBAL is set. */ 87 /* After initializing swapper_pgprot, HV_PTE_GLOBAL is set. */
79 bfextu r7, r1, HV_PTE_INDEX_GLOBAL, HV_PTE_INDEX_GLOBAL 88 bfextu r7, r1, HV_PTE_INDEX_GLOBAL, HV_PTE_INDEX_GLOBAL
80 inv r4 89 finv r4
81 } 90 }
82 bnez r7, .Lno_write 91 bnez r7, .Lno_write
83 { 92 {
@@ -121,29 +130,24 @@ ENTRY(_start)
121 } 130 }
122 { 131 {
123 moveli r3, CTX_PAGE_FLAG 132 moveli r3, CTX_PAGE_FLAG
124 j hv_install_context 133 j _hv_install_context
125 } 134 }
1261: 1351:
127 136
128 /* Install the interrupt base. */ 137 /* Install the interrupt base. */
129 moveli r0, hw2_last(MEM_SV_START) 138 moveli r0, hw2_last(intrpt_start)
130 shl16insli r0, r0, hw1(MEM_SV_START) 139 shl16insli r0, r0, hw1(intrpt_start)
131 shl16insli r0, r0, hw0(MEM_SV_START) 140 shl16insli r0, r0, hw0(intrpt_start)
132 mtspr SPR_INTERRUPT_VECTOR_BASE_K, r0 141 mtspr SPR_INTERRUPT_VECTOR_BASE_K, r0
133 142
134 /* 143 /* Get our processor number and save it away in SAVE_K_0. */
135 * Get our processor number and save it away in SAVE_K_0. 144 jal _hv_inquire_topology
136 * Extract stuff from the topology structure: r4 = y, r6 = x,
137 * r5 = width. FIXME: consider whether we want to just make these
138 * 64-bit values (and if so fix smp_topology write below, too).
139 */
140 jal hv_inquire_topology
141 { 145 {
142 v4int_l r5, zero, r1 /* r5 = width */ 146 GET_FIRST_INT(r5, r1) /* r5 = width */
143 shrui r4, r0, 32 /* r4 = y */ 147 GET_SECOND_INT(r4, r0) /* r4 = y */
144 } 148 }
145 { 149 {
146 v4int_l r6, zero, r0 /* r6 = x */ 150 GET_FIRST_INT(r6, r0) /* r6 = x */
147 mul_lu_lu r4, r4, r5 151 mul_lu_lu r4, r4, r5
148 } 152 }
149 { 153 {
@@ -154,7 +158,7 @@ ENTRY(_start)
154 /* 158 /*
155 * Load up our per-cpu offset. When the first (master) tile 159 * Load up our per-cpu offset. When the first (master) tile
156 * boots, this value is still zero, so we will load boot_pc 160 * boots, this value is still zero, so we will load boot_pc
157 * with start_kernel, and boot_sp with init_stack + THREAD_SIZE. 161 * with start_kernel, and boot_sp at the top of init_stack.
158 * The master tile initializes the per-cpu offset array, so that 162 * The master tile initializes the per-cpu offset array, so that
159 * when subsequent (secondary) tiles boot, they will instead load 163 * when subsequent (secondary) tiles boot, they will instead load
160 * from their per-cpu versions of boot_sp and boot_pc. 164 * from their per-cpu versions of boot_sp and boot_pc.
@@ -198,9 +202,9 @@ ENTRY(_start)
198 } 202 }
199 ld r0, r0 203 ld r0, r0
200 ld sp, r1 204 ld sp, r1
201 or r4, sp, r4 205 shli r4, r4, CPU_SHIFT
206 bfins r4, sp, 0, CPU_SHIFT-1
202 mtspr SPR_SYSTEM_SAVE_K_0, r4 /* save ksp0 + cpu */ 207 mtspr SPR_SYSTEM_SAVE_K_0, r4 /* save ksp0 + cpu */
203 addi sp, sp, -STACK_TOP_DELTA
204 { 208 {
205 move lr, zero /* stop backtraces in the called function */ 209 move lr, zero /* stop backtraces in the called function */
206 jr r0 210 jr r0
diff --git a/arch/tile/kernel/hvglue.S b/arch/tile/kernel/hvglue.S
new file mode 100644
index 000000000000..2ab456622391
--- /dev/null
+++ b/arch/tile/kernel/hvglue.S
@@ -0,0 +1,74 @@
1/* Hypervisor call vector addresses; see <hv/hypervisor.h> */
2.macro gensym sym, val, size
3.org \val
4.global _\sym
5.type _\sym,function
6_\sym:
7.size _\sym,\size
8#ifndef CONFIG_TILE_HVGLUE_TRACE
9.globl \sym
10.set \sym,_\sym
11#endif
12.endm
13
14.section .hvglue,"x",@nobits
15.align 8
16gensym hv_init, 0x20, 32
17gensym hv_install_context, 0x40, 32
18gensym hv_sysconf, 0x60, 32
19gensym hv_get_rtc, 0x80, 32
20gensym hv_set_rtc, 0xa0, 32
21gensym hv_flush_asid, 0xc0, 32
22gensym hv_flush_page, 0xe0, 32
23gensym hv_flush_pages, 0x100, 32
24gensym hv_restart, 0x120, 32
25gensym hv_halt, 0x140, 32
26gensym hv_power_off, 0x160, 32
27gensym hv_inquire_physical, 0x180, 32
28gensym hv_inquire_memory_controller, 0x1a0, 32
29gensym hv_inquire_virtual, 0x1c0, 32
30gensym hv_inquire_asid, 0x1e0, 32
31gensym hv_nanosleep, 0x200, 32
32gensym hv_console_read_if_ready, 0x220, 32
33gensym hv_console_write, 0x240, 32
34gensym hv_downcall_dispatch, 0x260, 32
35gensym hv_inquire_topology, 0x280, 32
36gensym hv_fs_findfile, 0x2a0, 32
37gensym hv_fs_fstat, 0x2c0, 32
38gensym hv_fs_pread, 0x2e0, 32
39gensym hv_physaddr_read64, 0x300, 32
40gensym hv_physaddr_write64, 0x320, 32
41gensym hv_get_command_line, 0x340, 32
42gensym hv_set_caching, 0x360, 32
43gensym hv_bzero_page, 0x380, 32
44gensym hv_register_message_state, 0x3a0, 32
45gensym hv_send_message, 0x3c0, 32
46gensym hv_receive_message, 0x3e0, 32
47gensym hv_inquire_context, 0x400, 32
48gensym hv_start_all_tiles, 0x420, 32
49gensym hv_dev_open, 0x440, 32
50gensym hv_dev_close, 0x460, 32
51gensym hv_dev_pread, 0x480, 32
52gensym hv_dev_pwrite, 0x4a0, 32
53gensym hv_dev_poll, 0x4c0, 32
54gensym hv_dev_poll_cancel, 0x4e0, 32
55gensym hv_dev_preada, 0x500, 32
56gensym hv_dev_pwritea, 0x520, 32
57gensym hv_flush_remote, 0x540, 32
58gensym hv_console_putc, 0x560, 32
59gensym hv_inquire_tiles, 0x580, 32
60gensym hv_confstr, 0x5a0, 32
61gensym hv_reexec, 0x5c0, 32
62gensym hv_set_command_line, 0x5e0, 32
63gensym hv_clear_intr, 0x600, 32
64gensym hv_enable_intr, 0x620, 32
65gensym hv_disable_intr, 0x640, 32
66gensym hv_raise_intr, 0x660, 32
67gensym hv_trigger_ipi, 0x680, 32
68gensym hv_store_mapping, 0x6a0, 32
69gensym hv_inquire_realpa, 0x6c0, 32
70gensym hv_flush_all, 0x6e0, 32
71gensym hv_get_ipi_pte, 0x700, 32
72gensym hv_set_pte_super_shift, 0x720, 32
73gensym hv_console_set_ipi, 0x7e0, 32
74gensym hv_glue_internals, 0x800, 30720
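Each gensym above carves out a 32-byte slot at a fixed offset in the .hvglue section; since the section is "x",@nobits, the kernel image carries no bytes for it and the hypervisor's call gates appear at those addresses at run time. A minimal sketch of what calling through such a slot amounts to (the helper and base parameter are hypothetical, for exposition only):

	typedef long (*hv_sysconf_fn)(int query);

	static long call_hv_sysconf(unsigned long hvglue_base, int query)
	{
		/* hv_sysconf sits 0x60 bytes into the glue area (see gensym). */
		hv_sysconf_fn fn = (hv_sysconf_fn)(hvglue_base + 0x60);
		return fn(query);
	}

With CONFIG_TILE_HVGLUE_TRACE set, only the underscored _hv_* names are made global here, leaving the plain hv_* names free for the C wrappers in hvglue_trace.c below.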
diff --git a/arch/tile/kernel/hvglue.lds b/arch/tile/kernel/hvglue.lds
deleted file mode 100644
index d44c5a67a1ed..000000000000
--- a/arch/tile/kernel/hvglue.lds
+++ /dev/null
@@ -1,59 +0,0 @@
1/* Hypervisor call vector addresses; see <hv/hypervisor.h> */
2hv_init = TEXT_OFFSET + 0x10020;
3hv_install_context = TEXT_OFFSET + 0x10040;
4hv_sysconf = TEXT_OFFSET + 0x10060;
5hv_get_rtc = TEXT_OFFSET + 0x10080;
6hv_set_rtc = TEXT_OFFSET + 0x100a0;
7hv_flush_asid = TEXT_OFFSET + 0x100c0;
8hv_flush_page = TEXT_OFFSET + 0x100e0;
9hv_flush_pages = TEXT_OFFSET + 0x10100;
10hv_restart = TEXT_OFFSET + 0x10120;
11hv_halt = TEXT_OFFSET + 0x10140;
12hv_power_off = TEXT_OFFSET + 0x10160;
13hv_inquire_physical = TEXT_OFFSET + 0x10180;
14hv_inquire_memory_controller = TEXT_OFFSET + 0x101a0;
15hv_inquire_virtual = TEXT_OFFSET + 0x101c0;
16hv_inquire_asid = TEXT_OFFSET + 0x101e0;
17hv_nanosleep = TEXT_OFFSET + 0x10200;
18hv_console_read_if_ready = TEXT_OFFSET + 0x10220;
19hv_console_write = TEXT_OFFSET + 0x10240;
20hv_downcall_dispatch = TEXT_OFFSET + 0x10260;
21hv_inquire_topology = TEXT_OFFSET + 0x10280;
22hv_fs_findfile = TEXT_OFFSET + 0x102a0;
23hv_fs_fstat = TEXT_OFFSET + 0x102c0;
24hv_fs_pread = TEXT_OFFSET + 0x102e0;
25hv_physaddr_read64 = TEXT_OFFSET + 0x10300;
26hv_physaddr_write64 = TEXT_OFFSET + 0x10320;
27hv_get_command_line = TEXT_OFFSET + 0x10340;
28hv_set_caching = TEXT_OFFSET + 0x10360;
29hv_bzero_page = TEXT_OFFSET + 0x10380;
30hv_register_message_state = TEXT_OFFSET + 0x103a0;
31hv_send_message = TEXT_OFFSET + 0x103c0;
32hv_receive_message = TEXT_OFFSET + 0x103e0;
33hv_inquire_context = TEXT_OFFSET + 0x10400;
34hv_start_all_tiles = TEXT_OFFSET + 0x10420;
35hv_dev_open = TEXT_OFFSET + 0x10440;
36hv_dev_close = TEXT_OFFSET + 0x10460;
37hv_dev_pread = TEXT_OFFSET + 0x10480;
38hv_dev_pwrite = TEXT_OFFSET + 0x104a0;
39hv_dev_poll = TEXT_OFFSET + 0x104c0;
40hv_dev_poll_cancel = TEXT_OFFSET + 0x104e0;
41hv_dev_preada = TEXT_OFFSET + 0x10500;
42hv_dev_pwritea = TEXT_OFFSET + 0x10520;
43hv_flush_remote = TEXT_OFFSET + 0x10540;
44hv_console_putc = TEXT_OFFSET + 0x10560;
45hv_inquire_tiles = TEXT_OFFSET + 0x10580;
46hv_confstr = TEXT_OFFSET + 0x105a0;
47hv_reexec = TEXT_OFFSET + 0x105c0;
48hv_set_command_line = TEXT_OFFSET + 0x105e0;
49hv_clear_intr = TEXT_OFFSET + 0x10600;
50hv_enable_intr = TEXT_OFFSET + 0x10620;
51hv_disable_intr = TEXT_OFFSET + 0x10640;
52hv_raise_intr = TEXT_OFFSET + 0x10660;
53hv_trigger_ipi = TEXT_OFFSET + 0x10680;
54hv_store_mapping = TEXT_OFFSET + 0x106a0;
55hv_inquire_realpa = TEXT_OFFSET + 0x106c0;
56hv_flush_all = TEXT_OFFSET + 0x106e0;
57hv_get_ipi_pte = TEXT_OFFSET + 0x10700;
58hv_set_pte_super_shift = TEXT_OFFSET + 0x10720;
59hv_glue_internals = TEXT_OFFSET + 0x10740;
diff --git a/arch/tile/kernel/hvglue_trace.c b/arch/tile/kernel/hvglue_trace.c
new file mode 100644
index 000000000000..85c74ad29312
--- /dev/null
+++ b/arch/tile/kernel/hvglue_trace.c
@@ -0,0 +1,266 @@
1/*
2 * Copyright 2013 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15/*
16 * Pull in the hypervisor header so we declare all the ABI functions
17 * with the underscore versions, then undef the names so that we can
18 * provide our own wrapper versions.
19 */
20#define hv_init _hv_init
21#define hv_install_context _hv_install_context
22#define hv_sysconf _hv_sysconf
23#define hv_get_rtc _hv_get_rtc
24#define hv_set_rtc _hv_set_rtc
25#define hv_flush_asid _hv_flush_asid
26#define hv_flush_page _hv_flush_page
27#define hv_flush_pages _hv_flush_pages
28#define hv_restart _hv_restart
29#define hv_halt _hv_halt
30#define hv_power_off _hv_power_off
31#define hv_inquire_physical _hv_inquire_physical
32#define hv_inquire_memory_controller _hv_inquire_memory_controller
33#define hv_inquire_virtual _hv_inquire_virtual
34#define hv_inquire_asid _hv_inquire_asid
35#define hv_nanosleep _hv_nanosleep
36#define hv_console_read_if_ready _hv_console_read_if_ready
37#define hv_console_write _hv_console_write
38#define hv_downcall_dispatch _hv_downcall_dispatch
39#define hv_inquire_topology _hv_inquire_topology
40#define hv_fs_findfile _hv_fs_findfile
41#define hv_fs_fstat _hv_fs_fstat
42#define hv_fs_pread _hv_fs_pread
43#define hv_physaddr_read64 _hv_physaddr_read64
44#define hv_physaddr_write64 _hv_physaddr_write64
45#define hv_get_command_line _hv_get_command_line
46#define hv_set_caching _hv_set_caching
47#define hv_bzero_page _hv_bzero_page
48#define hv_register_message_state _hv_register_message_state
49#define hv_send_message _hv_send_message
50#define hv_receive_message _hv_receive_message
51#define hv_inquire_context _hv_inquire_context
52#define hv_start_all_tiles _hv_start_all_tiles
53#define hv_dev_open _hv_dev_open
54#define hv_dev_close _hv_dev_close
55#define hv_dev_pread _hv_dev_pread
56#define hv_dev_pwrite _hv_dev_pwrite
57#define hv_dev_poll _hv_dev_poll
58#define hv_dev_poll_cancel _hv_dev_poll_cancel
59#define hv_dev_preada _hv_dev_preada
60#define hv_dev_pwritea _hv_dev_pwritea
61#define hv_flush_remote _hv_flush_remote
62#define hv_console_putc _hv_console_putc
63#define hv_inquire_tiles _hv_inquire_tiles
64#define hv_confstr _hv_confstr
65#define hv_reexec _hv_reexec
66#define hv_set_command_line _hv_set_command_line
67#define hv_clear_intr _hv_clear_intr
68#define hv_enable_intr _hv_enable_intr
69#define hv_disable_intr _hv_disable_intr
70#define hv_raise_intr _hv_raise_intr
71#define hv_trigger_ipi _hv_trigger_ipi
72#define hv_store_mapping _hv_store_mapping
73#define hv_inquire_realpa _hv_inquire_realpa
74#define hv_flush_all _hv_flush_all
75#define hv_get_ipi_pte _hv_get_ipi_pte
76#define hv_set_pte_super_shift _hv_set_pte_super_shift
77#define hv_console_set_ipi _hv_console_set_ipi
78#include <hv/hypervisor.h>
79#undef hv_init
80#undef hv_install_context
81#undef hv_sysconf
82#undef hv_get_rtc
83#undef hv_set_rtc
84#undef hv_flush_asid
85#undef hv_flush_page
86#undef hv_flush_pages
87#undef hv_restart
88#undef hv_halt
89#undef hv_power_off
90#undef hv_inquire_physical
91#undef hv_inquire_memory_controller
92#undef hv_inquire_virtual
93#undef hv_inquire_asid
94#undef hv_nanosleep
95#undef hv_console_read_if_ready
96#undef hv_console_write
97#undef hv_downcall_dispatch
98#undef hv_inquire_topology
99#undef hv_fs_findfile
100#undef hv_fs_fstat
101#undef hv_fs_pread
102#undef hv_physaddr_read64
103#undef hv_physaddr_write64
104#undef hv_get_command_line
105#undef hv_set_caching
106#undef hv_bzero_page
107#undef hv_register_message_state
108#undef hv_send_message
109#undef hv_receive_message
110#undef hv_inquire_context
111#undef hv_start_all_tiles
112#undef hv_dev_open
113#undef hv_dev_close
114#undef hv_dev_pread
115#undef hv_dev_pwrite
116#undef hv_dev_poll
117#undef hv_dev_poll_cancel
118#undef hv_dev_preada
119#undef hv_dev_pwritea
120#undef hv_flush_remote
121#undef hv_console_putc
122#undef hv_inquire_tiles
123#undef hv_confstr
124#undef hv_reexec
125#undef hv_set_command_line
126#undef hv_clear_intr
127#undef hv_enable_intr
128#undef hv_disable_intr
129#undef hv_raise_intr
130#undef hv_trigger_ipi
131#undef hv_store_mapping
132#undef hv_inquire_realpa
133#undef hv_flush_all
134#undef hv_get_ipi_pte
135#undef hv_set_pte_super_shift
136#undef hv_console_set_ipi
137
138/*
139 * Provide macros based on <linux/syscalls.h> to provide a wrapper
140 * function that invokes the same function with an underscore prefix.
141 * We can't use the existing __SC_xxx macros because we need to
142 * support up to nine arguments rather than up to six, and also this
143 * way the file stands alone from possible changes in the
144 * implementation of <linux/syscalls.h>.
145 */
146#define HV_WRAP0(type, name) \
147 type name(void); \
148 type name(void) \
149 { \
150 return _##name(); \
151 }
152#define __HV_DECL1(t1, a1) t1 a1
153#define __HV_DECL2(t2, a2, ...) t2 a2, __HV_DECL1(__VA_ARGS__)
154#define __HV_DECL3(t3, a3, ...) t3 a3, __HV_DECL2(__VA_ARGS__)
155#define __HV_DECL4(t4, a4, ...) t4 a4, __HV_DECL3(__VA_ARGS__)
156#define __HV_DECL5(t5, a5, ...) t5 a5, __HV_DECL4(__VA_ARGS__)
157#define __HV_DECL6(t6, a6, ...) t6 a6, __HV_DECL5(__VA_ARGS__)
158#define __HV_DECL7(t7, a7, ...) t7 a7, __HV_DECL6(__VA_ARGS__)
159#define __HV_DECL8(t8, a8, ...) t8 a8, __HV_DECL7(__VA_ARGS__)
160#define __HV_DECL9(t9, a9, ...) t9 a9, __HV_DECL8(__VA_ARGS__)
161#define __HV_PASS1(t1, a1) a1
162#define __HV_PASS2(t2, a2, ...) a2, __HV_PASS1(__VA_ARGS__)
163#define __HV_PASS3(t3, a3, ...) a3, __HV_PASS2(__VA_ARGS__)
164#define __HV_PASS4(t4, a4, ...) a4, __HV_PASS3(__VA_ARGS__)
165#define __HV_PASS5(t5, a5, ...) a5, __HV_PASS4(__VA_ARGS__)
166#define __HV_PASS6(t6, a6, ...) a6, __HV_PASS5(__VA_ARGS__)
167#define __HV_PASS7(t7, a7, ...) a7, __HV_PASS6(__VA_ARGS__)
168#define __HV_PASS8(t8, a8, ...) a8, __HV_PASS7(__VA_ARGS__)
169#define __HV_PASS9(t9, a9, ...) a9, __HV_PASS8(__VA_ARGS__)
170#define HV_WRAPx(x, type, name, ...) \
171 type name(__HV_DECL##x(__VA_ARGS__)); \
172 type name(__HV_DECL##x(__VA_ARGS__)) \
173 { \
174 return _##name(__HV_PASS##x(__VA_ARGS__)); \
175 }
176#define HV_WRAP1(type, name, ...) HV_WRAPx(1, type, name, __VA_ARGS__)
177#define HV_WRAP2(type, name, ...) HV_WRAPx(2, type, name, __VA_ARGS__)
178#define HV_WRAP3(type, name, ...) HV_WRAPx(3, type, name, __VA_ARGS__)
179#define HV_WRAP4(type, name, ...) HV_WRAPx(4, type, name, __VA_ARGS__)
180#define HV_WRAP5(type, name, ...) HV_WRAPx(5, type, name, __VA_ARGS__)
181#define HV_WRAP6(type, name, ...) HV_WRAPx(6, type, name, __VA_ARGS__)
182#define HV_WRAP7(type, name, ...) HV_WRAPx(7, type, name, __VA_ARGS__)
183#define HV_WRAP8(type, name, ...) HV_WRAPx(8, type, name, __VA_ARGS__)
184#define HV_WRAP9(type, name, ...) HV_WRAPx(9, type, name, __VA_ARGS__)
185
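To make the forwarding idiom concrete, here is the hand-reconstructed expansion of one two-argument wrapper (illustrative; the macros above, not this text, are authoritative):

	/* HV_WRAP2(int, hv_console_write, HV_VirtAddr, bytes, int, len)
	 * expands to a prototype plus a definition forwarding to the
	 * underscored ABI symbol laid down in hvglue.S:
	 */
	int hv_console_write(HV_VirtAddr bytes, int len);
	int hv_console_write(HV_VirtAddr bytes, int len)
	{
		return _hv_console_write(bytes, len);
	}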
186/* List all the hypervisor API functions. */
187HV_WRAP4(void, hv_init, HV_VersionNumber, interface_version_number,
188 int, chip_num, int, chip_rev_num, int, client_pl)
189HV_WRAP1(long, hv_sysconf, HV_SysconfQuery, query)
190HV_WRAP3(int, hv_confstr, HV_ConfstrQuery, query, HV_VirtAddr, buf, int, len)
191#if CHIP_HAS_IPI()
192HV_WRAP3(int, hv_get_ipi_pte, HV_Coord, tile, int, pl, HV_PTE*, pte)
193HV_WRAP3(int, hv_console_set_ipi, int, ipi, int, event, HV_Coord, coord);
194#else
195HV_WRAP1(void, hv_enable_intr, HV_IntrMask, enab_mask)
196HV_WRAP1(void, hv_disable_intr, HV_IntrMask, disab_mask)
197HV_WRAP1(void, hv_clear_intr, HV_IntrMask, clear_mask)
198HV_WRAP1(void, hv_raise_intr, HV_IntrMask, raise_mask)
199HV_WRAP2(HV_Errno, hv_trigger_ipi, HV_Coord, tile, int, interrupt)
200#endif /* !CHIP_HAS_IPI() */
201HV_WRAP3(int, hv_store_mapping, HV_VirtAddr, va, unsigned int, len,
202 HV_PhysAddr, pa)
203HV_WRAP2(HV_PhysAddr, hv_inquire_realpa, HV_PhysAddr, cpa, unsigned int, len)
204HV_WRAP0(HV_RTCTime, hv_get_rtc)
205HV_WRAP1(void, hv_set_rtc, HV_RTCTime, time)
206HV_WRAP4(int, hv_install_context, HV_PhysAddr, page_table, HV_PTE, access,
207 HV_ASID, asid, __hv32, flags)
208HV_WRAP2(int, hv_set_pte_super_shift, int, level, int, log2_count)
209HV_WRAP0(HV_Context, hv_inquire_context)
210HV_WRAP1(int, hv_flush_asid, HV_ASID, asid)
211HV_WRAP2(int, hv_flush_page, HV_VirtAddr, address, HV_PageSize, page_size)
212HV_WRAP3(int, hv_flush_pages, HV_VirtAddr, start, HV_PageSize, page_size,
213 unsigned long, size)
214HV_WRAP1(int, hv_flush_all, int, preserve_global)
215HV_WRAP2(void, hv_restart, HV_VirtAddr, cmd, HV_VirtAddr, args)
216HV_WRAP0(void, hv_halt)
217HV_WRAP0(void, hv_power_off)
218HV_WRAP1(int, hv_reexec, HV_PhysAddr, entry)
219HV_WRAP0(HV_Topology, hv_inquire_topology)
220HV_WRAP3(HV_Errno, hv_inquire_tiles, HV_InqTileSet, set, HV_VirtAddr, cpumask,
221 int, length)
222HV_WRAP1(HV_PhysAddrRange, hv_inquire_physical, int, idx)
223HV_WRAP2(HV_MemoryControllerInfo, hv_inquire_memory_controller, HV_Coord, coord,
224 int, controller)
225HV_WRAP1(HV_VirtAddrRange, hv_inquire_virtual, int, idx)
226HV_WRAP1(HV_ASIDRange, hv_inquire_asid, int, idx)
227HV_WRAP1(void, hv_nanosleep, int, nanosecs)
228HV_WRAP0(int, hv_console_read_if_ready)
229HV_WRAP1(void, hv_console_putc, int, byte)
230HV_WRAP2(int, hv_console_write, HV_VirtAddr, bytes, int, len)
231HV_WRAP0(void, hv_downcall_dispatch)
232HV_WRAP1(int, hv_fs_findfile, HV_VirtAddr, filename)
233HV_WRAP1(HV_FS_StatInfo, hv_fs_fstat, int, inode)
234HV_WRAP4(int, hv_fs_pread, int, inode, HV_VirtAddr, buf,
235 int, length, int, offset)
236HV_WRAP2(unsigned long long, hv_physaddr_read64, HV_PhysAddr, addr,
237 HV_PTE, access)
238HV_WRAP3(void, hv_physaddr_write64, HV_PhysAddr, addr, HV_PTE, access,
239 unsigned long long, val)
240HV_WRAP2(int, hv_get_command_line, HV_VirtAddr, buf, int, length)
241HV_WRAP2(HV_Errno, hv_set_command_line, HV_VirtAddr, buf, int, length)
242HV_WRAP1(void, hv_set_caching, unsigned long, bitmask)
243HV_WRAP2(void, hv_bzero_page, HV_VirtAddr, va, unsigned int, size)
244HV_WRAP1(HV_Errno, hv_register_message_state, HV_MsgState*, msgstate)
245HV_WRAP4(int, hv_send_message, HV_Recipient *, recips, int, nrecip,
246 HV_VirtAddr, buf, int, buflen)
247HV_WRAP3(HV_RcvMsgInfo, hv_receive_message, HV_MsgState, msgstate,
248 HV_VirtAddr, buf, int, buflen)
249HV_WRAP0(void, hv_start_all_tiles)
250HV_WRAP2(int, hv_dev_open, HV_VirtAddr, name, __hv32, flags)
251HV_WRAP1(int, hv_dev_close, int, devhdl)
252HV_WRAP5(int, hv_dev_pread, int, devhdl, __hv32, flags, HV_VirtAddr, va,
253 __hv32, len, __hv64, offset)
254HV_WRAP5(int, hv_dev_pwrite, int, devhdl, __hv32, flags, HV_VirtAddr, va,
255 __hv32, len, __hv64, offset)
256HV_WRAP3(int, hv_dev_poll, int, devhdl, __hv32, events, HV_IntArg, intarg)
257HV_WRAP1(int, hv_dev_poll_cancel, int, devhdl)
258HV_WRAP6(int, hv_dev_preada, int, devhdl, __hv32, flags, __hv32, sgl_len,
259 HV_SGL *, sglp, __hv64, offset, HV_IntArg, intarg)
260HV_WRAP6(int, hv_dev_pwritea, int, devhdl, __hv32, flags, __hv32, sgl_len,
261 HV_SGL *, sglp, __hv64, offset, HV_IntArg, intarg)
262HV_WRAP9(int, hv_flush_remote, HV_PhysAddr, cache_pa,
263 unsigned long, cache_control, unsigned long*, cache_cpumask,
264 HV_VirtAddr, tlb_va, unsigned long, tlb_length,
265 unsigned long, tlb_pgsize, unsigned long*, tlb_cpumask,
266 HV_Remote_ASID*, asids, int, asidcount)
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index cb52d66343ed..088d5c141e68 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -28,20 +28,10 @@
28#include <arch/interrupts.h> 28#include <arch/interrupts.h>
29#include <arch/spr_def.h> 29#include <arch/spr_def.h>
30 30
31#ifdef CONFIG_PREEMPT
32# error "No support for kernel preemption currently"
33#endif
34
35#define PTREGS_PTR(reg, ptreg) addli reg, sp, C_ABI_SAVE_AREA_SIZE + (ptreg) 31#define PTREGS_PTR(reg, ptreg) addli reg, sp, C_ABI_SAVE_AREA_SIZE + (ptreg)
36 32
37#define PTREGS_OFFSET_SYSCALL PTREGS_OFFSET_REG(TREG_SYSCALL_NR) 33#define PTREGS_OFFSET_SYSCALL PTREGS_OFFSET_REG(TREG_SYSCALL_NR)
38 34
39#if !CHIP_HAS_WH64()
40 /* By making this an empty macro, we can use wh64 in the code. */
41 .macro wh64 reg
42 .endm
43#endif
44
45 .macro push_reg reg, ptr=sp, delta=-4 35 .macro push_reg reg, ptr=sp, delta=-4
46 { 36 {
47 sw \ptr, \reg 37 sw \ptr, \reg
@@ -189,7 +179,7 @@ intvec_\vecname:
189 * point sp at the top aligned address on the actual stack page. 179 * point sp at the top aligned address on the actual stack page.
190 */ 180 */
191 mfspr r0, SPR_SYSTEM_SAVE_K_0 181 mfspr r0, SPR_SYSTEM_SAVE_K_0
192 mm r0, r0, zero, LOG2_THREAD_SIZE, 31 182 mm r0, r0, zero, LOG2_NR_CPU_IDS, 31
193 183
1940: 1840:
195 /* 185 /*
@@ -207,6 +197,9 @@ intvec_\vecname:
207 * cache line 1: r14...r29 197 * cache line 1: r14...r29
208 * cache line 0: 2 x frame, r0..r13 198 * cache line 0: 2 x frame, r0..r13
209 */ 199 */
200#if STACK_TOP_DELTA != 64
201#error STACK_TOP_DELTA must be 64 for assumptions here and in task_pt_regs()
202#endif
210 andi r0, r0, -64 203 andi r0, r0, -64
211 204
212 /* 205 /*
@@ -326,18 +319,14 @@ intvec_\vecname:
326 movei r3, -1 /* not used, but set for consistency */ 319 movei r3, -1 /* not used, but set for consistency */
327 } 320 }
328 .else 321 .else
329#if CHIP_HAS_AUX_PERF_COUNTERS()
330 .ifc \c_routine, op_handle_aux_perf_interrupt 322 .ifc \c_routine, op_handle_aux_perf_interrupt
331 { 323 {
332 mfspr r2, AUX_PERF_COUNT_STS 324 mfspr r2, AUX_PERF_COUNT_STS
333 movei r3, -1 /* not used, but set for consistency */ 325 movei r3, -1 /* not used, but set for consistency */
334 } 326 }
335 .else 327 .else
336#endif
337 movei r3, 0 328 movei r3, 0
338#if CHIP_HAS_AUX_PERF_COUNTERS()
339 .endif 329 .endif
340#endif
341 .endif 330 .endif
342 .endif 331 .endif
343 .endif 332 .endif
@@ -354,7 +343,7 @@ intvec_\vecname:
354#ifdef __COLLECT_LINKER_FEEDBACK__ 343#ifdef __COLLECT_LINKER_FEEDBACK__
355 .pushsection .text.intvec_feedback,"ax" 344 .pushsection .text.intvec_feedback,"ax"
356 .org (\vecnum << 5) 345 .org (\vecnum << 5)
357 FEEDBACK_ENTER_EXPLICIT(intvec_\vecname, .intrpt1, 1 << 8) 346 FEEDBACK_ENTER_EXPLICIT(intvec_\vecname, .intrpt, 1 << 8)
358 jrp lr 347 jrp lr
359 .popsection 348 .popsection
360#endif 349#endif
@@ -468,7 +457,7 @@ intvec_\vecname:
468 } 457 }
469 { 458 {
470 auli r21, r21, ha16(__per_cpu_offset) 459 auli r21, r21, ha16(__per_cpu_offset)
471 mm r20, r20, zero, 0, LOG2_THREAD_SIZE-1 460 mm r20, r20, zero, 0, LOG2_NR_CPU_IDS-1
472 } 461 }
473 s2a r20, r20, r21 462 s2a r20, r20, r21
474 lw tp, r20 463 lw tp, r20
@@ -562,7 +551,6 @@ intvec_\vecname:
562 .endif 551 .endif
563 mtspr INTERRUPT_CRITICAL_SECTION, zero 552 mtspr INTERRUPT_CRITICAL_SECTION, zero
564 553
565#if CHIP_HAS_WH64()
566 /* 554 /*
567 * Prepare the first 256 stack bytes to be rapidly accessible 555 * Prepare the first 256 stack bytes to be rapidly accessible
568 * without having to fetch the background data. We don't really 556 * without having to fetch the background data. We don't really
@@ -583,7 +571,6 @@ intvec_\vecname:
583 addi r52, r52, -64 571 addi r52, r52, -64
584 } 572 }
585 wh64 r52 573 wh64 r52
586#endif
587 574
588#ifdef CONFIG_TRACE_IRQFLAGS 575#ifdef CONFIG_TRACE_IRQFLAGS
589 .ifnc \function,handle_nmi 576 .ifnc \function,handle_nmi
@@ -762,7 +749,7 @@ intvec_\vecname:
762 .macro dc_dispatch vecnum, vecname 749 .macro dc_dispatch vecnum, vecname
763 .org (\vecnum << 8) 750 .org (\vecnum << 8)
764intvec_\vecname: 751intvec_\vecname:
765 j hv_downcall_dispatch 752 j _hv_downcall_dispatch
766 ENDPROC(intvec_\vecname) 753 ENDPROC(intvec_\vecname)
767 .endm 754 .endm
768 755
@@ -812,17 +799,34 @@ STD_ENTRY(interrupt_return)
812 } 799 }
813 lw r29, r29 800 lw r29, r29
814 andi r29, r29, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */ 801 andi r29, r29, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */
802 bzt r29, .Lresume_userspace
803
804#ifdef CONFIG_PREEMPT
805 /* Returning to kernel space. Check if we need preemption. */
806 GET_THREAD_INFO(r29)
807 addli r28, r29, THREAD_INFO_FLAGS_OFFSET
815 { 808 {
816 bzt r29, .Lresume_userspace 809 lw r28, r28
817 PTREGS_PTR(r29, PTREGS_OFFSET_PC) 810 addli r29, r29, THREAD_INFO_PREEMPT_COUNT_OFFSET
818 } 811 }
812 {
813 andi r28, r28, _TIF_NEED_RESCHED
814 lw r29, r29
815 }
816 bzt r28, 1f
817 bnz r29, 1f
818 jal preempt_schedule_irq
819 FEEDBACK_REENTER(interrupt_return)
8201:
821#endif
819 822
820 /* If we're resuming to _cpu_idle_nap, bump PC forward by 8. */ 823 /* If we're resuming to _cpu_idle_nap, bump PC forward by 8. */
821 { 824 {
822 lw r28, r29 825 PTREGS_PTR(r29, PTREGS_OFFSET_PC)
823 moveli r27, lo16(_cpu_idle_nap) 826 moveli r27, lo16(_cpu_idle_nap)
824 } 827 }
825 { 828 {
829 lw r28, r29
826 auli r27, r27, ha16(_cpu_idle_nap) 830 auli r27, r27, ha16(_cpu_idle_nap)
827 } 831 }
828 { 832 {
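Restated in C, the new CONFIG_PREEMPT block performs the usual return-to-kernel preemption check (a sketch under the same conditions as the assembly: irqs off, registers saved; the helper name is invented):

	static void maybe_preempt(struct pt_regs *regs)
	{
		struct thread_info *ti = current_thread_info();

		if (user_mode(regs))
			return;			/* handled via .Lresume_userspace */
		if ((ti->flags & _TIF_NEED_RESCHED) && ti->preempt_count == 0)
			preempt_schedule_irq();	/* may switch tasks; returns irqs off */
	}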
@@ -1420,7 +1424,6 @@ handle_ill:
1420 { 1424 {
1421 lw r0, r0 /* indirect thru thread_info to get task_info*/ 1425 lw r0, r0 /* indirect thru thread_info to get task_info*/
1422 addi r1, sp, C_ABI_SAVE_AREA_SIZE /* put ptregs pointer into r1 */ 1426 addi r1, sp, C_ABI_SAVE_AREA_SIZE /* put ptregs pointer into r1 */
1423 move r2, zero /* load error code into r2 */
1424 } 1427 }
1425 1428
1426 jal send_sigtrap /* issue a SIGTRAP */ 1429 jal send_sigtrap /* issue a SIGTRAP */
@@ -1518,12 +1521,10 @@ STD_ENTRY(_sys_clone)
1518 __HEAD 1521 __HEAD
1519 .align 64 1522 .align 64
1520 /* Align much later jump on the start of a cache line. */ 1523 /* Align much later jump on the start of a cache line. */
1521#if !ATOMIC_LOCKS_FOUND_VIA_TABLE()
1522 nop 1524 nop
1523#if PAGE_SIZE >= 0x10000 1525#if PAGE_SIZE >= 0x10000
1524 nop 1526 nop
1525#endif 1527#endif
1526#endif
1527ENTRY(sys_cmpxchg) 1528ENTRY(sys_cmpxchg)
1528 1529
1529 /* 1530 /*
@@ -1557,45 +1558,6 @@ ENTRY(sys_cmpxchg)
1557# error Code here assumes PAGE_OFFSET can be loaded with just hi16() 1558# error Code here assumes PAGE_OFFSET can be loaded with just hi16()
1558#endif 1559#endif
1559 1560
1560#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
1561 {
1562 /* Check for unaligned input. */
1563 bnz sp, .Lcmpxchg_badaddr
1564 mm r25, r0, zero, 3, PAGE_SHIFT-1
1565 }
1566 {
1567 crc32_32 r25, zero, r25
1568 moveli r21, lo16(atomic_lock_ptr)
1569 }
1570 {
1571 auli r21, r21, ha16(atomic_lock_ptr)
1572 auli r23, zero, hi16(PAGE_OFFSET) /* hugepage-aligned */
1573 }
1574 {
1575 shri r20, r25, 32 - ATOMIC_HASH_L1_SHIFT
1576 slt_u r23, r0, r23
1577 lw r26, r0 /* see comment in the "#else" for the "lw r26". */
1578 }
1579 {
1580 s2a r21, r20, r21
1581 bbns r23, .Lcmpxchg_badaddr
1582 }
1583 {
1584 lw r21, r21
1585 seqi r23, TREG_SYSCALL_NR_NAME, __NR_FAST_cmpxchg64
1586 andi r25, r25, ATOMIC_HASH_L2_SIZE - 1
1587 }
1588 {
1589 /* Branch away at this point if we're doing a 64-bit cmpxchg. */
1590 bbs r23, .Lcmpxchg64
1591 andi r23, r0, 7 /* Precompute alignment for cmpxchg64. */
1592 }
1593 {
1594 s2a ATOMIC_LOCK_REG_NAME, r25, r21
1595 j .Lcmpxchg32_tns /* see comment in the #else for the jump. */
1596 }
1597
1598#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
1599 { 1561 {
1600 /* Check for unaligned input. */ 1562 /* Check for unaligned input. */
1601 bnz sp, .Lcmpxchg_badaddr 1563 bnz sp, .Lcmpxchg_badaddr
@@ -1609,7 +1571,7 @@ ENTRY(sys_cmpxchg)
1609 * Because of C pointer arithmetic, we want to compute this: 1571 * Because of C pointer arithmetic, we want to compute this:
1610 * 1572 *
1611 * ((char*)atomic_locks + 1573 * ((char*)atomic_locks +
1612 * (((r0 >> 3) & (1 << (ATOMIC_HASH_SIZE - 1))) << 2)) 1574 * (((r0 >> 3) & ((1 << ATOMIC_HASH_SHIFT) - 1)) << 2))
1613 * 1575 *
1614 * Instead of two shifts we just ">> 1", and use 'mm' 1576 * Instead of two shifts we just ">> 1", and use 'mm'
1615 * to ignore the low and high bits we don't want. 1577 * to ignore the low and high bits we don't want.
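The pointer arithmetic in that comment reduces to the following C (a sketch only; ATOMIC_HASH_SHIFT is the same constant the code above uses):

	/* One 32-bit lock guards each 8-byte granule of user memory,
	 * selected by a shift-and-mask hash of the address.
	 */
	static inline int *lock_for(unsigned long uaddr, int *atomic_locks)
	{
		return &atomic_locks[(uaddr >> 3) & ((1 << ATOMIC_HASH_SHIFT) - 1)];
	}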
@@ -1620,12 +1582,9 @@ ENTRY(sys_cmpxchg)
1620 1582
1621 /* 1583 /*
1622 * Ensure that the TLB is loaded before we take out the lock. 1584 * Ensure that the TLB is loaded before we take out the lock.
1623 * On tilepro, this will start fetching the value all the way 1585 * This will start fetching the value all the way into our L1
1624 * into our L1 as well (and if it gets modified before we 1586 * as well (and if it gets modified before we grab the lock,
1625 * grab the lock, it will be invalidated from our cache 1587 * it will be invalidated from our cache before we reload it).
1626 * before we reload it). On tile64, we'll start fetching it
1627 * into our L1 if we're the home, and if we're not, we'll
1628 * still at least start fetching it into the home's L2.
1629 */ 1588 */
1630 lw r26, r0 1589 lw r26, r0
1631 } 1590 }
@@ -1668,8 +1627,6 @@ ENTRY(sys_cmpxchg)
1668 j .Lcmpxchg32_tns 1627 j .Lcmpxchg32_tns
1669 } 1628 }
1670 1629
1671#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
1672
1673/* Symbol for do_page_fault_ics() to use to compare against the PC. */ 1630/* Symbol for do_page_fault_ics() to use to compare against the PC. */
1674.global __sys_cmpxchg_grab_lock 1631.global __sys_cmpxchg_grab_lock
1675__sys_cmpxchg_grab_lock: 1632__sys_cmpxchg_grab_lock:
@@ -1807,9 +1764,6 @@ __sys_cmpxchg_grab_lock:
1807 .align 64 1764 .align 64
1808.Lcmpxchg64: 1765.Lcmpxchg64:
1809 { 1766 {
1810#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
1811 s2a ATOMIC_LOCK_REG_NAME, r25, r21
1812#endif
1813 bzt r23, .Lcmpxchg64_tns 1767 bzt r23, .Lcmpxchg64_tns
1814 } 1768 }
1815 j .Lcmpxchg_badaddr 1769 j .Lcmpxchg_badaddr
@@ -1875,8 +1829,8 @@ int_unalign:
1875 push_extra_callee_saves r0 1829 push_extra_callee_saves r0
1876 j do_trap 1830 j do_trap
1877 1831
1878/* Include .intrpt1 array of interrupt vectors */ 1832/* Include .intrpt array of interrupt vectors */
1879 .section ".intrpt1", "ax" 1833 .section ".intrpt", "ax"
1880 1834
1881#define op_handle_perf_interrupt bad_intr 1835#define op_handle_perf_interrupt bad_intr
1882#define op_handle_aux_perf_interrupt bad_intr 1836#define op_handle_aux_perf_interrupt bad_intr
@@ -1944,10 +1898,8 @@ int_unalign:
1944 do_page_fault 1898 do_page_fault
1945 int_hand INT_SN_CPL, SN_CPL, bad_intr 1899 int_hand INT_SN_CPL, SN_CPL, bad_intr
1946 int_hand INT_DOUBLE_FAULT, DOUBLE_FAULT, do_trap 1900 int_hand INT_DOUBLE_FAULT, DOUBLE_FAULT, do_trap
1947#if CHIP_HAS_AUX_PERF_COUNTERS()
1948 int_hand INT_AUX_PERF_COUNT, AUX_PERF_COUNT, \ 1901 int_hand INT_AUX_PERF_COUNT, AUX_PERF_COUNT, \
1949 op_handle_aux_perf_interrupt, handle_nmi 1902 op_handle_aux_perf_interrupt, handle_nmi
1950#endif
1951 1903
1952 /* Synthetic interrupt delivered only by the simulator */ 1904 /* Synthetic interrupt delivered only by the simulator */
1953 int_hand INT_BREAKPOINT, BREAKPOINT, do_breakpoint 1905 int_hand INT_BREAKPOINT, BREAKPOINT, do_breakpoint
diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S
index 85d483957027..ec755d3f3734 100644
--- a/arch/tile/kernel/intvec_64.S
+++ b/arch/tile/kernel/intvec_64.S
@@ -17,25 +17,33 @@
17#include <linux/linkage.h> 17#include <linux/linkage.h>
18#include <linux/errno.h> 18#include <linux/errno.h>
19#include <linux/unistd.h> 19#include <linux/unistd.h>
20#include <linux/init.h>
20#include <asm/ptrace.h> 21#include <asm/ptrace.h>
21#include <asm/thread_info.h> 22#include <asm/thread_info.h>
22#include <asm/irqflags.h> 23#include <asm/irqflags.h>
23#include <asm/asm-offsets.h> 24#include <asm/asm-offsets.h>
24#include <asm/types.h> 25#include <asm/types.h>
26#include <asm/traps.h>
25#include <asm/signal.h> 27#include <asm/signal.h>
26#include <hv/hypervisor.h> 28#include <hv/hypervisor.h>
27#include <arch/abi.h> 29#include <arch/abi.h>
28#include <arch/interrupts.h> 30#include <arch/interrupts.h>
29#include <arch/spr_def.h> 31#include <arch/spr_def.h>
30 32
31#ifdef CONFIG_PREEMPT
32# error "No support for kernel preemption currently"
33#endif
34
35#define PTREGS_PTR(reg, ptreg) addli reg, sp, C_ABI_SAVE_AREA_SIZE + (ptreg) 33#define PTREGS_PTR(reg, ptreg) addli reg, sp, C_ABI_SAVE_AREA_SIZE + (ptreg)
36 34
37#define PTREGS_OFFSET_SYSCALL PTREGS_OFFSET_REG(TREG_SYSCALL_NR) 35#define PTREGS_OFFSET_SYSCALL PTREGS_OFFSET_REG(TREG_SYSCALL_NR)
38 36
37#if CONFIG_KERNEL_PL == 1 || CONFIG_KERNEL_PL == 2
38/*
39 * Set "result" non-zero if ex1 holds the PL of the kernel
40 * (with or without ICS being set). Note this works only
41 * because we never find the PL at level 3.
42 */
43# define IS_KERNEL_EX1(result, ex1) andi result, ex1, CONFIG_KERNEL_PL
44#else
45# error Recode IS_KERNEL_EX1 for CONFIG_KERNEL_PL
46#endif
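In C terms the new test is simply (illustrative restatement):

	/* EX1 keeps the protection level in its low bits; PL 3 never
	 * occurs, so ANDing with CONFIG_KERNEL_PL (1 or 2) is non-zero
	 * exactly when the saved PL is the kernel's, ICS set or not.
	 */
	static inline unsigned long is_kernel_ex1(unsigned long ex1)
	{
		return ex1 & CONFIG_KERNEL_PL;
	}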
39 47
40 .macro push_reg reg, ptr=sp, delta=-8 48 .macro push_reg reg, ptr=sp, delta=-8
41 { 49 {
@@ -98,6 +106,185 @@
98 } 106 }
99 .endm 107 .endm
100 108
109 /*
110 * Unalign data exception fast handling: In order to handle
111 * unaligned data access, a fast JIT version is generated and stored
112 * in a specific area in user space. We first need to do a quick poke
113 * to see if the JIT is available. We use certain bits in the fault
114 * PC (bits 3 to 9 for a 16KB page size) as an index into the JIT
115 * code area. The first 64-bit word is the fault PC, and the 2nd one is
116 * the fault bundle itself. If these 2 words both match, then we
117 * directly "iret" to JIT code. If not, a slow path is invoked to
118 * generate new JIT code. Note: the current JIT code WILL be
119 * overwritten if it existed. So, ideally we can handle 128 unalign
120 * fixups via JIT. For lookup efficiency and to effectively support
121 * tight loops with multiple unaligned reference, a simple
122 * direct-mapped cache is used.
123 *
124 * SPR_EX_CONTEXT_K_0 is modified to return to JIT code.
125 * SPR_EX_CONTEXT_K_1 has ICS set.
126 * SPR_EX_CONTEXT_0_0 is set up to the user program's next PC.
127 * SPR_EX_CONTEXT_0_1 = 0.
128 */
129 .macro int_hand_unalign_fast vecnum, vecname
130 .org (\vecnum << 8)
131intvec_\vecname:
132 /* Put r3 in SPR_SYSTEM_SAVE_K_1. */
133 mtspr SPR_SYSTEM_SAVE_K_1, r3
134
135 mfspr r3, SPR_EX_CONTEXT_K_1
136 /*
137 * Check whether the exception comes from user space without ICS set.
138 * If not, just go directly to the slow path.
139 */
140 bnez r3, hand_unalign_slow_nonuser
141
142 mfspr r3, SPR_SYSTEM_SAVE_K_0
143
144 /* Get &thread_info->unalign_jit_tmp[0] in r3. */
145 bfexts r3, r3, 0, CPU_SHIFT-1
146 mm r3, zero, LOG2_THREAD_SIZE, 63
147 addli r3, r3, THREAD_INFO_UNALIGN_JIT_TMP_OFFSET
148
149 /*
150 * Save r0, r1, r2 into thread_info array r3 points to
151 * from low to high memory in order.
152 */
153 st_add r3, r0, 8
154 st_add r3, r1, 8
155 {
156 st_add r3, r2, 8
157 andi r2, sp, 7
158 }
159
160 /* Save stored r3 value so we can revert it on a page fault. */
161 mfspr r1, SPR_SYSTEM_SAVE_K_1
162 st r3, r1
163
164 {
165 /* Generate a SIGBUS if sp is not 8-byte aligned. */
166 bnez r2, hand_unalign_slow_badsp
167 }
168
169 /*
170 * Get the thread_info in r0; load r1 with pc. Set the low bit of sp
171 * as an indicator to the page fault code in case we fault.
172 */
173 {
174 ori sp, sp, 1
175 mfspr r1, SPR_EX_CONTEXT_K_0
176 }
177
178 /* Add the jit_info offset in thread_info; extract r1 [3:9] into r2. */
179 {
180 addli r0, r3, THREAD_INFO_UNALIGN_JIT_BASE_OFFSET - \
181 (THREAD_INFO_UNALIGN_JIT_TMP_OFFSET + (3 * 8))
182 bfextu r2, r1, 3, (2 + PAGE_SHIFT - UNALIGN_JIT_SHIFT)
183 }
184
185 /* Load the jit_info; multiply r2 by 128. */
186 {
187 ld r0, r0
188 shli r2, r2, UNALIGN_JIT_SHIFT
189 }
190
191 /*
192 * If r0 is NULL, the JIT page is not mapped, so go to slow path;
193 * add offset r2 to r0 at the same time.
194 */
195 {
196 beqz r0, hand_unalign_slow
197 add r2, r0, r2
198 }
199
200 /*
201 * We are loading from userspace (both the JIT info PC and
202 * instruction word, and the instruction word we executed)
203 * and since either could fault while holding the interrupt
204 * critical section, we must tag this region and check it in
205 * do_page_fault() to handle it properly.
206 */
207ENTRY(__start_unalign_asm_code)
208
209 /* Load first word of JIT in r0 and increment r2 by 8. */
210 ld_add r0, r2, 8
211
212 /*
213 * Compare the PC with the 1st word in JIT; load the fault bundle
214 * into r1.
215 */
216 {
217 cmpeq r0, r0, r1
218 ld r1, r1
219 }
220
221 /* Go to slow path if PC doesn't match. */
222 beqz r0, hand_unalign_slow
223
224 /*
225 * Load the 2nd word of JIT, which is supposed to be the fault
226 * bundle for a cache hit. Increment r2; after this bundle r2 will
227 * point to the potential start of the JIT code we want to run.
228 */
229 ld_add r0, r2, 8
230
231 /* No further accesses to userspace are done after this point. */
232ENTRY(__end_unalign_asm_code)
233
234 /* Compare the real bundle with what is saved in the JIT area. */
235 {
236 cmpeq r0, r1, r0
237 mtspr SPR_EX_CONTEXT_0_1, zero
238 }
239
240 /* Go to slow path if the fault bundle does not match. */
241 beqz r0, hand_unalign_slow
242
243 /*
244 * A cache hit is found.
245 * r2 points to start of JIT code (3rd word).
246 * r0 is the fault pc.
247 * r1 is the fault bundle.
248 * Reset the low bit of sp.
249 */
250 {
251 mfspr r0, SPR_EX_CONTEXT_K_0
252 andi sp, sp, ~1
253 }
254
255 /* Write r2 into EX_CONTEXT_K_0 and increment PC. */
256 {
257 mtspr SPR_EX_CONTEXT_K_0, r2
258 addi r0, r0, 8
259 }
260
261 /*
262 * Set ICS on kernel EX_CONTEXT_K_1 in order to "iret" to
263 * user with ICS set. This way, if the JIT fixup causes another
264 * unalign exception (which shouldn't be possible), the user
265 * process will be terminated with SIGBUS. Also, our fixup will
266 * run without interleaving with external interrupts.
267 * Each fixup is at most 14 bundles, so it won't hold ICS for long.
268 */
269 {
270 movei r1, PL_ICS_EX1(USER_PL, 1)
271 mtspr SPR_EX_CONTEXT_0_0, r0
272 }
273
274 {
275 mtspr SPR_EX_CONTEXT_K_1, r1
276 addi r3, r3, -(3 * 8)
277 }
278
279 /* Restore r0..r3. */
280 ld_add r0, r3, 8
281 ld_add r1, r3, 8
282 ld_add r2, r3, 8
283 ld r3, r3
284
285 iret
286 ENDPROC(intvec_\vecname)
287 .endm
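The fast path above is a direct-mapped cache probe. A hypothetical C model of the slot layout and lookup (names invented; sizes follow the comment: 128-byte slots, up to 14 fixup bundles each, 128 slots for 16KB pages):

	struct unalign_jit_slot {
		unsigned long pc;	/* fault PC this slot was built for */
		unsigned long bundle;	/* the faulting instruction bundle  */
		unsigned long code[14];	/* fixup bundles, ending in an iret */
	};				/* 1 << UNALIGN_JIT_SHIFT == 128 bytes */

	static struct unalign_jit_slot *
	jit_probe(struct unalign_jit_slot *base, unsigned long pc,
		  unsigned long bundle)
	{
		/* PC bits [3:9] (16KB pages) pick one of the 128 slots. */
		struct unalign_jit_slot *s = &base[(pc >> 3) & 127];

		return (s->pc == pc && s->bundle == bundle) ? s : NULL;
	}

A hit "iret"s straight into s->code with ICS held; a miss falls through to hand_unalign_slow, which regenerates the JIT for that slot.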
101 288
102#ifdef __COLLECT_LINKER_FEEDBACK__ 289#ifdef __COLLECT_LINKER_FEEDBACK__
103 .pushsection .text.intvec_feedback,"ax" 290 .pushsection .text.intvec_feedback,"ax"
@@ -118,15 +305,21 @@ intvec_feedback:
118 * The "processing" argument specifies the code for processing 305 * The "processing" argument specifies the code for processing
119 * the interrupt. Defaults to "handle_interrupt". 306 * the interrupt. Defaults to "handle_interrupt".
120 */ 307 */
121 .macro int_hand vecnum, vecname, c_routine, processing=handle_interrupt 308 .macro __int_hand vecnum, vecname, c_routine,processing=handle_interrupt
122 .org (\vecnum << 8)
123intvec_\vecname: 309intvec_\vecname:
124 /* Temporarily save a register so we have somewhere to work. */ 310 /* Temporarily save a register so we have somewhere to work. */
125 311
126 mtspr SPR_SYSTEM_SAVE_K_1, r0 312 mtspr SPR_SYSTEM_SAVE_K_1, r0
127 mfspr r0, SPR_EX_CONTEXT_K_1 313 mfspr r0, SPR_EX_CONTEXT_K_1
128 314
129 andi r0, r0, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */ 315 /*
316 * The unalign data fastpath code sets the low bit in sp to
317 * force us to reset it here on fault.
318 */
319 {
320 blbs sp, 2f
321 IS_KERNEL_EX1(r0, r0)
322 }
130 323
131 .ifc \vecnum, INT_DOUBLE_FAULT 324 .ifc \vecnum, INT_DOUBLE_FAULT
132 /* 325 /*
@@ -176,15 +369,15 @@ intvec_\vecname:
176 } 369 }
177 .endif 370 .endif
178 371
179 3722:
180 /* 373 /*
181 * SYSTEM_SAVE_K_0 holds the cpu number in the low bits, and 374 * SYSTEM_SAVE_K_0 holds the cpu number in the high bits, and
182 * the current stack top in the higher bits. So we recover 375 * the current stack top in the lower bits. So we recover
183 * our stack top by just masking off the low bits, then 376 * our starting stack value by sign-extending the low bits, then
184 * point sp at the top aligned address on the actual stack page. 377 * point sp at the top aligned address on the actual stack page.
185 */ 378 */
186 mfspr r0, SPR_SYSTEM_SAVE_K_0 379 mfspr r0, SPR_SYSTEM_SAVE_K_0
187 mm r0, zero, LOG2_THREAD_SIZE, 63 380 bfexts r0, r0, 0, CPU_SHIFT-1
188 381
1890: 3820:
190 /* 383 /*
@@ -206,6 +399,9 @@ intvec_\vecname:
206 * cache line 1: r6...r13 399 * cache line 1: r6...r13
207 * cache line 0: 2 x frame, r0..r5 400 * cache line 0: 2 x frame, r0..r5
208 */ 401 */
402#if STACK_TOP_DELTA != 64
403#error STACK_TOP_DELTA must be 64 for assumptions here and in task_pt_regs()
404#endif
209 andi r0, r0, -64 405 andi r0, r0, -64
210 406
211 /* 407 /*
@@ -305,7 +501,7 @@ intvec_\vecname:
305 mfspr r3, SPR_SYSTEM_SAVE_K_2 /* info about page fault */ 501 mfspr r3, SPR_SYSTEM_SAVE_K_2 /* info about page fault */
306 .else 502 .else
307 .ifc \vecnum, INT_ILL_TRANS 503 .ifc \vecnum, INT_ILL_TRANS
308 mfspr r2, ILL_TRANS_REASON 504 mfspr r2, ILL_VA_PC
309 .else 505 .else
310 .ifc \vecnum, INT_DOUBLE_FAULT 506 .ifc \vecnum, INT_DOUBLE_FAULT
311 mfspr r2, SPR_SYSTEM_SAVE_K_2 /* double fault info from HV */ 507 mfspr r2, SPR_SYSTEM_SAVE_K_2 /* double fault info from HV */
@@ -315,12 +511,10 @@ intvec_\vecname:
315 .else 511 .else
316 .ifc \c_routine, op_handle_perf_interrupt 512 .ifc \c_routine, op_handle_perf_interrupt
317 mfspr r2, PERF_COUNT_STS 513 mfspr r2, PERF_COUNT_STS
318#if CHIP_HAS_AUX_PERF_COUNTERS()
319 .else 514 .else
320 .ifc \c_routine, op_handle_aux_perf_interrupt 515 .ifc \c_routine, op_handle_aux_perf_interrupt
321 mfspr r2, AUX_PERF_COUNT_STS 516 mfspr r2, AUX_PERF_COUNT_STS
322 .endif 517 .endif
323#endif
324 .endif 518 .endif
325 .endif 519 .endif
326 .endif 520 .endif
@@ -339,7 +533,7 @@ intvec_\vecname:
339#ifdef __COLLECT_LINKER_FEEDBACK__ 533#ifdef __COLLECT_LINKER_FEEDBACK__
340 .pushsection .text.intvec_feedback,"ax" 534 .pushsection .text.intvec_feedback,"ax"
341 .org (\vecnum << 5) 535 .org (\vecnum << 5)
342 FEEDBACK_ENTER_EXPLICIT(intvec_\vecname, .intrpt1, 1 << 8) 536 FEEDBACK_ENTER_EXPLICIT(intvec_\vecname, .intrpt, 1 << 8)
343 jrp lr 537 jrp lr
344 .popsection 538 .popsection
345#endif 539#endif
@@ -455,11 +649,12 @@ intvec_\vecname:
455 /* 649 /*
456 * If we will be returning to the kernel, we will need to 650 * If we will be returning to the kernel, we will need to
457 * reset the interrupt masks to the state they had before. 651 * reset the interrupt masks to the state they had before.
458 * Set DISABLE_IRQ in flags iff we came from PL1 with irqs disabled. 652 * Set DISABLE_IRQ in flags iff we came from kernel pl with
653 * irqs disabled.
459 */ 654 */
460 mfspr r32, SPR_EX_CONTEXT_K_1 655 mfspr r32, SPR_EX_CONTEXT_K_1
461 { 656 {
462 andi r32, r32, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */ 657 IS_KERNEL_EX1(r22, r22)
463 PTREGS_PTR(r21, PTREGS_OFFSET_FLAGS) 658 PTREGS_PTR(r21, PTREGS_OFFSET_FLAGS)
464 } 659 }
465 beqzt r32, 1f /* zero if from user space */ 660 beqzt r32, 1f /* zero if from user space */
@@ -503,7 +698,7 @@ intvec_\vecname:
503 } 698 }
504 { 699 {
505 shl16insli r21, r21, hw1(__per_cpu_offset) 700 shl16insli r21, r21, hw1(__per_cpu_offset)
506 bfextu r20, r20, 0, LOG2_THREAD_SIZE-1 701 bfextu r20, r20, CPU_SHIFT, 63
507 } 702 }
508 shl16insli r21, r21, hw0(__per_cpu_offset) 703 shl16insli r21, r21, hw0(__per_cpu_offset)
509 shl3add r20, r20, r21 704 shl3add r20, r20, r21
@@ -585,7 +780,7 @@ intvec_\vecname:
585 .macro dc_dispatch vecnum, vecname 780 .macro dc_dispatch vecnum, vecname
586 .org (\vecnum << 8) 781 .org (\vecnum << 8)
587intvec_\vecname: 782intvec_\vecname:
588 j hv_downcall_dispatch 783 j _hv_downcall_dispatch
589 ENDPROC(intvec_\vecname) 784 ENDPROC(intvec_\vecname)
590 .endm 785 .endm
591 786
@@ -626,14 +821,36 @@ STD_ENTRY(interrupt_return)
626 PTREGS_PTR(r29, PTREGS_OFFSET_EX1) 821 PTREGS_PTR(r29, PTREGS_OFFSET_EX1)
627 } 822 }
628 ld r29, r29 823 ld r29, r29
629 andi r29, r29, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */ 824 IS_KERNEL_EX1(r29, r29)
630 { 825 {
631 beqzt r29, .Lresume_userspace 826 beqzt r29, .Lresume_userspace
632 PTREGS_PTR(r29, PTREGS_OFFSET_PC) 827 move r29, sp
828 }
829
830#ifdef CONFIG_PREEMPT
831 /* Returning to kernel space. Check if we need preemption. */
832 EXTRACT_THREAD_INFO(r29)
833 addli r28, r29, THREAD_INFO_FLAGS_OFFSET
834 {
835 ld r28, r28
836 addli r29, r29, THREAD_INFO_PREEMPT_COUNT_OFFSET
837 }
838 {
839 andi r28, r28, _TIF_NEED_RESCHED
840 ld4s r29, r29
633 } 841 }
842 beqzt r28, 1f
843 bnez r29, 1f
844 jal preempt_schedule_irq
845 FEEDBACK_REENTER(interrupt_return)
8461:
847#endif
634 848
635 /* If we're resuming to _cpu_idle_nap, bump PC forward by 8. */ 849 /* If we're resuming to _cpu_idle_nap, bump PC forward by 8. */
636 moveli r27, hw2_last(_cpu_idle_nap) 850 {
851 moveli r27, hw2_last(_cpu_idle_nap)
852 PTREGS_PTR(r29, PTREGS_OFFSET_PC)
853 }
637 { 854 {
638 ld r28, r29 855 ld r28, r29
639 shl16insli r27, r27, hw1(_cpu_idle_nap) 856 shl16insli r27, r27, hw1(_cpu_idle_nap)
@@ -728,7 +945,7 @@ STD_ENTRY(interrupt_return)
728 PTREGS_PTR(r32, PTREGS_OFFSET_FLAGS) 945 PTREGS_PTR(r32, PTREGS_OFFSET_FLAGS)
729 } 946 }
730 { 947 {
731 andi r0, r0, SPR_EX_CONTEXT_1_1__PL_MASK 948 IS_KERNEL_EX1(r0, r0)
732 ld r32, r32 949 ld r32, r32
733 } 950 }
734 bnez r0, 1f 951 bnez r0, 1f
@@ -799,7 +1016,7 @@ STD_ENTRY(interrupt_return)
799 pop_reg r21, sp, PTREGS_OFFSET_REG(31) - PTREGS_OFFSET_PC 1016 pop_reg r21, sp, PTREGS_OFFSET_REG(31) - PTREGS_OFFSET_PC
800 { 1017 {
801 mtspr SPR_EX_CONTEXT_K_1, lr 1018 mtspr SPR_EX_CONTEXT_K_1, lr
802 andi lr, lr, SPR_EX_CONTEXT_1_1__PL_MASK /* mask off ICS */ 1019 IS_KERNEL_EX1(lr, lr)
803 } 1020 }
804 { 1021 {
805 mtspr SPR_EX_CONTEXT_K_0, r21 1022 mtspr SPR_EX_CONTEXT_K_0, r21
@@ -1223,10 +1440,31 @@ STD_ENTRY(_sys_clone)
1223 j sys_clone 1440 j sys_clone
1224 STD_ENDPROC(_sys_clone) 1441 STD_ENDPROC(_sys_clone)
1225 1442
1226/* The single-step support may need to read all the registers. */ 1443 /*
1444 * Recover r3, r2, r1 and r0 here, saved by the unalign fast vector.
1445 * The vector area limit is 32 bundles, so we handle the reload here.
1446 * r0, r1, r2 are in thread_info from low to high memory in order.
1447 * r3 points to the location where the original r3 was saved.
1448 * We put this code in the __HEAD section so it can be reached
1449 * via a conditional branch from the fast path.
1450 */
1451 __HEAD
1452hand_unalign_slow:
1453 andi sp, sp, ~1
1454hand_unalign_slow_badsp:
1455 addi r3, r3, -(3 * 8)
1456 ld_add r0, r3, 8
1457 ld_add r1, r3, 8
1458 ld r2, r3
1459hand_unalign_slow_nonuser:
1460 mfspr r3, SPR_SYSTEM_SAVE_K_1
1461 __int_hand INT_UNALIGN_DATA, UNALIGN_DATA_SLOW, int_unalign
1462
1463/* The unaligned data support needs to read all the registers. */
1227int_unalign: 1464int_unalign:
1228 push_extra_callee_saves r0 1465 push_extra_callee_saves r0
1229 j do_trap 1466 j do_unaligned
1467ENDPROC(hand_unalign_slow)
1230 1468
1231/* Fill the return address stack with nonzero entries. */ 1469/* Fill the return address stack with nonzero entries. */
1232STD_ENTRY(fill_ra_stack) 1470STD_ENTRY(fill_ra_stack)
@@ -1240,8 +1478,15 @@ STD_ENTRY(fill_ra_stack)
12404: jrp r0 14784: jrp r0
1241 STD_ENDPROC(fill_ra_stack) 1479 STD_ENDPROC(fill_ra_stack)
1242 1480
1243/* Include .intrpt1 array of interrupt vectors */ 1481 .macro int_hand vecnum, vecname, c_routine, processing=handle_interrupt
1244 .section ".intrpt1", "ax" 1482 .org (\vecnum << 8)
1483 __int_hand \vecnum, \vecname, \c_routine, \processing
1484 .endm
1485
1486/* Include .intrpt array of interrupt vectors */
1487 .section ".intrpt", "ax"
1488 .global intrpt_start
1489intrpt_start:
1245 1490
1246#define op_handle_perf_interrupt bad_intr 1491#define op_handle_perf_interrupt bad_intr
1247#define op_handle_aux_perf_interrupt bad_intr 1492#define op_handle_aux_perf_interrupt bad_intr
@@ -1272,7 +1517,7 @@ STD_ENTRY(fill_ra_stack)
1272 int_hand INT_SWINT_1, SWINT_1, SYSCALL, handle_syscall 1517 int_hand INT_SWINT_1, SWINT_1, SYSCALL, handle_syscall
1273 int_hand INT_SWINT_0, SWINT_0, do_trap 1518 int_hand INT_SWINT_0, SWINT_0, do_trap
1274 int_hand INT_ILL_TRANS, ILL_TRANS, do_trap 1519 int_hand INT_ILL_TRANS, ILL_TRANS, do_trap
1275 int_hand INT_UNALIGN_DATA, UNALIGN_DATA, int_unalign 1520 int_hand_unalign_fast INT_UNALIGN_DATA, UNALIGN_DATA
1276 int_hand INT_DTLB_MISS, DTLB_MISS, do_page_fault 1521 int_hand INT_DTLB_MISS, DTLB_MISS, do_page_fault
1277 int_hand INT_DTLB_ACCESS, DTLB_ACCESS, do_page_fault 1522 int_hand INT_DTLB_ACCESS, DTLB_ACCESS, do_page_fault
1278 int_hand INT_IDN_FIREWALL, IDN_FIREWALL, do_hardwall_trap 1523 int_hand INT_IDN_FIREWALL, IDN_FIREWALL, do_hardwall_trap
diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c
index 3ccf2cd7182e..0586fdb9352d 100644
--- a/arch/tile/kernel/irq.c
+++ b/arch/tile/kernel/irq.c
@@ -55,7 +55,8 @@ static DEFINE_PER_CPU(int, irq_depth);
55 55
56/* State for allocating IRQs on Gx. */ 56/* State for allocating IRQs on Gx. */
57#if CHIP_HAS_IPI() 57#if CHIP_HAS_IPI()
58static unsigned long available_irqs = ~(1UL << IRQ_RESCHEDULE); 58static unsigned long available_irqs = ((1UL << NR_IRQS) - 1) &
59 (~(1UL << IRQ_RESCHEDULE));
59static DEFINE_SPINLOCK(available_irqs_lock); 60static DEFINE_SPINLOCK(available_irqs_lock);
60#endif 61#endif
61 62
@@ -73,7 +74,8 @@ static DEFINE_SPINLOCK(available_irqs_lock);
73 74
74/* 75/*
75 * The interrupt handling path, implemented in terms of HV interrupt 76 * The interrupt handling path, implemented in terms of HV interrupt
76 * emulation on TILE64 and TILEPro, and IPI hardware on TILE-Gx. 77 * emulation on TILEPro, and IPI hardware on TILE-Gx.
78 * Entered with interrupts disabled.
77 */ 79 */
78void tile_dev_intr(struct pt_regs *regs, int intnum) 80void tile_dev_intr(struct pt_regs *regs, int intnum)
79{ 81{
@@ -233,7 +235,7 @@ void tile_irq_activate(unsigned int irq, int tile_irq_type)
233{ 235{
234 /* 236 /*
235 * We use handle_level_irq() by default because the pending 237 * We use handle_level_irq() by default because the pending
236 * interrupt vector (whether modeled by the HV on TILE64 and 238 * interrupt vector (whether modeled by the HV on
237 * TILEPro or implemented in hardware on TILE-Gx) has 239 * TILEPro or implemented in hardware on TILE-Gx) has
238 * level-style semantics for each bit. An interrupt fires 240 * level-style semantics for each bit. An interrupt fires
239 * whenever a bit is high, not just at edges. 241 * whenever a bit is high, not just at edges.
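For a driver this pairs with the usual request_irq(); a hedged fragment (the handler name and exact ordering are illustrative, not taken from the patch):

	int rc = request_irq(irq, my_dev_handler, 0, "mydev", dev);
	if (rc == 0)
		tile_irq_activate(irq, TILE_IRQ_PERCPU);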
diff --git a/arch/tile/kernel/kgdb.c b/arch/tile/kernel/kgdb.c
new file mode 100644
index 000000000000..4cd88381a83e
--- /dev/null
+++ b/arch/tile/kernel/kgdb.c
@@ -0,0 +1,499 @@
1/*
2 * Copyright 2013 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 *
14 * TILE-Gx KGDB support.
15 */
16
17#include <linux/ptrace.h>
18#include <linux/kgdb.h>
19#include <linux/kdebug.h>
20#include <linux/uaccess.h>
21#include <linux/module.h>
22#include <asm/cacheflush.h>
23
24static tile_bundle_bits singlestep_insn = TILEGX_BPT_BUNDLE | DIE_SSTEPBP;
25static unsigned long stepped_addr;
26static tile_bundle_bits stepped_instr;
27
28struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
29 { "r0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[0])},
30 { "r1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[1])},
31 { "r2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[2])},
32 { "r3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[3])},
33 { "r4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[4])},
34 { "r5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[5])},
35 { "r6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[6])},
36 { "r7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[7])},
37 { "r8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[8])},
38 { "r9", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[9])},
39 { "r10", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[10])},
40 { "r11", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[11])},
41 { "r12", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[12])},
42 { "r13", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[13])},
43 { "r14", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[14])},
44 { "r15", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[15])},
45 { "r16", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[16])},
46 { "r17", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[17])},
47 { "r18", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[18])},
48 { "r19", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[19])},
49 { "r20", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[20])},
50 { "r21", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[21])},
51 { "r22", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[22])},
52 { "r23", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[23])},
53 { "r24", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[24])},
54 { "r25", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[25])},
55 { "r26", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[26])},
56 { "r27", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[27])},
57 { "r28", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[28])},
58 { "r29", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[29])},
59 { "r30", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[30])},
60 { "r31", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[31])},
61 { "r32", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[32])},
62 { "r33", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[33])},
63 { "r34", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[34])},
64 { "r35", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[35])},
65 { "r36", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[36])},
66 { "r37", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[37])},
67 { "r38", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[38])},
68 { "r39", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[39])},
69 { "r40", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[40])},
70 { "r41", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[41])},
71 { "r42", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[42])},
72 { "r43", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[43])},
73 { "r44", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[44])},
74 { "r45", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[45])},
75 { "r46", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[46])},
76 { "r47", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[47])},
77 { "r48", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[48])},
78 { "r49", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[49])},
79 { "r50", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[50])},
80 { "r51", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[51])},
81 { "r52", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[52])},
82 { "tp", GDB_SIZEOF_REG, offsetof(struct pt_regs, tp)},
83 { "sp", GDB_SIZEOF_REG, offsetof(struct pt_regs, sp)},
84 { "lr", GDB_SIZEOF_REG, offsetof(struct pt_regs, lr)},
85 { "sn", GDB_SIZEOF_REG, -1},
86 { "idn0", GDB_SIZEOF_REG, -1},
87 { "idn1", GDB_SIZEOF_REG, -1},
88 { "udn0", GDB_SIZEOF_REG, -1},
89 { "udn1", GDB_SIZEOF_REG, -1},
90 { "udn2", GDB_SIZEOF_REG, -1},
91 { "udn3", GDB_SIZEOF_REG, -1},
92 { "zero", GDB_SIZEOF_REG, -1},
93 { "pc", GDB_SIZEOF_REG, offsetof(struct pt_regs, pc)},
94 { "faultnum", GDB_SIZEOF_REG, offsetof(struct pt_regs, faultnum)},
95};
96
97char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
98{
99 if (regno >= DBG_MAX_REG_NUM || regno < 0)
100 return NULL;
101
102 if (dbg_reg_def[regno].offset != -1)
103 memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
104 dbg_reg_def[regno].size);
105 else
106 memset(mem, 0, dbg_reg_def[regno].size);
107 return dbg_reg_def[regno].name;
108}
109
110int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
111{
112 if (regno >= DBG_MAX_REG_NUM || regno < 0)
113 return -EINVAL;
114
115 if (dbg_reg_def[regno].offset != -1)
116 memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
117 dbg_reg_def[regno].size);
118 return 0;
119}
120
121/*
122 * Similar to pt_regs_to_gdb_regs() except that the process is sleeping and so
123 * we may not be able to get all the info.
124 */
125void
126sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
127{
128 int reg;
129 struct pt_regs *thread_regs;
130 unsigned long *ptr = gdb_regs;
131
132 if (task == NULL)
133 return;
134
135 /* Initialize to zero. */
136 memset(gdb_regs, 0, NUMREGBYTES);
137
138 thread_regs = task_pt_regs(task);
139 for (reg = 0; reg <= TREG_LAST_GPR; reg++)
140 *(ptr++) = thread_regs->regs[reg];
141
142 gdb_regs[TILEGX_PC_REGNUM] = thread_regs->pc;
143 gdb_regs[TILEGX_FAULTNUM_REGNUM] = thread_regs->faultnum;
144}
145
146void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
147{
148 regs->pc = pc;
149}
150
151static void kgdb_call_nmi_hook(void *ignored)
152{
153 kgdb_nmicallback(raw_smp_processor_id(), NULL);
154}
155
156void kgdb_roundup_cpus(unsigned long flags)
157{
158 local_irq_enable();
159 smp_call_function(kgdb_call_nmi_hook, NULL, 0);
160 local_irq_disable();
161}
162
163/*
164 * Convert a kernel address to the writable kernel text mapping.
165 */
166static unsigned long writable_address(unsigned long addr)
167{
168 unsigned long ret = 0;
169
170 if (core_kernel_text(addr))
171 ret = addr - MEM_SV_START + PAGE_OFFSET;
172 else if (is_module_text_address(addr))
173 ret = addr;
174 else
175 pr_err("Unknown virtual address 0x%lx\n", addr);
176
177 return ret;
178}
179
180/*
181 * Calculate the new address for after a step.
182 */
183static unsigned long get_step_address(struct pt_regs *regs)
184{
185 int src_reg;
186 int jump_off;
187 int br_off;
188 unsigned long addr;
189 unsigned int opcode;
190 tile_bundle_bits bundle;
191
192 /* Move to the next instruction by default. */
193 addr = regs->pc + TILEGX_BUNDLE_SIZE_IN_BYTES;
194 bundle = *(unsigned long *)instruction_pointer(regs);
195
196 /* 0: X mode, Otherwise: Y mode. */
197 if (bundle & TILEGX_BUNDLE_MODE_MASK) {
198 if (get_Opcode_Y1(bundle) == RRR_1_OPCODE_Y1 &&
199 get_RRROpcodeExtension_Y1(bundle) ==
200 UNARY_RRR_1_OPCODE_Y1) {
201 opcode = get_UnaryOpcodeExtension_Y1(bundle);
202
203 switch (opcode) {
204 case JALR_UNARY_OPCODE_Y1:
205 case JALRP_UNARY_OPCODE_Y1:
206 case JR_UNARY_OPCODE_Y1:
207 case JRP_UNARY_OPCODE_Y1:
208 src_reg = get_SrcA_Y1(bundle);
209 dbg_get_reg(src_reg, &addr, regs);
210 break;
211 }
212 }
213 } else if (get_Opcode_X1(bundle) == RRR_0_OPCODE_X1) {
214 if (get_RRROpcodeExtension_X1(bundle) ==
215 UNARY_RRR_0_OPCODE_X1) {
216 opcode = get_UnaryOpcodeExtension_X1(bundle);
217
218 switch (opcode) {
219 case JALR_UNARY_OPCODE_X1:
220 case JALRP_UNARY_OPCODE_X1:
221 case JR_UNARY_OPCODE_X1:
222 case JRP_UNARY_OPCODE_X1:
223 src_reg = get_SrcA_X1(bundle);
224 dbg_get_reg(src_reg, &addr, regs);
225 break;
226 }
227 }
228 } else if (get_Opcode_X1(bundle) == JUMP_OPCODE_X1) {
229 opcode = get_JumpOpcodeExtension_X1(bundle);
230
231 switch (opcode) {
232 case JAL_JUMP_OPCODE_X1:
233 case J_JUMP_OPCODE_X1:
234 jump_off = sign_extend(get_JumpOff_X1(bundle), 27);
235 addr = regs->pc +
236 (jump_off << TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES);
237 break;
238 }
239 } else if (get_Opcode_X1(bundle) == BRANCH_OPCODE_X1) {
240 br_off = 0;
241 opcode = get_BrType_X1(bundle);
242
243 switch (opcode) {
244 case BEQZT_BRANCH_OPCODE_X1:
245 case BEQZ_BRANCH_OPCODE_X1:
246 if (get_SrcA_X1(bundle) == 0)
247 br_off = get_BrOff_X1(bundle);
248 break;
249 case BGEZT_BRANCH_OPCODE_X1:
250 case BGEZ_BRANCH_OPCODE_X1:
251 if (get_SrcA_X1(bundle) >= 0)
252 br_off = get_BrOff_X1(bundle);
253 break;
254 case BGTZT_BRANCH_OPCODE_X1:
255 case BGTZ_BRANCH_OPCODE_X1:
256 if (get_SrcA_X1(bundle) > 0)
257 br_off = get_BrOff_X1(bundle);
258 break;
259 case BLBCT_BRANCH_OPCODE_X1:
260 case BLBC_BRANCH_OPCODE_X1:
261 if (!(get_SrcA_X1(bundle) & 1))
262 br_off = get_BrOff_X1(bundle);
263 break;
264 case BLBST_BRANCH_OPCODE_X1:
265 case BLBS_BRANCH_OPCODE_X1:
266 if (get_SrcA_X1(bundle) & 1)
267 br_off = get_BrOff_X1(bundle);
268 break;
269 case BLEZT_BRANCH_OPCODE_X1:
270 case BLEZ_BRANCH_OPCODE_X1:
271 if (get_SrcA_X1(bundle) <= 0)
272 br_off = get_BrOff_X1(bundle);
273 break;
274 case BLTZT_BRANCH_OPCODE_X1:
275 case BLTZ_BRANCH_OPCODE_X1:
276 if (get_SrcA_X1(bundle) < 0)
277 br_off = get_BrOff_X1(bundle);
278 break;
279 case BNEZT_BRANCH_OPCODE_X1:
280 case BNEZ_BRANCH_OPCODE_X1:
281 if (get_SrcA_X1(bundle) != 0)
282 br_off = get_BrOff_X1(bundle);
283 break;
284 }
285
286 if (br_off != 0) {
287 br_off = sign_extend(br_off, 17);
288 addr = regs->pc +
289 (br_off << TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES);
290 }
291 }
292
293 return addr;
294}
295
296/*
297 * Replace the next instruction after the current instruction with a
298 * breakpoint instruction.
299 */
300static void do_single_step(struct pt_regs *regs)
301{
302 unsigned long addr_wr;
303
304 /* Determine where the target instruction will send us to. */
305 stepped_addr = get_step_address(regs);
306 probe_kernel_read((char *)&stepped_instr, (char *)stepped_addr,
307 BREAK_INSTR_SIZE);
308
309 addr_wr = writable_address(stepped_addr);
310 probe_kernel_write((char *)addr_wr, (char *)&singlestep_insn,
311 BREAK_INSTR_SIZE);
312 smp_wmb();
313 flush_icache_range(stepped_addr, stepped_addr + BREAK_INSTR_SIZE);
314}
315
316static void undo_single_step(struct pt_regs *regs)
317{
318 unsigned long addr_wr;
319
320 if (stepped_instr == 0)
321 return;
322
323 addr_wr = writable_address(stepped_addr);
324 probe_kernel_write((char *)addr_wr, (char *)&stepped_instr,
325 BREAK_INSTR_SIZE);
326 stepped_instr = 0;
327 smp_wmb();
328 flush_icache_range(stepped_addr, stepped_addr + BREAK_INSTR_SIZE);
329}
330
331/*
332 * Notifier hook called on die events. If KGDB is enabled, try to
333 * drop into the debugger.
334 */
335static int
336kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
337{
338 int ret;
339 unsigned long flags;
340 struct die_args *args = (struct die_args *)ptr;
341 struct pt_regs *regs = args->regs;
342
343#ifdef CONFIG_KPROBES
344 /*
345 * Return immediately if the kprobes fault notifier has set
346 * DIE_PAGE_FAULT.
347 */
348 if (cmd == DIE_PAGE_FAULT)
349 return NOTIFY_DONE;
350#endif /* CONFIG_KPROBES */
351
352 switch (cmd) {
353 case DIE_BREAK:
354 case DIE_COMPILED_BPT:
355 break;
356 case DIE_SSTEPBP:
357 local_irq_save(flags);
358 kgdb_handle_exception(0, SIGTRAP, 0, regs);
359 local_irq_restore(flags);
360 return NOTIFY_STOP;
361 default:
362 /* Userspace events, ignore. */
363 if (user_mode(regs))
364 return NOTIFY_DONE;
365 }
366
367 local_irq_save(flags);
368 ret = kgdb_handle_exception(args->trapnr, args->signr, args->err, regs);
369 local_irq_restore(flags);
370 if (ret)
371 return NOTIFY_DONE;
372
373 return NOTIFY_STOP;
374}
375
376static struct notifier_block kgdb_notifier = {
377 .notifier_call = kgdb_notify,
378};
379
380/*
381 * kgdb_arch_handle_exception - Handle architecture specific GDB packets.
382 * @vector: The error vector of the exception that happened.
383 * @signo: The signal number of the exception that happened.
384 * @err_code: The error code of the exception that happened.
385 * @remcom_in_buffer: The buffer of the packet we have read.
386 * @remcom_out_buffer: The buffer of %BUFMAX bytes to write a packet into.
387 * @regs: The &struct pt_regs of the current process.
388 *
389 * This function MUST handle the 'c' and 's' command packets,
390 * as well packets to set / remove a hardware breakpoint, if used.
391 * If there are additional packets which the hardware needs to handle,
392 * they are handled here. The code should return -1 if it wants to
393 * process more packets, and a %0 or %1 if it wants to exit from the
394 * kgdb callback.
395 */
396int kgdb_arch_handle_exception(int vector, int signo, int err_code,
397 char *remcom_in_buffer, char *remcom_out_buffer,
398 struct pt_regs *regs)
399{
400 char *ptr;
401 unsigned long address;
402
403 /* Undo any stepping we may have done. */
404 undo_single_step(regs);
405
406 switch (remcom_in_buffer[0]) {
407 case 'c':
408 case 's':
409 case 'D':
410 case 'k':
411 /*
412		 * Try to read the optional parameter; pc is unchanged if none is given.
413 * If this was a compiled-in breakpoint, we need to move
414 * to the next instruction or we will just breakpoint
415 * over and over again.
416 */
417 ptr = &remcom_in_buffer[1];
418 if (kgdb_hex2long(&ptr, &address))
419 regs->pc = address;
420 else if (*(unsigned long *)regs->pc == compiled_bpt)
421 regs->pc += BREAK_INSTR_SIZE;
422
423 if (remcom_in_buffer[0] == 's') {
424 do_single_step(regs);
425 kgdb_single_step = 1;
426 atomic_set(&kgdb_cpu_doing_single_step,
427 raw_smp_processor_id());
428 } else
429 atomic_set(&kgdb_cpu_doing_single_step, -1);
430
431 return 0;
432 }
433
434	return -1; /* -1 tells the kgdb core to keep processing packets */
435}
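/*
 * For reference, the remote-protocol packets consumed by the switch
 * above look like this on the wire (GDB remote serial protocol; the
 * address in the second example is illustrative):
 *
 *   $c#63          continue from the current pc
 *   $c4000a0#b8    continue from address 0x4000a0
 *   $s#73          single-step one bundle
 *   $D#44 / $k#6b  detach / kill
 */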
436
437struct kgdb_arch arch_kgdb_ops;
438
439/*
440 * kgdb_arch_init - Perform any architecture specific initialization.
441 *
442 * This function will handle the initialization of any architecture
443 * specific callbacks.
444 */
445int kgdb_arch_init(void)
446{
447 tile_bundle_bits bundle = TILEGX_BPT_BUNDLE;
448
449 memcpy(arch_kgdb_ops.gdb_bpt_instr, &bundle, BREAK_INSTR_SIZE);
450 return register_die_notifier(&kgdb_notifier);
451}
452
453/*
454 * kgdb_arch_exit - Perform any architecture specific uninitialization.
455 *
456 * This function will handle the uninitialization of any architecture
457 * specific callbacks, for dynamic registration and unregistration.
458 */
459void kgdb_arch_exit(void)
460{
461 unregister_die_notifier(&kgdb_notifier);
462}
463
464int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
465{
466 int err;
467 unsigned long addr_wr = writable_address(bpt->bpt_addr);
468
469 if (addr_wr == 0)
470 return -1;
471
472 err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
473 BREAK_INSTR_SIZE);
474 if (err)
475 return err;
476
477 err = probe_kernel_write((char *)addr_wr, arch_kgdb_ops.gdb_bpt_instr,
478 BREAK_INSTR_SIZE);
479 smp_wmb();
480 flush_icache_range((unsigned long)bpt->bpt_addr,
481 (unsigned long)bpt->bpt_addr + BREAK_INSTR_SIZE);
482 return err;
483}
484
485int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
486{
487 int err;
488 unsigned long addr_wr = writable_address(bpt->bpt_addr);
489
490 if (addr_wr == 0)
491 return -1;
492
493 err = probe_kernel_write((char *)addr_wr, (char *)bpt->saved_instr,
494 BREAK_INSTR_SIZE);
495 smp_wmb();
496 flush_icache_range((unsigned long)bpt->bpt_addr,
497 (unsigned long)bpt->bpt_addr + BREAK_INSTR_SIZE);
498 return err;
499}
diff --git a/arch/tile/kernel/kprobes.c b/arch/tile/kernel/kprobes.c
new file mode 100644
index 000000000000..27cdcacbe81d
--- /dev/null
+++ b/arch/tile/kernel/kprobes.c
@@ -0,0 +1,528 @@
1/*
2 * arch/tile/kernel/kprobes.c
3 * Kprobes on TILE-Gx
4 *
5 * Some portions copied from the MIPS version.
6 *
7 * Copyright (C) IBM Corporation, 2002, 2004
8 * Copyright 2006 Sony Corp.
9 * Copyright 2010 Cavium Networks
10 *
11 * Copyright 2012 Tilera Corporation. All Rights Reserved.
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * as published by the Free Software Foundation, version 2.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
20 * NON INFRINGEMENT. See the GNU General Public License for
21 * more details.
22 */
23
24#include <linux/kprobes.h>
25#include <linux/kdebug.h>
26#include <linux/module.h>
27#include <linux/slab.h>
28#include <linux/uaccess.h>
29#include <asm/cacheflush.h>
30
31#include <arch/opcode.h>
32
33DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
34DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
35
36tile_bundle_bits breakpoint_insn = TILEGX_BPT_BUNDLE;
37tile_bundle_bits breakpoint2_insn = TILEGX_BPT_BUNDLE | DIE_SSTEPBP;
38
39/*
40 * Check whether instruction is branch or jump, or if executing it
41 * has different results depending on where it is executed (e.g. lnk).
42 */
43static int __kprobes insn_has_control(kprobe_opcode_t insn)
44{
45 if (get_Mode(insn) != 0) { /* Y-format bundle */
46 if (get_Opcode_Y1(insn) != RRR_1_OPCODE_Y1 ||
47 get_RRROpcodeExtension_Y1(insn) != UNARY_RRR_1_OPCODE_Y1)
48 return 0;
49
50 switch (get_UnaryOpcodeExtension_Y1(insn)) {
51 case JALRP_UNARY_OPCODE_Y1:
52 case JALR_UNARY_OPCODE_Y1:
53 case JRP_UNARY_OPCODE_Y1:
54 case JR_UNARY_OPCODE_Y1:
55 case LNK_UNARY_OPCODE_Y1:
56 return 1;
57 default:
58 return 0;
59 }
60 }
61
62 switch (get_Opcode_X1(insn)) {
63 case BRANCH_OPCODE_X1: /* branch instructions */
64 case JUMP_OPCODE_X1: /* jump instructions: j and jal */
65 return 1;
66
67 case RRR_0_OPCODE_X1: /* other jump instructions */
68 if (get_RRROpcodeExtension_X1(insn) != UNARY_RRR_0_OPCODE_X1)
69 return 0;
70 switch (get_UnaryOpcodeExtension_X1(insn)) {
71 case JALRP_UNARY_OPCODE_X1:
72 case JALR_UNARY_OPCODE_X1:
73 case JRP_UNARY_OPCODE_X1:
74 case JR_UNARY_OPCODE_X1:
75 case LNK_UNARY_OPCODE_X1:
76 return 1;
77 default:
78 return 0;
79 }
80 default:
81 return 0;
82 }
83}
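/*
 * The reason control-transfer bundles are rejected: they get
 * single-stepped from the out-of-line slot at p->ainsn.insn, where a
 * pc-relative branch target or an lnk/jal saved return address would
 * be computed relative to the slot instead of the probed address.
 */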
84
85int __kprobes arch_prepare_kprobe(struct kprobe *p)
86{
87 unsigned long addr = (unsigned long)p->addr;
88
89 if (addr & (sizeof(kprobe_opcode_t) - 1))
90 return -EINVAL;
91
92 if (insn_has_control(*p->addr)) {
93 pr_notice("Kprobes for control instructions are not "
94 "supported\n");
95 return -EINVAL;
96 }
97
98 /* insn: must be on special executable page on tile. */
99 p->ainsn.insn = get_insn_slot();
100 if (!p->ainsn.insn)
101 return -ENOMEM;
102
103 /*
104 * In the kprobe->ainsn.insn[] array we store the original
105 * instruction at index zero and a break trap instruction at
106 * index one.
107 */
108 memcpy(&p->ainsn.insn[0], p->addr, sizeof(kprobe_opcode_t));
109 p->ainsn.insn[1] = breakpoint2_insn;
110 p->opcode = *p->addr;
111
112 return 0;
113}
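/*
 * After arch_prepare_kprobe() the out-of-line slot is laid out as
 * follows (a sketch; each kprobe_opcode_t is a full tile_bundle_bits
 * bundle):
 *
 *   ainsn.insn[0]  copy of the probed bundle, single-stepped in place
 *   ainsn.insn[1]  breakpoint2_insn, which traps back into the kprobes
 *                  post-handler via the DIE_SSTEPBP die notification
 */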
114
115void __kprobes arch_arm_kprobe(struct kprobe *p)
116{
117 unsigned long addr_wr;
118
119 /* Operate on writable kernel text mapping. */
120 addr_wr = (unsigned long)p->addr - MEM_SV_START + PAGE_OFFSET;
121
122 if (probe_kernel_write((void *)addr_wr, &breakpoint_insn,
123 sizeof(breakpoint_insn)))
124 pr_err("%s: failed to enable kprobe\n", __func__);
125
126 smp_wmb();
127 flush_insn_slot(p);
128}
129
130void __kprobes arch_disarm_kprobe(struct kprobe *kp)
131{
132 unsigned long addr_wr;
133
134 /* Operate on writable kernel text mapping. */
135 addr_wr = (unsigned long)kp->addr - MEM_SV_START + PAGE_OFFSET;
136
137 if (probe_kernel_write((void *)addr_wr, &kp->opcode,
138 sizeof(kp->opcode)))
139		pr_err("%s: failed to disable kprobe\n", __func__);
140
141 smp_wmb();
142 flush_insn_slot(kp);
143}
144
145void __kprobes arch_remove_kprobe(struct kprobe *p)
146{
147 if (p->ainsn.insn) {
148 free_insn_slot(p->ainsn.insn, 0);
149 p->ainsn.insn = NULL;
150 }
151}
152
153static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
154{
155 kcb->prev_kprobe.kp = kprobe_running();
156 kcb->prev_kprobe.status = kcb->kprobe_status;
157 kcb->prev_kprobe.saved_pc = kcb->kprobe_saved_pc;
158}
159
160static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
161{
162 __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
163 kcb->kprobe_status = kcb->prev_kprobe.status;
164 kcb->kprobe_saved_pc = kcb->prev_kprobe.saved_pc;
165}
166
167static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
168 struct kprobe_ctlblk *kcb)
169{
170 __this_cpu_write(current_kprobe, p);
171 kcb->kprobe_saved_pc = regs->pc;
172}
173
174static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
175{
176 /* Single step inline if the instruction is a break. */
177 if (p->opcode == breakpoint_insn ||
178 p->opcode == breakpoint2_insn)
179 regs->pc = (unsigned long)p->addr;
180 else
181 regs->pc = (unsigned long)&p->ainsn.insn[0];
182}
183
184static int __kprobes kprobe_handler(struct pt_regs *regs)
185{
186 struct kprobe *p;
187 int ret = 0;
188 kprobe_opcode_t *addr;
189 struct kprobe_ctlblk *kcb;
190
191 addr = (kprobe_opcode_t *)regs->pc;
192
193 /*
194 * We don't want to be preempted for the entire
195 * duration of kprobe processing.
196 */
197 preempt_disable();
198 kcb = get_kprobe_ctlblk();
199
200 /* Check we're not actually recursing. */
201 if (kprobe_running()) {
202 p = get_kprobe(addr);
203 if (p) {
204 if (kcb->kprobe_status == KPROBE_HIT_SS &&
205 p->ainsn.insn[0] == breakpoint_insn) {
206 goto no_kprobe;
207 }
208 /*
209 * We have reentered the kprobe_handler(), since
210 * another probe was hit while within the handler.
211			 * Here we save the original kprobe variables and just
212			 * single-step the instruction of the new probe
213 * without calling any user handlers.
214 */
215 save_previous_kprobe(kcb);
216 set_current_kprobe(p, regs, kcb);
217 kprobes_inc_nmissed_count(p);
218 prepare_singlestep(p, regs);
219 kcb->kprobe_status = KPROBE_REENTER;
220 return 1;
221 } else {
222 if (*addr != breakpoint_insn) {
223 /*
224 * The breakpoint instruction was removed by
225 * another cpu right after we hit, no further
226 * handling of this interrupt is appropriate.
227 */
228 ret = 1;
229 goto no_kprobe;
230 }
231 p = __this_cpu_read(current_kprobe);
232 if (p->break_handler && p->break_handler(p, regs))
233 goto ss_probe;
234 }
235 goto no_kprobe;
236 }
237
238 p = get_kprobe(addr);
239 if (!p) {
240 if (*addr != breakpoint_insn) {
241 /*
242 * The breakpoint instruction was removed right
243 * after we hit it. Another cpu has removed
244 * either a probepoint or a debugger breakpoint
245 * at this address. In either case, no further
246 * handling of this interrupt is appropriate.
247 */
248 ret = 1;
249 }
250 /* Not one of ours: let kernel handle it. */
251 goto no_kprobe;
252 }
253
254 set_current_kprobe(p, regs, kcb);
255 kcb->kprobe_status = KPROBE_HIT_ACTIVE;
256
257 if (p->pre_handler && p->pre_handler(p, regs)) {
258 /* Handler has already set things up, so skip ss setup. */
259 return 1;
260 }
261
262ss_probe:
263 prepare_singlestep(p, regs);
264 kcb->kprobe_status = KPROBE_HIT_SS;
265 return 1;
266
267no_kprobe:
268 preempt_enable_no_resched();
269 return ret;
270}
271
272/*
273 * Called after single-stepping. p->addr is the address of the
274 * instruction that has been replaced by the breakpoint. To avoid the
275 * SMP problems that can occur when we temporarily put back the
276 * original opcode to single-step, we single-stepped a copy of the
277 * instruction. The address of this copy is p->ainsn.insn.
278 *
279 * This function prepares to return from the post-single-step
280 * breakpoint trap.
281 */
282static void __kprobes resume_execution(struct kprobe *p,
283 struct pt_regs *regs,
284 struct kprobe_ctlblk *kcb)
285{
286 unsigned long orig_pc = kcb->kprobe_saved_pc;
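	/*
	 * TILE-Gx bundles are 8 bytes wide, so this advances pc past
	 * the single probed bundle that was just stepped out of line.
	 */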
287 regs->pc = orig_pc + 8;
288}
289
290static inline int post_kprobe_handler(struct pt_regs *regs)
291{
292 struct kprobe *cur = kprobe_running();
293 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
294
295 if (!cur)
296 return 0;
297
298 if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
299 kcb->kprobe_status = KPROBE_HIT_SSDONE;
300 cur->post_handler(cur, regs, 0);
301 }
302
303 resume_execution(cur, regs, kcb);
304
305 /* Restore back the original saved kprobes variables and continue. */
306 if (kcb->kprobe_status == KPROBE_REENTER) {
307 restore_previous_kprobe(kcb);
308 goto out;
309 }
310 reset_current_kprobe();
311out:
312 preempt_enable_no_resched();
313
314 return 1;
315}
316
317static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
318{
319 struct kprobe *cur = kprobe_running();
320 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
321
322 if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
323 return 1;
324
325 if (kcb->kprobe_status & KPROBE_HIT_SS) {
326 /*
327 * We are here because the instruction being single
328 * stepped caused a page fault. We reset the current
329		 * kprobe, let resume_execution() move pc past the
330		 * probed bundle, and allow the page fault handler to
331		 * continue as a normal page fault.
332 */
333 resume_execution(cur, regs, kcb);
334 reset_current_kprobe();
335 preempt_enable_no_resched();
336 }
337 return 0;
338}
339
340/*
341 * Wrapper routine for handling exceptions.
342 */
343int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
344 unsigned long val, void *data)
345{
346 struct die_args *args = (struct die_args *)data;
347 int ret = NOTIFY_DONE;
348
349 switch (val) {
350 case DIE_BREAK:
351 if (kprobe_handler(args->regs))
352 ret = NOTIFY_STOP;
353 break;
354 case DIE_SSTEPBP:
355 if (post_kprobe_handler(args->regs))
356 ret = NOTIFY_STOP;
357 break;
358 case DIE_PAGE_FAULT:
359 /* kprobe_running() needs smp_processor_id(). */
360 preempt_disable();
361
362 if (kprobe_running()
363 && kprobe_fault_handler(args->regs, args->trapnr))
364 ret = NOTIFY_STOP;
365 preempt_enable();
366 break;
367 default:
368 break;
369 }
370 return ret;
371}
372
373int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
374{
375 struct jprobe *jp = container_of(p, struct jprobe, kp);
376 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
377
378 kcb->jprobe_saved_regs = *regs;
379 kcb->jprobe_saved_sp = regs->sp;
380
381 memcpy(kcb->jprobes_stack, (void *)kcb->jprobe_saved_sp,
382 MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));
383
384 regs->pc = (unsigned long)(jp->entry);
385
386 return 1;
387}
388
389/* Defined in the inline asm below. */
390void jprobe_return_end(void);
391
392void __kprobes jprobe_return(void)
393{
394 asm volatile(
395 "bpt\n\t"
396 ".globl jprobe_return_end\n"
397 "jprobe_return_end:\n");
398}
399
400int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
401{
402 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
403
404 if (regs->pc >= (unsigned long)jprobe_return &&
405 regs->pc <= (unsigned long)jprobe_return_end) {
406 *regs = kcb->jprobe_saved_regs;
407 memcpy((void *)kcb->jprobe_saved_sp, kcb->jprobes_stack,
408 MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));
409 preempt_enable_no_resched();
410
411 return 1;
412 }
413 return 0;
414}
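/*
 * For context, a minimal sketch of the jprobes API that this machinery
 * backs (the probed function and its 3.x-era do_fork signature are
 * illustrative; see samples/kprobes for the canonical example):
 *
 *   static long jdo_fork(unsigned long clone_flags,
 *                        unsigned long stack_start,
 *                        unsigned long stack_size,
 *                        int __user *parent_tidptr,
 *                        int __user *child_tidptr)
 *   {
 *           pr_info("clone_flags = 0x%lx\n", clone_flags);
 *           jprobe_return();        // lands in the bpt above
 *           return 0;               // never reached
 *   }
 *
 *   static struct jprobe my_jprobe = {
 *           .entry = jdo_fork,
 *           .kp.symbol_name = "do_fork",
 *   };
 *
 * register_jprobe(&my_jprobe) then routes do_fork() entries through
 * setjmp_pre_handler() above.
 */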
415
416/*
417 * Function return probe trampoline:
418 * - arch_init_kprobes() establishes a probepoint here
419 * - When the probed function returns, this probe causes the
420 * handlers to fire
421 */
422static void __used kretprobe_trampoline_holder(void)
423{
424 asm volatile(
425 "nop\n\t"
426 ".global kretprobe_trampoline\n"
427 "kretprobe_trampoline:\n\t"
428 "nop\n\t"
429 : : : "memory");
430}
431
432void kretprobe_trampoline(void);
433
434void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
435 struct pt_regs *regs)
436{
437 ri->ret_addr = (kprobe_opcode_t *) regs->lr;
438
439 /* Replace the return addr with trampoline addr */
440 regs->lr = (unsigned long)kretprobe_trampoline;
441}
442
443/*
444 * Called when the probe at kretprobe trampoline is hit.
445 */
446static int __kprobes trampoline_probe_handler(struct kprobe *p,
447 struct pt_regs *regs)
448{
449 struct kretprobe_instance *ri = NULL;
450 struct hlist_head *head, empty_rp;
451 struct hlist_node *tmp;
452 unsigned long flags, orig_ret_address = 0;
453 unsigned long trampoline_address = (unsigned long)kretprobe_trampoline;
454
455 INIT_HLIST_HEAD(&empty_rp);
456 kretprobe_hash_lock(current, &head, &flags);
457
458 /*
459 * It is possible to have multiple instances associated with a given
460 * task either because multiple functions in the call path have
461	 * a return probe installed on them, and/or more than one
462	 * return probe was registered for a target function.
463 *
464 * We can handle this because:
465 * - instances are always inserted at the head of the list
466 * - when multiple return probes are registered for the same
467 * function, the first instance's ret_addr will point to the
468 * real return address, and all the rest will point to
469 * kretprobe_trampoline
470 */
471 hlist_for_each_entry_safe(ri, tmp, head, hlist) {
472 if (ri->task != current)
473 /* another task is sharing our hash bucket */
474 continue;
475
476 if (ri->rp && ri->rp->handler)
477 ri->rp->handler(ri, regs);
478
479 orig_ret_address = (unsigned long)ri->ret_addr;
480 recycle_rp_inst(ri, &empty_rp);
481
482 if (orig_ret_address != trampoline_address) {
483 /*
484 * This is the real return address. Any other
485 * instances associated with this task are for
486 * other calls deeper on the call stack
487 */
488 break;
489 }
490 }
491
492 kretprobe_assert(ri, orig_ret_address, trampoline_address);
493 instruction_pointer(regs) = orig_ret_address;
494
495 reset_current_kprobe();
496 kretprobe_hash_unlock(current, &flags);
497 preempt_enable_no_resched();
498
499 hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
500 hlist_del(&ri->hlist);
501 kfree(ri);
502 }
503 /*
504 * By returning a non-zero value, we are telling
505 * kprobe_handler() that we don't want the post_handler
506 * to run (and have re-enabled preemption)
507 */
508 return 1;
509}
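/*
 * A matching kretprobe usage sketch (handler body and probed symbol
 * are illustrative; on TILE-Gx the return value lives in r0, i.e.
 * regs->regs[0]):
 *
 *   static int ret_handler(struct kretprobe_instance *ri,
 *                          struct pt_regs *regs)
 *   {
 *           pr_info("%s returned %ld\n", ri->rp->kp.symbol_name,
 *                   (long)regs->regs[0]);
 *           return 0;
 *   }
 *
 *   static struct kretprobe my_kretprobe = {
 *           .handler = ret_handler,
 *           .kp.symbol_name = "do_fork",
 *   };
 *
 * After register_kretprobe(&my_kretprobe), arch_prepare_kretprobe()
 * above redirects each return through kretprobe_trampoline, and this
 * probe handler fires on the way back.
 */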
510
511int __kprobes arch_trampoline_kprobe(struct kprobe *p)
512{
513 if (p->addr == (kprobe_opcode_t *)kretprobe_trampoline)
514 return 1;
515
516 return 0;
517}
518
519static struct kprobe trampoline_p = {
520 .addr = (kprobe_opcode_t *)kretprobe_trampoline,
521 .pre_handler = trampoline_probe_handler
522};
523
524int __init arch_init_kprobes(void)
525{
526 register_kprobe(&trampoline_p);
527 return 0;
528}
diff --git a/arch/tile/kernel/mcount_64.S b/arch/tile/kernel/mcount_64.S
new file mode 100644
index 000000000000..70d7bb0c4d8f
--- /dev/null
+++ b/arch/tile/kernel/mcount_64.S
@@ -0,0 +1,224 @@
1/*
2 * Copyright 2012 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 *
14 * TILE-Gx specific __mcount support
15 */
16
17#include <linux/linkage.h>
18#include <asm/ftrace.h>
19
20#define REGSIZE 8
21
22 .text
23 .global __mcount
24
25 .macro MCOUNT_SAVE_REGS
26 addli sp, sp, -REGSIZE
27 {
28 st sp, lr
29 addli r29, sp, - (12 * REGSIZE)
30 }
31 {
32 addli sp, sp, - (13 * REGSIZE)
33 st r29, sp
34 }
35 addli r29, r29, REGSIZE
36 { st r29, r0; addli r29, r29, REGSIZE }
37 { st r29, r1; addli r29, r29, REGSIZE }
38 { st r29, r2; addli r29, r29, REGSIZE }
39 { st r29, r3; addli r29, r29, REGSIZE }
40 { st r29, r4; addli r29, r29, REGSIZE }
41 { st r29, r5; addli r29, r29, REGSIZE }
42 { st r29, r6; addli r29, r29, REGSIZE }
43 { st r29, r7; addli r29, r29, REGSIZE }
44 { st r29, r8; addli r29, r29, REGSIZE }
45 { st r29, r9; addli r29, r29, REGSIZE }
46 { st r29, r10; addli r29, r29, REGSIZE }
47 .endm
48
49 .macro MCOUNT_RESTORE_REGS
50 addli r29, sp, (2 * REGSIZE)
51 { ld r0, r29; addli r29, r29, REGSIZE }
52 { ld r1, r29; addli r29, r29, REGSIZE }
53 { ld r2, r29; addli r29, r29, REGSIZE }
54 { ld r3, r29; addli r29, r29, REGSIZE }
55 { ld r4, r29; addli r29, r29, REGSIZE }
56 { ld r5, r29; addli r29, r29, REGSIZE }
57 { ld r6, r29; addli r29, r29, REGSIZE }
58 { ld r7, r29; addli r29, r29, REGSIZE }
59 { ld r8, r29; addli r29, r29, REGSIZE }
60 { ld r9, r29; addli r29, r29, REGSIZE }
61 { ld r10, r29; addli lr, sp, (13 * REGSIZE) }
62 { ld lr, lr; addli sp, sp, (14 * REGSIZE) }
63 .endm
64
65 .macro RETURN_BACK
66 { move r12, lr; move lr, r10 }
67 jrp r12
68 .endm
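/*
 * For orientation: with -pg profiling the compiler emits, at each
 * traced function's entry, something conceptually like the following
 * (illustrative only; the exact tilegx-gcc sequence may differ):
 *
 *	{ move r10, lr }	// stash the caller's return address
 *	jal __mcount		// lr = return point in this function
 *
 * which is why RETURN_BACK restores lr from r10 before returning
 * through r12.
 */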
69
70#ifdef CONFIG_DYNAMIC_FTRACE
71
72 .align 64
73STD_ENTRY(__mcount)
74__mcount:
75 j ftrace_stub
76STD_ENDPROC(__mcount)
77
78 .align 64
79STD_ENTRY(ftrace_caller)
80 moveli r11, hw2_last(function_trace_stop)
81 { shl16insli r11, r11, hw1(function_trace_stop); move r12, lr }
82 { shl16insli r11, r11, hw0(function_trace_stop); move lr, r10 }
83 ld r11, r11
84 beqz r11, 1f
85 jrp r12
86
871:
88 { move r10, lr; move lr, r12 }
89 MCOUNT_SAVE_REGS
90
91 /* arg1: self return address */
92 /* arg2: parent's return address */
93 { move r0, lr; move r1, r10 }
94
95 .global ftrace_call
96ftrace_call:
97 /*
98 * a placeholder for the call to a real tracing function, i.e.
99 * ftrace_trace_function()
100 */
101 nop
102
103#ifdef CONFIG_FUNCTION_GRAPH_TRACER
104 .global ftrace_graph_call
105ftrace_graph_call:
106 /*
107 * a placeholder for the call to a real tracing function, i.e.
108 * ftrace_graph_caller()
109 */
110 nop
111#endif
112 MCOUNT_RESTORE_REGS
113 .global ftrace_stub
114ftrace_stub:
115 RETURN_BACK
116STD_ENDPROC(ftrace_caller)
117
118#else /* ! CONFIG_DYNAMIC_FTRACE */
119
120 .align 64
121STD_ENTRY(__mcount)
122 moveli r11, hw2_last(function_trace_stop)
123 { shl16insli r11, r11, hw1(function_trace_stop); move r12, lr }
124 { shl16insli r11, r11, hw0(function_trace_stop); move lr, r10 }
125 ld r11, r11
126 beqz r11, 1f
127 jrp r12
128
1291:
130 { move r10, lr; move lr, r12 }
131 {
132 moveli r11, hw2_last(ftrace_trace_function)
133 moveli r13, hw2_last(ftrace_stub)
134 }
135 {
136 shl16insli r11, r11, hw1(ftrace_trace_function)
137 shl16insli r13, r13, hw1(ftrace_stub)
138 }
139 {
140 shl16insli r11, r11, hw0(ftrace_trace_function)
141 shl16insli r13, r13, hw0(ftrace_stub)
142 }
143
144 ld r11, r11
145 sub r14, r13, r11
146 bnez r14, static_trace
147
148#ifdef CONFIG_FUNCTION_GRAPH_TRACER
149 moveli r15, hw2_last(ftrace_graph_return)
150 shl16insli r15, r15, hw1(ftrace_graph_return)
151 shl16insli r15, r15, hw0(ftrace_graph_return)
152 ld r15, r15
153 sub r15, r15, r13
154 bnez r15, ftrace_graph_caller
155
156 {
157 moveli r16, hw2_last(ftrace_graph_entry)
158 moveli r17, hw2_last(ftrace_graph_entry_stub)
159 }
160 {
161 shl16insli r16, r16, hw1(ftrace_graph_entry)
162 shl16insli r17, r17, hw1(ftrace_graph_entry_stub)
163 }
164 {
165 shl16insli r16, r16, hw0(ftrace_graph_entry)
166 shl16insli r17, r17, hw0(ftrace_graph_entry_stub)
167 }
168 ld r16, r16
169 sub r17, r16, r17
170 bnez r17, ftrace_graph_caller
171
172#endif
173 RETURN_BACK
174
175static_trace:
176 MCOUNT_SAVE_REGS
177
178 /* arg1: self return address */
179 /* arg2: parent's return address */
180 { move r0, lr; move r1, r10 }
181
182 /* call ftrace_trace_function() */
183 jalr r11
184
185 MCOUNT_RESTORE_REGS
186
187 .global ftrace_stub
188ftrace_stub:
189 RETURN_BACK
190STD_ENDPROC(__mcount)
191
192#endif /* ! CONFIG_DYNAMIC_FTRACE */
193
194#ifdef CONFIG_FUNCTION_GRAPH_TRACER
195
196STD_ENTRY(ftrace_graph_caller)
197ftrace_graph_caller:
198#ifndef CONFIG_DYNAMIC_FTRACE
199 MCOUNT_SAVE_REGS
200#endif
201
202 /* arg1: Get the location of the parent's return address */
203 addi r0, sp, 12 * REGSIZE
204 /* arg2: Get self return address */
205 move r1, lr
206
207 jal prepare_ftrace_return
208
209 MCOUNT_RESTORE_REGS
210 RETURN_BACK
211STD_ENDPROC(ftrace_graph_caller)
212
213 .global return_to_handler
214return_to_handler:
215 MCOUNT_SAVE_REGS
216
217 jal ftrace_return_to_handler
218 /* restore the real parent address */
219 move r11, r0
220
221 MCOUNT_RESTORE_REGS
222 jr r11
223
224#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/tile/kernel/pci-dma.c b/arch/tile/kernel/pci-dma.c
index b9fe80ec1089..09b58703ac26 100644
--- a/arch/tile/kernel/pci-dma.c
+++ b/arch/tile/kernel/pci-dma.c
@@ -36,8 +36,9 @@ static void *tile_dma_alloc_coherent(struct device *dev, size_t size,
36 dma_addr_t *dma_handle, gfp_t gfp, 36 dma_addr_t *dma_handle, gfp_t gfp,
37 struct dma_attrs *attrs) 37 struct dma_attrs *attrs)
38{ 38{
39 u64 dma_mask = dev->coherent_dma_mask ?: DMA_BIT_MASK(32); 39 u64 dma_mask = (dev && dev->coherent_dma_mask) ?
40 int node = dev_to_node(dev); 40 dev->coherent_dma_mask : DMA_BIT_MASK(32);
41 int node = dev ? dev_to_node(dev) : 0;
41 int order = get_order(size); 42 int order = get_order(size);
42 struct page *pg; 43 struct page *pg;
43 dma_addr_t addr; 44 dma_addr_t addr;
@@ -256,7 +257,7 @@ static void tile_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
256 BUG_ON(!valid_dma_direction(direction)); 257 BUG_ON(!valid_dma_direction(direction));
257 258
258 __dma_complete_page(pfn_to_page(PFN_DOWN(dma_address)), 259 __dma_complete_page(pfn_to_page(PFN_DOWN(dma_address)),
259 dma_address & PAGE_OFFSET, size, direction); 260 dma_address & (PAGE_SIZE - 1), size, direction);
260} 261}
261 262
262static void tile_dma_sync_single_for_cpu(struct device *dev, 263static void tile_dma_sync_single_for_cpu(struct device *dev,
@@ -357,7 +358,7 @@ static void *tile_pci_dma_alloc_coherent(struct device *dev, size_t size,
357 358
358 addr = page_to_phys(pg); 359 addr = page_to_phys(pg);
359 360
360 *dma_handle = phys_to_dma(dev, addr); 361 *dma_handle = addr + get_dma_offset(dev);
361 362
362 return page_address(pg); 363 return page_address(pg);
363} 364}
@@ -387,7 +388,7 @@ static int tile_pci_dma_map_sg(struct device *dev, struct scatterlist *sglist,
387 sg->dma_address = sg_phys(sg); 388 sg->dma_address = sg_phys(sg);
388 __dma_prep_pa_range(sg->dma_address, sg->length, direction); 389 __dma_prep_pa_range(sg->dma_address, sg->length, direction);
389 390
390 sg->dma_address = phys_to_dma(dev, sg->dma_address); 391 sg->dma_address = sg->dma_address + get_dma_offset(dev);
391#ifdef CONFIG_NEED_SG_DMA_LENGTH 392#ifdef CONFIG_NEED_SG_DMA_LENGTH
392 sg->dma_length = sg->length; 393 sg->dma_length = sg->length;
393#endif 394#endif
@@ -422,7 +423,7 @@ static dma_addr_t tile_pci_dma_map_page(struct device *dev, struct page *page,
422 BUG_ON(offset + size > PAGE_SIZE); 423 BUG_ON(offset + size > PAGE_SIZE);
423 __dma_prep_page(page, offset, size, direction); 424 __dma_prep_page(page, offset, size, direction);
424 425
425 return phys_to_dma(dev, page_to_pa(page) + offset); 426 return page_to_pa(page) + offset + get_dma_offset(dev);
426} 427}
427 428
428static void tile_pci_dma_unmap_page(struct device *dev, dma_addr_t dma_address, 429static void tile_pci_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
@@ -432,10 +433,10 @@ static void tile_pci_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
432{ 433{
433 BUG_ON(!valid_dma_direction(direction)); 434 BUG_ON(!valid_dma_direction(direction));
434 435
435 dma_address = dma_to_phys(dev, dma_address); 436 dma_address -= get_dma_offset(dev);
436 437
437 __dma_complete_page(pfn_to_page(PFN_DOWN(dma_address)), 438 __dma_complete_page(pfn_to_page(PFN_DOWN(dma_address)),
438 dma_address & PAGE_OFFSET, size, direction); 439 dma_address & (PAGE_SIZE - 1), size, direction);
439} 440}
440 441
441static void tile_pci_dma_sync_single_for_cpu(struct device *dev, 442static void tile_pci_dma_sync_single_for_cpu(struct device *dev,
@@ -445,7 +446,7 @@ static void tile_pci_dma_sync_single_for_cpu(struct device *dev,
445{ 446{
446 BUG_ON(!valid_dma_direction(direction)); 447 BUG_ON(!valid_dma_direction(direction));
447 448
448 dma_handle = dma_to_phys(dev, dma_handle); 449 dma_handle -= get_dma_offset(dev);
449 450
450 __dma_complete_pa_range(dma_handle, size, direction); 451 __dma_complete_pa_range(dma_handle, size, direction);
451} 452}
@@ -456,7 +457,7 @@ static void tile_pci_dma_sync_single_for_device(struct device *dev,
456 enum dma_data_direction 457 enum dma_data_direction
457 direction) 458 direction)
458{ 459{
459 dma_handle = dma_to_phys(dev, dma_handle); 460 dma_handle -= get_dma_offset(dev);
460 461
461 __dma_prep_pa_range(dma_handle, size, direction); 462 __dma_prep_pa_range(dma_handle, size, direction);
462} 463}
@@ -558,22 +559,47 @@ static struct dma_map_ops pci_swiotlb_dma_ops = {
558 .mapping_error = swiotlb_dma_mapping_error, 559 .mapping_error = swiotlb_dma_mapping_error,
559}; 560};
560 561
562static struct dma_map_ops pci_hybrid_dma_ops = {
563 .alloc = tile_swiotlb_alloc_coherent,
564 .free = tile_swiotlb_free_coherent,
565 .map_page = tile_pci_dma_map_page,
566 .unmap_page = tile_pci_dma_unmap_page,
567 .map_sg = tile_pci_dma_map_sg,
568 .unmap_sg = tile_pci_dma_unmap_sg,
569 .sync_single_for_cpu = tile_pci_dma_sync_single_for_cpu,
570 .sync_single_for_device = tile_pci_dma_sync_single_for_device,
571 .sync_sg_for_cpu = tile_pci_dma_sync_sg_for_cpu,
572 .sync_sg_for_device = tile_pci_dma_sync_sg_for_device,
573 .mapping_error = tile_pci_dma_mapping_error,
574 .dma_supported = tile_pci_dma_supported
575};
576
561struct dma_map_ops *gx_legacy_pci_dma_map_ops = &pci_swiotlb_dma_ops; 577struct dma_map_ops *gx_legacy_pci_dma_map_ops = &pci_swiotlb_dma_ops;
578struct dma_map_ops *gx_hybrid_pci_dma_map_ops = &pci_hybrid_dma_ops;
562#else 579#else
563struct dma_map_ops *gx_legacy_pci_dma_map_ops; 580struct dma_map_ops *gx_legacy_pci_dma_map_ops;
581struct dma_map_ops *gx_hybrid_pci_dma_map_ops;
564#endif 582#endif
565EXPORT_SYMBOL(gx_legacy_pci_dma_map_ops); 583EXPORT_SYMBOL(gx_legacy_pci_dma_map_ops);
584EXPORT_SYMBOL(gx_hybrid_pci_dma_map_ops);
566 585
567#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK 586#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
568int dma_set_coherent_mask(struct device *dev, u64 mask) 587int dma_set_coherent_mask(struct device *dev, u64 mask)
569{ 588{
570 struct dma_map_ops *dma_ops = get_dma_ops(dev); 589 struct dma_map_ops *dma_ops = get_dma_ops(dev);
571 590
572 /* Handle legacy PCI devices with limited memory addressability. */ 591 /*
573 if (((dma_ops == gx_pci_dma_map_ops) || 592 * For PCI devices with 64-bit DMA addressing capability, promote
574 (dma_ops == gx_legacy_pci_dma_map_ops)) && 593 * the dma_ops to full capability for both streams and consistent
575 (mask <= DMA_BIT_MASK(32))) { 594 * memory access. For 32-bit capable devices, limit the consistent
576 if (mask > dev->archdata.max_direct_dma_addr) 595 * memory DMA range to max_direct_dma_addr.
596 */
597 if (dma_ops == gx_pci_dma_map_ops ||
598 dma_ops == gx_hybrid_pci_dma_map_ops ||
599 dma_ops == gx_legacy_pci_dma_map_ops) {
600 if (mask == DMA_BIT_MASK(64))
601 set_dma_ops(dev, gx_pci_dma_map_ops);
602 else if (mask > dev->archdata.max_direct_dma_addr)
577 mask = dev->archdata.max_direct_dma_addr; 603 mask = dev->archdata.max_direct_dma_addr;
578 } 604 }
579 605
@@ -584,3 +610,21 @@ int dma_set_coherent_mask(struct device *dev, u64 mask)
584} 610}
585EXPORT_SYMBOL(dma_set_coherent_mask); 611EXPORT_SYMBOL(dma_set_coherent_mask);
586#endif 612#endif
613
614#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
615/*
616 * The generic dma_get_required_mask() uses the highest physical address
617 * (max_pfn) to provide the hint to the PCI drivers regarding 32-bit or
618 * 64-bit DMA configuration. Since TILEGx has I/O TLB/MMU, allowing the
619 * DMAs to use the full 64-bit PCI address space and not limited by
620 * the physical memory space, we always let the PCI devices use
621 * 64-bit DMA if they have that capability, by returning the 64-bit
622 * DMA mask here. The device driver has the option to use 32-bit DMA if
623 * the device is not capable of 64-bit DMA.
624 */
625u64 dma_get_required_mask(struct device *dev)
626{
627 return DMA_BIT_MASK(64);
628}
629EXPORT_SYMBOL_GPL(dma_get_required_mask);
630#endif
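A driver-side sketch of how the pieces above compose (hypothetical_dma_setup and pdev are illustrative names; the calls are the standard <linux/dma-mapping.h> DMA API):

	static int hypothetical_dma_setup(struct pci_dev *pdev)
	{
		/* dma_get_required_mask() now reports 64 bits on
		 * TILE-Gx, so a capable driver asks for the full mask;
		 * dma_set_coherent_mask() above then promotes the
		 * device to the full-capability gx_pci_dma_map_ops. */
		if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
		    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)))
			return 0;

		/* 32-bit fallback: coherent buffers stay below
		 * max_direct_dma_addr via the legacy/hybrid ops. */
		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) ||
		    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)))
			return -EIO;
		return 0;
	}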
diff --git a/arch/tile/kernel/pci.c b/arch/tile/kernel/pci.c
index 67237d34c2e2..b7180e6e900d 100644
--- a/arch/tile/kernel/pci.c
+++ b/arch/tile/kernel/pci.c
@@ -20,7 +20,6 @@
20#include <linux/capability.h> 20#include <linux/capability.h>
21#include <linux/sched.h> 21#include <linux/sched.h>
22#include <linux/errno.h> 22#include <linux/errno.h>
23#include <linux/bootmem.h>
24#include <linux/irq.h> 23#include <linux/irq.h>
25#include <linux/io.h> 24#include <linux/io.h>
26#include <linux/uaccess.h> 25#include <linux/uaccess.h>
@@ -52,6 +51,8 @@
52 * 51 *
53 */ 52 */
54 53
54static int pci_probe = 1;
55
55/* 56/*
56 * This flag tells if the platform is TILEmpower that needs 57 * This flag tells if the platform is TILEmpower that needs
57 * special configuration for the PLX switch chip. 58 * special configuration for the PLX switch chip.
@@ -144,6 +145,11 @@ int __init tile_pci_init(void)
144{ 145{
145 int i; 146 int i;
146 147
148 if (!pci_probe) {
149 pr_info("PCI: disabled by boot argument\n");
150 return 0;
151 }
152
147 pr_info("PCI: Searching for controllers...\n"); 153 pr_info("PCI: Searching for controllers...\n");
148 154
149 /* Re-init number of PCIe controllers to support hot-plug feature. */ 155 /* Re-init number of PCIe controllers to support hot-plug feature. */
@@ -192,7 +198,6 @@ int __init tile_pci_init(void)
192 controller->hv_cfg_fd[0] = hv_cfg_fd0; 198 controller->hv_cfg_fd[0] = hv_cfg_fd0;
193 controller->hv_cfg_fd[1] = hv_cfg_fd1; 199 controller->hv_cfg_fd[1] = hv_cfg_fd1;
194 controller->hv_mem_fd = hv_mem_fd; 200 controller->hv_mem_fd = hv_mem_fd;
195 controller->first_busno = 0;
196 controller->last_busno = 0xff; 201 controller->last_busno = 0xff;
197 controller->ops = &tile_cfg_ops; 202 controller->ops = &tile_cfg_ops;
198 203
@@ -283,7 +288,7 @@ int __init pcibios_init(void)
283 * known to require at least 20ms here, but we use a more 288 * known to require at least 20ms here, but we use a more
284 * conservative value. 289 * conservative value.
285 */ 290 */
286 mdelay(250); 291 msleep(250);
287 292
288 /* Scan all of the recorded PCI controllers. */ 293 /* Scan all of the recorded PCI controllers. */
289 for (i = 0; i < TILE_NUM_PCIE; i++) { 294 for (i = 0; i < TILE_NUM_PCIE; i++) {
@@ -304,18 +309,10 @@ int __init pcibios_init(void)
304 309
305 pr_info("PCI: initializing controller #%d\n", i); 310 pr_info("PCI: initializing controller #%d\n", i);
306 311
307 /*
308 * This comes from the generic Linux PCI driver.
309 *
310 * It reads the PCI tree for this bus into the Linux
311 * data structures.
312 *
313 * This is inlined in linux/pci.h and calls into
314 * pci_scan_bus_parented() in probe.c.
315 */
316 pci_add_resource(&resources, &ioport_resource); 312 pci_add_resource(&resources, &ioport_resource);
317 pci_add_resource(&resources, &iomem_resource); 313 pci_add_resource(&resources, &iomem_resource);
318 bus = pci_scan_root_bus(NULL, 0, controller->ops, controller, &resources); 314 bus = pci_scan_root_bus(NULL, 0, controller->ops,
315 controller, &resources);
319 controller->root_bus = bus; 316 controller->root_bus = bus;
320 controller->last_busno = bus->busn_res.end; 317 controller->last_busno = bus->busn_res.end;
321 } 318 }
@@ -388,6 +385,16 @@ void pcibios_set_master(struct pci_dev *dev)
388 /* No special bus mastering setup handling. */ 385 /* No special bus mastering setup handling. */
389} 386}
390 387
388/* Process any "pci=" kernel boot arguments. */
389char *__init pcibios_setup(char *str)
390{
391 if (!strcmp(str, "off")) {
392 pci_probe = 0;
393 return NULL;
394 }
395 return str;
396}
397
391/* 398/*
392 * Enable memory and/or address decoding, as appropriate, for the 399 * Enable memory and/or address decoding, as appropriate, for the
393 * device described by the 'dev' struct. 400 * device described by the 'dev' struct.
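Usage note for the pcibios_setup() hook added above: booting with

	pci=off

on the kernel command line sets pci_probe to 0, so tile_pci_init() skips the controller scan entirely; any other pci= option is returned unconsumed and left to the generic PCI code.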
diff --git a/arch/tile/kernel/pci_gx.c b/arch/tile/kernel/pci_gx.c
index 11425633b2d7..a97a6452b812 100644
--- a/arch/tile/kernel/pci_gx.c
+++ b/arch/tile/kernel/pci_gx.c
@@ -69,19 +69,32 @@ static int pcie_rc[TILEGX_NUM_TRIO][TILEGX_TRIO_PCIES];
69 * a HW PCIe link-training bug. The exact delay is specified with 69 * a HW PCIe link-training bug. The exact delay is specified with
70 * a kernel boot argument in the form of "pcie_rc_delay=T,P,S", 70 * a kernel boot argument in the form of "pcie_rc_delay=T,P,S",
71 * where T is the TRIO instance number, P is the port number and S is 71 * where T is the TRIO instance number, P is the port number and S is
72 * the delay in seconds. If the delay is not provided, the value 72 * the delay in seconds. If the argument is specified, but the delay is
73 * will be DEFAULT_RC_DELAY. 73 * not provided, the value will be DEFAULT_RC_DELAY.
74 */ 74 */
75static int rc_delay[TILEGX_NUM_TRIO][TILEGX_TRIO_PCIES]; 75static int rc_delay[TILEGX_NUM_TRIO][TILEGX_TRIO_PCIES];
76 76
77/* Default number of seconds that the PCIe RC port probe can be delayed. */ 77/* Default number of seconds that the PCIe RC port probe can be delayed. */
78#define DEFAULT_RC_DELAY 10 78#define DEFAULT_RC_DELAY 10
79 79
80/* Max number of seconds that the PCIe RC port probe can be delayed. */ 80/* The PCI I/O space size in each PCI domain. */
81#define MAX_RC_DELAY 20 81#define IO_SPACE_SIZE 0x10000
82
83/* Provide shorter versions of some very long constant names. */
84#define AUTO_CONFIG_RC \
85 TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_RC
86#define AUTO_CONFIG_RC_G1 \
87 TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_RC_G1
88#define AUTO_CONFIG_EP \
89 TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_ENDPOINT
90#define AUTO_CONFIG_EP_G1 \
91 TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_ENDPOINT_G1
82 92
83/* Array of the PCIe ports configuration info obtained from the BIB. */ 93/* Array of the PCIe ports configuration info obtained from the BIB. */
84struct pcie_port_property pcie_ports[TILEGX_NUM_TRIO][TILEGX_TRIO_PCIES]; 94struct pcie_trio_ports_property pcie_ports[TILEGX_NUM_TRIO];
95
96/* Number of configured TRIO instances. */
97int num_trio_shims;
85 98
86/* All drivers share the TRIO contexts defined here. */ 99/* All drivers share the TRIO contexts defined here. */
87gxio_trio_context_t trio_contexts[TILEGX_NUM_TRIO]; 100gxio_trio_context_t trio_contexts[TILEGX_NUM_TRIO];
@@ -89,24 +102,21 @@ gxio_trio_context_t trio_contexts[TILEGX_NUM_TRIO];
89/* Pointer to an array of PCIe RC controllers. */ 102/* Pointer to an array of PCIe RC controllers. */
90struct pci_controller pci_controllers[TILEGX_NUM_TRIO * TILEGX_TRIO_PCIES]; 103struct pci_controller pci_controllers[TILEGX_NUM_TRIO * TILEGX_TRIO_PCIES];
91int num_rc_controllers; 104int num_rc_controllers;
92static int num_ep_controllers;
93 105
94static struct pci_ops tile_cfg_ops; 106static struct pci_ops tile_cfg_ops;
95 107
96/* Mask of CPUs that should receive PCIe interrupts. */ 108/* Mask of CPUs that should receive PCIe interrupts. */
97static struct cpumask intr_cpus_map; 109static struct cpumask intr_cpus_map;
98 110
99/* 111/* We don't need to worry about the alignment of resources. */
100 * We don't need to worry about the alignment of resources.
101 */
102resource_size_t pcibios_align_resource(void *data, const struct resource *res, 112resource_size_t pcibios_align_resource(void *data, const struct resource *res,
103 resource_size_t size, resource_size_t align) 113 resource_size_t size,
114 resource_size_t align)
104{ 115{
105 return res->start; 116 return res->start;
106} 117}
107EXPORT_SYMBOL(pcibios_align_resource); 118EXPORT_SYMBOL(pcibios_align_resource);
108 119
109
110/* 120/*
111 * Pick a CPU to receive and handle the PCIe interrupts, based on the IRQ #. 121 * Pick a CPU to receive and handle the PCIe interrupts, based on the IRQ #.
112 * For now, we simply send interrupts to non-dataplane CPUs. 122 * For now, we simply send interrupts to non-dataplane CPUs.
@@ -134,24 +144,19 @@ static int tile_irq_cpu(int irq)
134 return cpu; 144 return cpu;
135} 145}
136 146
137/* 147/* Open a file descriptor to the TRIO shim. */
138 * Open a file descriptor to the TRIO shim.
139 */
140static int tile_pcie_open(int trio_index) 148static int tile_pcie_open(int trio_index)
141{ 149{
142 gxio_trio_context_t *context = &trio_contexts[trio_index]; 150 gxio_trio_context_t *context = &trio_contexts[trio_index];
143 int ret; 151 int ret;
152 int mac;
144 153
145 /* 154 /* This opens a file descriptor to the TRIO shim. */
146 * This opens a file descriptor to the TRIO shim.
147 */
148 ret = gxio_trio_init(context, trio_index); 155 ret = gxio_trio_init(context, trio_index);
149 if (ret < 0) 156 if (ret < 0)
150 return ret; 157 goto gxio_trio_init_failure;
151 158
152 /* 159 /* Allocate an ASID for the kernel. */
153 * Allocate an ASID for the kernel.
154 */
155 ret = gxio_trio_alloc_asids(context, 1, 0, 0); 160 ret = gxio_trio_alloc_asids(context, 1, 0, 0);
156 if (ret < 0) { 161 if (ret < 0) {
157 pr_err("PCI: ASID alloc failure on TRIO %d, give up\n", 162 pr_err("PCI: ASID alloc failure on TRIO %d, give up\n",
@@ -189,31 +194,97 @@ static int tile_pcie_open(int trio_index)
189 } 194 }
190#endif 195#endif
191 196
197 /* Get the properties of the PCIe ports on this TRIO instance. */
198 ret = gxio_trio_get_port_property(context, &pcie_ports[trio_index]);
199 if (ret < 0) {
200 pr_err("PCI: PCIE_GET_PORT_PROPERTY failure, error %d,"
201 " on TRIO %d\n", ret, trio_index);
202 goto get_port_property_failure;
203 }
204
205 context->mmio_base_mac =
206 iorpc_ioremap(context->fd, 0, HV_TRIO_CONFIG_IOREMAP_SIZE);
207 if (context->mmio_base_mac == NULL) {
208 pr_err("PCI: TRIO config space mapping failure, error %d,"
209 " on TRIO %d\n", ret, trio_index);
210 ret = -ENOMEM;
211
212 goto trio_mmio_mapping_failure;
213 }
214
215 /* Check the port strap state which will override the BIB setting. */
216 for (mac = 0; mac < TILEGX_TRIO_PCIES; mac++) {
217 TRIO_PCIE_INTFC_PORT_CONFIG_t port_config;
218 unsigned int reg_offset;
219
220 /* Ignore ports that are not specified in the BIB. */
221 if (!pcie_ports[trio_index].ports[mac].allow_rc &&
222 !pcie_ports[trio_index].ports[mac].allow_ep)
223 continue;
224
225 reg_offset =
226 (TRIO_PCIE_INTFC_PORT_CONFIG <<
227 TRIO_CFG_REGION_ADDR__REG_SHIFT) |
228 (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
229 TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
230 (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
231
232 port_config.word =
233 __gxio_mmio_read(context->mmio_base_mac + reg_offset);
234
235 if (port_config.strap_state != AUTO_CONFIG_RC &&
236 port_config.strap_state != AUTO_CONFIG_RC_G1) {
237 /*
238 * If this is really intended to be an EP port, record
239 * it so that the endpoint driver will know about it.
240 */
241 if (port_config.strap_state == AUTO_CONFIG_EP ||
242 port_config.strap_state == AUTO_CONFIG_EP_G1)
243 pcie_ports[trio_index].ports[mac].allow_ep = 1;
244 }
245 }
246
192 return ret; 247 return ret;
193 248
249trio_mmio_mapping_failure:
250get_port_property_failure:
194asid_alloc_failure: 251asid_alloc_failure:
195#ifdef USE_SHARED_PCIE_CONFIG_REGION 252#ifdef USE_SHARED_PCIE_CONFIG_REGION
196pio_alloc_failure: 253pio_alloc_failure:
197#endif 254#endif
198 hv_dev_close(context->fd); 255 hv_dev_close(context->fd);
256gxio_trio_init_failure:
257 context->fd = -1;
199 258
200 return ret; 259 return ret;
201} 260}
202 261
203static void 262static int __init tile_trio_init(void)
204tilegx_legacy_irq_ack(struct irq_data *d) 263{
264 int i;
265
266 /* We loop over all the TRIO shims. */
267 for (i = 0; i < TILEGX_NUM_TRIO; i++) {
268 if (tile_pcie_open(i) < 0)
269 continue;
270 num_trio_shims++;
271 }
272
273 return 0;
274}
275postcore_initcall(tile_trio_init);
276
277static void tilegx_legacy_irq_ack(struct irq_data *d)
205{ 278{
206 __insn_mtspr(SPR_IPI_EVENT_RESET_K, 1UL << d->irq); 279 __insn_mtspr(SPR_IPI_EVENT_RESET_K, 1UL << d->irq);
207} 280}
208 281
209static void 282static void tilegx_legacy_irq_mask(struct irq_data *d)
210tilegx_legacy_irq_mask(struct irq_data *d)
211{ 283{
212 __insn_mtspr(SPR_IPI_MASK_SET_K, 1UL << d->irq); 284 __insn_mtspr(SPR_IPI_MASK_SET_K, 1UL << d->irq);
213} 285}
214 286
215static void 287static void tilegx_legacy_irq_unmask(struct irq_data *d)
216tilegx_legacy_irq_unmask(struct irq_data *d)
217{ 288{
218 __insn_mtspr(SPR_IPI_MASK_RESET_K, 1UL << d->irq); 289 __insn_mtspr(SPR_IPI_MASK_RESET_K, 1UL << d->irq);
219} 290}
@@ -234,8 +305,7 @@ static struct irq_chip tilegx_legacy_irq_chip = {
234 * to Linux which just calls handle_level_irq() after clearing the 305 * to Linux which just calls handle_level_irq() after clearing the
235 * MAC INTx Assert status bit associated with this interrupt. 306 * MAC INTx Assert status bit associated with this interrupt.
236 */ 307 */
237static void 308static void trio_handle_level_irq(unsigned int irq, struct irq_desc *desc)
238trio_handle_level_irq(unsigned int irq, struct irq_desc *desc)
239{ 309{
240 struct pci_controller *controller = irq_desc_get_handler_data(desc); 310 struct pci_controller *controller = irq_desc_get_handler_data(desc);
241 gxio_trio_context_t *trio_context = controller->trio; 311 gxio_trio_context_t *trio_context = controller->trio;
@@ -301,9 +371,7 @@ static int tile_init_irqs(struct pci_controller *controller)
301 goto free_irqs; 371 goto free_irqs;
302 } 372 }
303 373
304 /* 374 /* Register the IRQ handler with the kernel. */
305 * Register the IRQ handler with the kernel.
306 */
307 irq_set_chip_and_handler(irq, &tilegx_legacy_irq_chip, 375 irq_set_chip_and_handler(irq, &tilegx_legacy_irq_chip,
308 trio_handle_level_irq); 376 trio_handle_level_irq);
309 irq_set_chip_data(irq, (void *)(uint64_t)i); 377 irq_set_chip_data(irq, (void *)(uint64_t)i);
@@ -320,14 +388,39 @@ free_irqs:
320} 388}
321 389
322/* 390/*
391 * Return 1 if the port is strapped to operate in RC mode.
392 */
393static int
394strapped_for_rc(gxio_trio_context_t *trio_context, int mac)
395{
396 TRIO_PCIE_INTFC_PORT_CONFIG_t port_config;
397 unsigned int reg_offset;
398
399 /* Check the port configuration. */
400 reg_offset =
401 (TRIO_PCIE_INTFC_PORT_CONFIG <<
402 TRIO_CFG_REGION_ADDR__REG_SHIFT) |
403 (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
404 TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
405 (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
406 port_config.word =
407 __gxio_mmio_read(trio_context->mmio_base_mac + reg_offset);
408
409 if (port_config.strap_state == AUTO_CONFIG_RC ||
410 port_config.strap_state == AUTO_CONFIG_RC_G1)
411 return 1;
412 else
413 return 0;
414}
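/*
 * The reg_offset computation above also appears in tile_pcie_open();
 * a hypothetical helper capturing the shared pattern (not part of this
 * patch; shift constants come from the TRIO headers) would be:
 *
 *   static unsigned int trio_mac_reg(unsigned int reg, int mac)
 *   {
 *           return (reg << TRIO_CFG_REGION_ADDR__REG_SHIFT) |
 *                  (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
 *                   TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
 *                  (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
 *   }
 */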
415
416/*
323 * Find valid controllers and fill in pci_controller structs for each 417 * Find valid controllers and fill in pci_controller structs for each
324 * of them. 418 * of them.
325 * 419 *
326 * Returns the number of controllers discovered. 420 * Return the number of controllers discovered.
327 */ 421 */
328int __init tile_pci_init(void) 422int __init tile_pci_init(void)
329{ 423{
330 int num_trio_shims = 0;
331 int ctl_index = 0; 424 int ctl_index = 0;
332 int i, j; 425 int i, j;
333 426
@@ -338,64 +431,62 @@ int __init tile_pci_init(void)
338 431
339 pr_info("PCI: Searching for controllers...\n"); 432 pr_info("PCI: Searching for controllers...\n");
340 433
341 /*
342 * We loop over all the TRIO shims.
343 */
344 for (i = 0; i < TILEGX_NUM_TRIO; i++) {
345 int ret;
346
347 ret = tile_pcie_open(i);
348 if (ret < 0)
349 continue;
350
351 num_trio_shims++;
352 }
353
354 if (num_trio_shims == 0 || sim_is_simulator()) 434 if (num_trio_shims == 0 || sim_is_simulator())
355 return 0; 435 return 0;
356 436
357 /* 437 /*
358 * Now determine which PCIe ports are configured to operate in RC mode. 438 * Now determine which PCIe ports are configured to operate in RC
359	 * We look at the Board Information Block first and then see if there	 439	 * mode. There is a difference in the port configuration capability
360 * are any overriding configuration by the HW strapping pin. 440 * between the Gx36 and Gx72 devices.
441 *
442 * The Gx36 has configuration capability for each of the 3 PCIe
443 * interfaces (disable, auto endpoint, auto RC, etc.).
444 * On the Gx72, you can only select one of the 3 PCIe interfaces per
445 * TRIO to train automatically. Further, the allowable training modes
446 * are reduced to four options (auto endpoint, auto RC, stream x1,
447 * stream x4).
448 *
449 * For Gx36 ports, it must be allowed to be in RC mode by the
450 * Board Information Block, and the hardware strapping pins must be
451 * set to RC mode.
452 *
453 * For Gx72 ports, the port will operate in RC mode if either of the
454 * following is true:
455 * 1. It is allowed to be in RC mode by the Board Information Block,
456 * and the BIB doesn't allow the EP mode.
457 * 2. It is allowed to be in either the RC or the EP mode by the BIB,
458 * and the hardware strapping pin is set to RC mode.
361 */ 459 */
362 for (i = 0; i < TILEGX_NUM_TRIO; i++) { 460 for (i = 0; i < TILEGX_NUM_TRIO; i++) {
363 gxio_trio_context_t *context = &trio_contexts[i]; 461 gxio_trio_context_t *context = &trio_contexts[i];
364 int ret;
365 462
366 if (context->fd < 0) 463 if (context->fd < 0)
367 continue; 464 continue;
368 465
369 ret = hv_dev_pread(context->fd, 0,
370 (HV_VirtAddr)&pcie_ports[i][0],
371 sizeof(struct pcie_port_property) * TILEGX_TRIO_PCIES,
372 GXIO_TRIO_OP_GET_PORT_PROPERTY);
373 if (ret < 0) {
374 pr_err("PCI: PCIE_GET_PORT_PROPERTY failure, error %d,"
375 " on TRIO %d\n", ret, i);
376 continue;
377 }
378
379 for (j = 0; j < TILEGX_TRIO_PCIES; j++) { 466 for (j = 0; j < TILEGX_TRIO_PCIES; j++) {
380 if (pcie_ports[i][j].allow_rc) { 467 int is_rc = 0;
468
469 if (pcie_ports[i].is_gx72 &&
470 pcie_ports[i].ports[j].allow_rc) {
471 if (!pcie_ports[i].ports[j].allow_ep ||
472 strapped_for_rc(context, j))
473 is_rc = 1;
474 } else if (pcie_ports[i].ports[j].allow_rc &&
475 strapped_for_rc(context, j)) {
476 is_rc = 1;
477 }
478 if (is_rc) {
381 pcie_rc[i][j] = 1; 479 pcie_rc[i][j] = 1;
382 num_rc_controllers++; 480 num_rc_controllers++;
383 } 481 }
384 else if (pcie_ports[i][j].allow_ep) {
385 num_ep_controllers++;
386 }
387 } 482 }
388 } 483 }
389 484
390 /* 485 /* Return if no PCIe ports are configured to operate in RC mode. */
391 * Return if no PCIe ports are configured to operate in RC mode.
392 */
393 if (num_rc_controllers == 0) 486 if (num_rc_controllers == 0)
394 return 0; 487 return 0;
395 488
396 /* 489 /* Set the TRIO pointer and MAC index for each PCIe RC port. */
397 * Set the TRIO pointer and MAC index for each PCIe RC port.
398 */
399 for (i = 0; i < TILEGX_NUM_TRIO; i++) { 490 for (i = 0; i < TILEGX_NUM_TRIO; i++) {
400 for (j = 0; j < TILEGX_TRIO_PCIES; j++) { 491 for (j = 0; j < TILEGX_TRIO_PCIES; j++) {
401 if (pcie_rc[i][j]) { 492 if (pcie_rc[i][j]) {
@@ -411,26 +502,32 @@ int __init tile_pci_init(void)
411 } 502 }
412 503
413out: 504out:
414 /* 505 /* Configure each PCIe RC port. */
415 * Configure each PCIe RC port.
416 */
417 for (i = 0; i < num_rc_controllers; i++) { 506 for (i = 0; i < num_rc_controllers; i++) {
418 /*
419 * Configure the PCIe MAC to run in RC mode.
420 */
421 507
508 /* Configure the PCIe MAC to run in RC mode. */
422 struct pci_controller *controller = &pci_controllers[i]; 509 struct pci_controller *controller = &pci_controllers[i];
423 510
424 controller->index = i; 511 controller->index = i;
425 controller->ops = &tile_cfg_ops; 512 controller->ops = &tile_cfg_ops;
426 513
514 controller->io_space.start = PCIBIOS_MIN_IO +
515 (i * IO_SPACE_SIZE);
516 controller->io_space.end = controller->io_space.start +
517 IO_SPACE_SIZE - 1;
518 BUG_ON(controller->io_space.end > IO_SPACE_LIMIT);
519 controller->io_space.flags = IORESOURCE_IO;
520 snprintf(controller->io_space_name,
521 sizeof(controller->io_space_name),
522 "PCI I/O domain %d", i);
523 controller->io_space.name = controller->io_space_name;
524
427 /* 525 /*
428 * The PCI memory resource is located above the PA space. 526 * The PCI memory resource is located above the PA space.
429 * For every host bridge, the BAR window or the MMIO aperture 527 * For every host bridge, the BAR window or the MMIO aperture
430 * is in range [3GB, 4GB - 1] of a 4GB space beyond the 528 * is in range [3GB, 4GB - 1] of a 4GB space beyond the
431 * PA space. 529 * PA space.
432 */ 530 */
433
434 controller->mem_offset = TILE_PCI_MEM_START + 531 controller->mem_offset = TILE_PCI_MEM_START +
435 (i * TILE_PCI_BAR_WINDOW_TOP); 532 (i * TILE_PCI_BAR_WINDOW_TOP);
436 controller->mem_space.start = controller->mem_offset + 533 controller->mem_space.start = controller->mem_offset +
@@ -458,7 +555,6 @@ static int tile_map_irq(const struct pci_dev *dev, u8 device, u8 pin)
458 return controller->irq_intx_table[pin - 1]; 555 return controller->irq_intx_table[pin - 1];
459} 556}
460 557
461
462static void fixup_read_and_payload_sizes(struct pci_controller *controller) 558static void fixup_read_and_payload_sizes(struct pci_controller *controller)
463{ 559{
464 gxio_trio_context_t *trio_context = controller->trio; 560 gxio_trio_context_t *trio_context = controller->trio;
@@ -472,9 +568,7 @@ static void fixup_read_and_payload_sizes(struct pci_controller *controller)
472 568
473 mac = controller->mac; 569 mac = controller->mac;
474 570
475 /* 571 /* Set our max read request size to be 4KB. */
476 * Set our max read request size to be 4KB.
477 */
478 reg_offset = 572 reg_offset =
479 (TRIO_PCIE_RC_DEVICE_CONTROL << 573 (TRIO_PCIE_RC_DEVICE_CONTROL <<
480 TRIO_CFG_REGION_ADDR__REG_SHIFT) | 574 TRIO_CFG_REGION_ADDR__REG_SHIFT) |
@@ -483,10 +577,10 @@ static void fixup_read_and_payload_sizes(struct pci_controller *controller)
483 (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT); 577 (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
484 578
485 dev_control.word = __gxio_mmio_read32(trio_context->mmio_base_mac + 579 dev_control.word = __gxio_mmio_read32(trio_context->mmio_base_mac +
486 reg_offset); 580 reg_offset);
487 dev_control.max_read_req_sz = 5; 581 dev_control.max_read_req_sz = 5;
488 __gxio_mmio_write32(trio_context->mmio_base_mac + reg_offset, 582 __gxio_mmio_write32(trio_context->mmio_base_mac + reg_offset,
489 dev_control.word); 583 dev_control.word);
490 584
491 /* 585 /*
492 * Set the max payload size supported by this Gx PCIe MAC. 586 * Set the max payload size supported by this Gx PCIe MAC.
@@ -502,19 +596,14 @@ static void fixup_read_and_payload_sizes(struct pci_controller *controller)
502 (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT); 596 (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
503 597
504 rc_dev_cap.word = __gxio_mmio_read32(trio_context->mmio_base_mac + 598 rc_dev_cap.word = __gxio_mmio_read32(trio_context->mmio_base_mac +
505 reg_offset); 599 reg_offset);
506 rc_dev_cap.mps_sup = 1; 600 rc_dev_cap.mps_sup = 1;
507 __gxio_mmio_write32(trio_context->mmio_base_mac + reg_offset, 601 __gxio_mmio_write32(trio_context->mmio_base_mac + reg_offset,
508 rc_dev_cap.word); 602 rc_dev_cap.word);
509 603
510 /* Configure PCI Express MPS setting. */ 604 /* Configure PCI Express MPS setting. */
511 list_for_each_entry(child, &root_bus->children, node) { 605 list_for_each_entry(child, &root_bus->children, node)
512 struct pci_dev *self = child->self; 606 pcie_bus_configure_settings(child);
513 if (!self)
514 continue;
515
516 pcie_bus_configure_settings(child, self->pcie_mpss);
517 }
518 607
519 /* 608 /*
520 * Set the mac_config register in trio based on the MPS/MRS of the link. 609 * Set the mac_config register in trio based on the MPS/MRS of the link.
@@ -533,7 +622,7 @@ static void fixup_read_and_payload_sizes(struct pci_controller *controller)
533 dev_control.max_payload_size, 622 dev_control.max_payload_size,
534 dev_control.max_read_req_sz, 623 dev_control.max_read_req_sz,
535 mac); 624 mac);
536 if (err < 0) { 625 if (err < 0) {
537 pr_err("PCI: PCIE_CONFIGURE_MAC_MPS_MRS failure, " 626 pr_err("PCI: PCIE_CONFIGURE_MAC_MPS_MRS failure, "
538 "MAC %d on TRIO %d\n", 627 "MAC %d on TRIO %d\n",
539 mac, controller->trio_index); 628 mac, controller->trio_index);
@@ -570,21 +659,14 @@ static int setup_pcie_rc_delay(char *str)
570 if (!isdigit(*str)) 659 if (!isdigit(*str))
571 return -EINVAL; 660 return -EINVAL;
572 delay = simple_strtoul(str, (char **)&str, 10); 661 delay = simple_strtoul(str, (char **)&str, 10);
573 if (delay > MAX_RC_DELAY)
574 return -EINVAL;
575 } 662 }
576 663
577 rc_delay[trio_index][mac] = delay ? : DEFAULT_RC_DELAY; 664 rc_delay[trio_index][mac] = delay ? : DEFAULT_RC_DELAY;
578 pr_info("Delaying PCIe RC link training for %u sec"
579 " on MAC %lu on TRIO %lu\n", rc_delay[trio_index][mac],
580 mac, trio_index);
581 return 0; 665 return 0;
582} 666}
583early_param("pcie_rc_delay", setup_pcie_rc_delay); 667early_param("pcie_rc_delay", setup_pcie_rc_delay);
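setup_pcie_rc_delay() consumes a comma-separated boot argument; the trio/mac parsing sits in the part of the function elided from this hunk. A userspace sketch of the whole parse, assuming the argument takes the form pcie_rc_delay=<trio>,<mac>[,<delay>]:

#include <ctype.h>
#include <stdlib.h>

/* Parse "<trio>,<mac>[,<delay>]"; returns 0 on success, -1 on bad input. */
static int parse_rc_delay(const char *str, unsigned long *trio,
			  unsigned long *mac, unsigned long *delay)
{
	char *end;

	*trio = strtoul(str, &end, 10);
	if (*end++ != ',')
		return -1;
	*mac = strtoul(end, &end, 10);
	*delay = 0;			/* 0 later maps to DEFAULT_RC_DELAY */
	if (*end == ',') {
		if (!isdigit((unsigned char)end[1]))
			return -1;
		*delay = strtoul(end + 1, &end, 10);
	}
	return *end == '\0' ? 0 : -1;
}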
584 668
585/* 669/* PCI initialization entry point, called by subsys_initcall. */
586 * PCI initialization entry point, called by subsys_initcall.
587 */
588int __init pcibios_init(void) 670int __init pcibios_init(void)
589{ 671{
590 resource_size_t offset; 672 resource_size_t offset;
@@ -594,35 +676,10 @@ int __init pcibios_init(void)
594 676
595 tile_pci_init(); 677 tile_pci_init();
596 678
597 if (num_rc_controllers == 0 && num_ep_controllers == 0) 679 if (num_rc_controllers == 0)
598 return 0; 680 return 0;
599 681
600 /* 682 /*
601 * We loop over all the TRIO shims and set up the MMIO mappings.
602 */
603 for (i = 0; i < TILEGX_NUM_TRIO; i++) {
604 gxio_trio_context_t *context = &trio_contexts[i];
605
606 if (context->fd < 0)
607 continue;
608
609 /*
610 * Map in the MMIO space for the MAC.
611 */
612 offset = 0;
613 context->mmio_base_mac =
614 iorpc_ioremap(context->fd, offset,
615 HV_TRIO_CONFIG_IOREMAP_SIZE);
616 if (context->mmio_base_mac == NULL) {
617 pr_err("PCI: MAC map failure on TRIO %d\n", i);
618
619 hv_dev_close(context->fd);
620 context->fd = -1;
621 continue;
622 }
623 }
624
625 /*
626 * Delay a bit in case devices aren't ready. Some devices are 683 * Delay a bit in case devices aren't ready. Some devices are
627 * known to require at least 20ms here, but we use a more 684 * known to require at least 20ms here, but we use a more
628 * conservative value. 685 * conservative value.
@@ -633,7 +690,6 @@ int __init pcibios_init(void)
633 for (next_busno = 0, i = 0; i < num_rc_controllers; i++) { 690 for (next_busno = 0, i = 0; i < num_rc_controllers; i++) {
634 struct pci_controller *controller = &pci_controllers[i]; 691 struct pci_controller *controller = &pci_controllers[i];
635 gxio_trio_context_t *trio_context = controller->trio; 692 gxio_trio_context_t *trio_context = controller->trio;
636 TRIO_PCIE_INTFC_PORT_CONFIG_t port_config;
637 TRIO_PCIE_INTFC_PORT_STATUS_t port_status; 693 TRIO_PCIE_INTFC_PORT_STATUS_t port_status;
638 TRIO_PCIE_INTFC_TX_FIFO_CTL_t tx_fifo_ctl; 694 TRIO_PCIE_INTFC_TX_FIFO_CTL_t tx_fifo_ctl;
639 struct pci_bus *bus; 695 struct pci_bus *bus;
@@ -650,75 +706,64 @@ int __init pcibios_init(void)
650 mac = controller->mac; 706 mac = controller->mac;
651 707
652 /* 708 /*
653 * Check the port strap state which will override the BIB 709 * Check for PCIe link-up status to decide if we need
654 * setting. 710 * to force the link to come up.
655 */ 711 */
656
657 reg_offset = 712 reg_offset =
658 (TRIO_PCIE_INTFC_PORT_CONFIG << 713 (TRIO_PCIE_INTFC_PORT_STATUS <<
659 TRIO_CFG_REGION_ADDR__REG_SHIFT) | 714 TRIO_CFG_REGION_ADDR__REG_SHIFT) |
660 (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE << 715 (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
661 TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) | 716 TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
662 (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT); 717 (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
663 718
664 port_config.word = 719 port_status.word =
665 __gxio_mmio_read(trio_context->mmio_base_mac + 720 __gxio_mmio_read(trio_context->mmio_base_mac +
666 reg_offset); 721 reg_offset);
667 722 if (!port_status.dl_up) {
668 if ((port_config.strap_state != 723 if (rc_delay[trio_index][mac]) {
669 TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_RC) && 724 pr_info("Delaying PCIe RC TRIO init %d sec"
670 (port_config.strap_state != 725 " on MAC %d on TRIO %d\n",
671 TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_RC_G1)) { 726 rc_delay[trio_index][mac], mac,
672 /* 727 trio_index);
673 * If this is really intended to be an EP port, 728 msleep(rc_delay[trio_index][mac] * 1000);
674 * record it so that the endpoint driver will know about it. 729 }
675 */ 730 ret = gxio_trio_force_rc_link_up(trio_context, mac);
676 if (port_config.strap_state == 731 if (ret < 0)
677 TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_ENDPOINT || 732 pr_err("PCI: PCIE_FORCE_LINK_UP failure, "
678 port_config.strap_state == 733 "MAC %d on TRIO %d\n", mac, trio_index);
679 TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_ENDPOINT_G1)
680 pcie_ports[trio_index][mac].allow_ep = 1;
681
682 continue;
683 } 734 }
684 735
685 /*
686 * Delay the RC link training if needed.
687 */
688 if (rc_delay[trio_index][mac])
689 msleep(rc_delay[trio_index][mac] * 1000);
690
691 ret = gxio_trio_force_rc_link_up(trio_context, mac);
692 if (ret < 0)
693 pr_err("PCI: PCIE_FORCE_LINK_UP failure, "
694 "MAC %d on TRIO %d\n", mac, trio_index);
695
696 pr_info("PCI: Found PCI controller #%d on TRIO %d MAC %d\n", i, 736 pr_info("PCI: Found PCI controller #%d on TRIO %d MAC %d\n", i,
697 trio_index, controller->mac); 737 trio_index, controller->mac);
698 738
699 /* 739 /* Delay the bus probe if needed. */
700 * Wait a bit here because some EP devices take longer 740 if (rc_delay[trio_index][mac]) {
 701 * to come up. 741 pr_info("Delaying PCIe RC bus enumeration %d sec"
702 */ 742 " on MAC %d on TRIO %d\n",
703 msleep(1000); 743 rc_delay[trio_index][mac], mac,
704 744 trio_index);
705 /* 745 msleep(rc_delay[trio_index][mac] * 1000);
706 * Check for PCIe link-up status. 746 } else {
707 */ 747 /*
708 748 * Wait a bit here because some EP devices
709 reg_offset = 749 * take longer to come up.
710 (TRIO_PCIE_INTFC_PORT_STATUS << 750 */
711 TRIO_CFG_REGION_ADDR__REG_SHIFT) | 751 msleep(1000);
712 (TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE << 752 }
713 TRIO_CFG_REGION_ADDR__INTFC_SHIFT ) |
714 (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
715 753
754 /* Check for PCIe link-up status again. */
716 port_status.word = 755 port_status.word =
717 __gxio_mmio_read(trio_context->mmio_base_mac + 756 __gxio_mmio_read(trio_context->mmio_base_mac +
718 reg_offset); 757 reg_offset);
719 if (!port_status.dl_up) { 758 if (!port_status.dl_up) {
720 pr_err("PCI: link is down, MAC %d on TRIO %d\n", 759 if (pcie_ports[trio_index].ports[mac].removable) {
721 mac, trio_index); 760 pr_info("PCI: link is down, MAC %d on TRIO %d\n",
761 mac, trio_index);
762 pr_info("This is expected if no PCIe card"
763 " is connected to this link\n");
764 } else
765 pr_err("PCI: link is down, MAC %d on TRIO %d\n",
766 mac, trio_index);
722 continue; 767 continue;
723 } 768 }
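The reordered bring-up above reads more easily flattened out: check link state, optionally delay, force the link up, delay again (or wait 1s for slow endpoints), then re-check. A compilable toy of that sequence; every helper here is a stub for the real TRIO_PCIE_INTFC_PORT_STATUS read or gxio_trio_force_rc_link_up() call.

#include <stdbool.h>
#include <stdio.h>

static bool dl_up;			/* stands in for port_status.dl_up */
static unsigned int rc_delay_sec;	/* rc_delay[trio_index][mac] */

static void sleep_sec(unsigned int s) { printf("sleep %us\n", s); }
static void force_link_up(void) { dl_up = true; }

static bool bring_up_port(void)
{
	if (!dl_up) {			/* first PORT_STATUS read */
		if (rc_delay_sec)
			sleep_sec(rc_delay_sec);	/* delay link training */
		force_link_up();	/* gxio_trio_force_rc_link_up() */
	}

	if (rc_delay_sec)
		sleep_sec(rc_delay_sec);	/* delay bus enumeration */
	else
		sleep_sec(1);		/* slow endpoints need ~1s */

	/* Second PORT_STATUS read; a down link skips the port entirely. */
	return dl_up;
}

int main(void)
{
	printf("link %s\n", bring_up_port() ? "up" : "down");
	return 0;
}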
724 769
@@ -744,7 +789,6 @@ int __init pcibios_init(void)
744 * Change the device ID so that Linux bus crawl doesn't confuse 789 * Change the device ID so that Linux bus crawl doesn't confuse
745 * the internal bridge with any Tilera endpoints. 790 * the internal bridge with any Tilera endpoints.
746 */ 791 */
747
748 reg_offset = 792 reg_offset =
749 (TRIO_PCIE_RC_DEVICE_ID_VEN_ID << 793 (TRIO_PCIE_RC_DEVICE_ID_VEN_ID <<
750 TRIO_CFG_REGION_ADDR__REG_SHIFT) | 794 TRIO_CFG_REGION_ADDR__REG_SHIFT) |
@@ -757,10 +801,7 @@ int __init pcibios_init(void)
757 TRIO_PCIE_RC_DEVICE_ID_VEN_ID__DEV_ID_SHIFT) | 801 TRIO_PCIE_RC_DEVICE_ID_VEN_ID__DEV_ID_SHIFT) |
758 TILERA_VENDOR_ID); 802 TILERA_VENDOR_ID);
759 803
760 /* 804 /* Set the internal P2P bridge class code. */
761 * Set the internal P2P bridge class code.
762 */
763
764 reg_offset = 805 reg_offset =
765 (TRIO_PCIE_RC_REVISION_ID << 806 (TRIO_PCIE_RC_REVISION_ID <<
766 TRIO_CFG_REGION_ADDR__REG_SHIFT) | 807 TRIO_CFG_REGION_ADDR__REG_SHIFT) |
@@ -771,26 +812,22 @@ int __init pcibios_init(void)
771 class_code_revision = 812 class_code_revision =
772 __gxio_mmio_read32(trio_context->mmio_base_mac + 813 __gxio_mmio_read32(trio_context->mmio_base_mac +
773 reg_offset); 814 reg_offset);
774 class_code_revision = (class_code_revision & 0xff ) | 815 class_code_revision = (class_code_revision & 0xff) |
775 (PCI_CLASS_BRIDGE_PCI << 16); 816 (PCI_CLASS_BRIDGE_PCI << 16);
776 817
777 __gxio_mmio_write32(trio_context->mmio_base_mac + 818 __gxio_mmio_write32(trio_context->mmio_base_mac +
778 reg_offset, class_code_revision); 819 reg_offset, class_code_revision);
779 820
780#ifdef USE_SHARED_PCIE_CONFIG_REGION 821#ifdef USE_SHARED_PCIE_CONFIG_REGION
781 822
782 /* 823 /* Map in the MMIO space for the PIO region. */
783 * Map in the MMIO space for the PIO region.
784 */
785 offset = HV_TRIO_PIO_OFFSET(trio_context->pio_cfg_index) | 824 offset = HV_TRIO_PIO_OFFSET(trio_context->pio_cfg_index) |
786 (((unsigned long long)mac) << 825 (((unsigned long long)mac) <<
787 TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT); 826 TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT);
788 827
789#else 828#else
790 829
791 /* 830 /* Alloc a PIO region for PCI config access per MAC. */
792 * Alloc a PIO region for PCI config access per MAC.
793 */
794 ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0); 831 ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0);
795 if (ret < 0) { 832 if (ret < 0) {
796 pr_err("PCI: PCI CFG PIO alloc failure for mac %d " 833 pr_err("PCI: PCI CFG PIO alloc failure for mac %d "
@@ -801,9 +838,7 @@ int __init pcibios_init(void)
801 838
802 trio_context->pio_cfg_index[mac] = ret; 839 trio_context->pio_cfg_index[mac] = ret;
803 840
804 /* 841 /* For PIO CFG, the bus_address_hi parameter is 0. */
805 * For PIO CFG, the bus_address_hi parameter is 0.
806 */
807 ret = gxio_trio_init_pio_region_aux(trio_context, 842 ret = gxio_trio_init_pio_region_aux(trio_context,
808 trio_context->pio_cfg_index[mac], 843 trio_context->pio_cfg_index[mac],
809 mac, 0, HV_TRIO_PIO_FLAG_CONFIG_SPACE); 844 mac, 0, HV_TRIO_PIO_FLAG_CONFIG_SPACE);
@@ -820,9 +855,15 @@ int __init pcibios_init(void)
820 855
821#endif 856#endif
822 857
858 /*
859 * To save VMALLOC space, we take advantage of the fact that
 860 * bit 29 in the PIO CFG address format is reserved as 0. With
861 * TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT being 30,
862 * this cuts VMALLOC space usage from 1GB to 512MB per mac.
863 */
823 trio_context->mmio_base_pio_cfg[mac] = 864 trio_context->mmio_base_pio_cfg[mac] =
824 iorpc_ioremap(trio_context->fd, offset, 865 iorpc_ioremap(trio_context->fd, offset, (1UL <<
825 (1 << TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT)); 866 (TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT - 1)));
826 if (trio_context->mmio_base_pio_cfg[mac] == NULL) { 867 if (trio_context->mmio_base_pio_cfg[mac] == NULL) {
827 pr_err("PCI: PIO map failure for mac %d on TRIO %d\n", 868 pr_err("PCI: PIO map failure for mac %d on TRIO %d\n",
828 mac, trio_index); 869 mac, trio_index);
@@ -830,9 +871,7 @@ int __init pcibios_init(void)
830 continue; 871 continue;
831 } 872 }
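A quick check of the VMALLOC saving claimed in the comment above, taking MAC_SHIFT = 30 as stated:

#include <stdio.h>

#define MAC_SHIFT 30	/* TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT */

int main(void)
{
	unsigned long full = 1UL << MAC_SHIFT;		/* old mapping size */
	unsigned long half = 1UL << (MAC_SHIFT - 1);	/* new mapping size */

	printf("per-MAC CFG window: %luMB -> %luMB\n",
	       full >> 20, half >> 20);			/* 1024MB -> 512MB */
	return 0;
}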
832 873
833 /* 874 /* Initialize the PCIe interrupts. */
834 * Initialize the PCIe interrupts.
835 */
836 if (tile_init_irqs(controller)) { 875 if (tile_init_irqs(controller)) {
837 pr_err("PCI: IRQs init failure for mac %d on TRIO %d\n", 876 pr_err("PCI: IRQs init failure for mac %d on TRIO %d\n",
838 mac, trio_index); 877 mac, trio_index);
@@ -843,17 +882,16 @@ int __init pcibios_init(void)
843 /* 882 /*
844 * The PCI memory resource is located above the PA space. 883 * The PCI memory resource is located above the PA space.
845 * The memory range for the PCI root bus should not overlap 884 * The memory range for the PCI root bus should not overlap
846 * with the physical RAM 885 * with the physical RAM.
847 */ 886 */
848 pci_add_resource_offset(&resources, &controller->mem_space, 887 pci_add_resource_offset(&resources, &controller->mem_space,
849 controller->mem_offset); 888 controller->mem_offset);
850 889 pci_add_resource(&resources, &controller->io_space);
851 controller->first_busno = next_busno; 890 controller->first_busno = next_busno;
852 bus = pci_scan_root_bus(NULL, next_busno, controller->ops, 891 bus = pci_scan_root_bus(NULL, next_busno, controller->ops,
853 controller, &resources); 892 controller, &resources);
854 controller->root_bus = bus; 893 controller->root_bus = bus;
855 next_busno = bus->busn_res.end + 1; 894 next_busno = bus->busn_res.end + 1;
856
857 } 895 }
858 896
859 /* Do machine dependent PCI interrupt routing */ 897 /* Do machine dependent PCI interrupt routing */
@@ -865,7 +903,6 @@ int __init pcibios_init(void)
865 * It allocates all of the resources (I/O memory, etc) 903 * It allocates all of the resources (I/O memory, etc)
866 * associated with the devices read in above. 904 * associated with the devices read in above.
867 */ 905 */
868
869 pci_assign_unassigned_resources(); 906 pci_assign_unassigned_resources();
870 907
871 /* Record the I/O resources in the PCI controller structure. */ 908 /* Record the I/O resources in the PCI controller structure. */
@@ -873,9 +910,6 @@ int __init pcibios_init(void)
873 struct pci_controller *controller = &pci_controllers[i]; 910 struct pci_controller *controller = &pci_controllers[i];
874 gxio_trio_context_t *trio_context = controller->trio; 911 gxio_trio_context_t *trio_context = controller->trio;
875 struct pci_bus *root_bus = pci_controllers[i].root_bus; 912 struct pci_bus *root_bus = pci_controllers[i].root_bus;
876 struct pci_bus *next_bus;
877 uint32_t bus_address_hi;
878 struct pci_dev *dev;
879 int ret; 913 int ret;
880 int j; 914 int j;
881 915
@@ -889,43 +923,12 @@ int __init pcibios_init(void)
889 /* Configure the max_payload_size values for this domain. */ 923 /* Configure the max_payload_size values for this domain. */
890 fixup_read_and_payload_sizes(controller); 924 fixup_read_and_payload_sizes(controller);
891 925
892 list_for_each_entry(dev, &root_bus->devices, bus_list) { 926 /* Alloc a PIO region for PCI memory access for each RC port. */
893 /* Find the PCI host controller, ie. the 1st bridge. */
894 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI &&
895 (PCI_SLOT(dev->devfn) == 0)) {
896 next_bus = dev->subordinate;
897 pci_controllers[i].mem_resources[0] =
898 *next_bus->resource[0];
899 pci_controllers[i].mem_resources[1] =
900 *next_bus->resource[1];
901 pci_controllers[i].mem_resources[2] =
902 *next_bus->resource[2];
903
904 break;
905 }
906 }
907
908 if (pci_controllers[i].mem_resources[1].flags & IORESOURCE_MEM)
909 bus_address_hi =
910 pci_controllers[i].mem_resources[1].start >> 32;
911 else if (pci_controllers[i].mem_resources[2].flags & IORESOURCE_PREFETCH)
912 bus_address_hi =
913 pci_controllers[i].mem_resources[2].start >> 32;
914 else {
915 /* This is unlikely. */
916 pr_err("PCI: no memory resources on TRIO %d mac %d\n",
917 controller->trio_index, controller->mac);
918 continue;
919 }
920
921 /*
922 * Alloc a PIO region for PCI memory access for each RC port.
923 */
924 ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0); 927 ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0);
925 if (ret < 0) { 928 if (ret < 0) {
926 pr_err("PCI: MEM PIO alloc failure on TRIO %d mac %d, " 929 pr_err("PCI: MEM PIO alloc failure on TRIO %d mac %d, "
927 "give up\n", controller->trio_index, 930 "give up\n", controller->trio_index,
928 controller->mac); 931 controller->mac);
929 932
930 continue; 933 continue;
931 } 934 }
@@ -943,12 +946,45 @@ int __init pcibios_init(void)
943 0); 946 0);
944 if (ret < 0) { 947 if (ret < 0) {
945 pr_err("PCI: MEM PIO init failure on TRIO %d mac %d, " 948 pr_err("PCI: MEM PIO init failure on TRIO %d mac %d, "
946 "give up\n", controller->trio_index, 949 "give up\n", controller->trio_index,
947 controller->mac); 950 controller->mac);
951
952 continue;
953 }
954
955#ifdef CONFIG_TILE_PCI_IO
956 /*
957 * Alloc a PIO region for PCI I/O space access for each RC port.
958 */
959 ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0);
960 if (ret < 0) {
961 pr_err("PCI: I/O PIO alloc failure on TRIO %d mac %d, "
962 "give up\n", controller->trio_index,
963 controller->mac);
948 964
949 continue; 965 continue;
950 } 966 }
951 967
968 controller->pio_io_index = ret;
969
970 /*
 971 * For PIO IO, the bus_address_hi parameter is hard-coded to 0
972 * because PCI I/O address space is 32-bit.
973 */
974 ret = gxio_trio_init_pio_region_aux(trio_context,
975 controller->pio_io_index,
976 controller->mac,
977 0,
978 HV_TRIO_PIO_FLAG_IO_SPACE);
979 if (ret < 0) {
980 pr_err("PCI: I/O PIO init failure on TRIO %d mac %d, "
981 "give up\n", controller->trio_index,
982 controller->mac);
983
984 continue;
985 }
986#endif
987
952 /* 988 /*
953 * Configure a Mem-Map region for each memory controller so 989 * Configure a Mem-Map region for each memory controller so
954 * that Linux can map all of its PA space to the PCI bus. 990 * that Linux can map all of its PA space to the PCI bus.
@@ -963,9 +999,9 @@ int __init pcibios_init(void)
963 0); 999 0);
964 if (ret < 0) { 1000 if (ret < 0) {
965 pr_err("PCI: Mem-Map alloc failure on TRIO %d " 1001 pr_err("PCI: Mem-Map alloc failure on TRIO %d "
966 "mac %d for MC %d, give up\n", 1002 "mac %d for MC %d, give up\n",
967 controller->trio_index, 1003 controller->trio_index,
968 controller->mac, j); 1004 controller->mac, j);
969 1005
970 goto alloc_mem_map_failed; 1006 goto alloc_mem_map_failed;
971 } 1007 }
@@ -996,9 +1032,9 @@ int __init pcibios_init(void)
996 GXIO_TRIO_ORDER_MODE_UNORDERED); 1032 GXIO_TRIO_ORDER_MODE_UNORDERED);
997 if (ret < 0) { 1033 if (ret < 0) {
998 pr_err("PCI: Mem-Map init failure on TRIO %d " 1034 pr_err("PCI: Mem-Map init failure on TRIO %d "
999 "mac %d for MC %d, give up\n", 1035 "mac %d for MC %d, give up\n",
1000 controller->trio_index, 1036 controller->trio_index,
1001 controller->mac, j); 1037 controller->mac, j);
1002 1038
1003 goto alloc_mem_map_failed; 1039 goto alloc_mem_map_failed;
1004 } 1040 }
@@ -1007,23 +1043,19 @@ int __init pcibios_init(void)
1007alloc_mem_map_failed: 1043alloc_mem_map_failed:
1008 break; 1044 break;
1009 } 1045 }
1010
1011 } 1046 }
1012 1047
1013 return 0; 1048 return 0;
1014} 1049}
1015subsys_initcall(pcibios_init); 1050subsys_initcall(pcibios_init);
1016 1051
1017/* Note: to be deleted after Linux 3.6 merge. */ 1052/* No bus fixups needed. */
1018void pcibios_fixup_bus(struct pci_bus *bus) 1053void pcibios_fixup_bus(struct pci_bus *bus)
1019{ 1054{
1020} 1055}
1021 1056
1022/* 1057/* Process any "pci=" kernel boot arguments. */
1023 * This can be called from the generic PCI layer, but doesn't need to 1058char *__init pcibios_setup(char *str)
1024 * do anything.
1025 */
1026char *pcibios_setup(char *str)
1027{ 1059{
1028 if (!strcmp(str, "off")) { 1060 if (!strcmp(str, "off")) {
1029 pci_probe = 0; 1061 pci_probe = 0;
@@ -1034,8 +1066,7 @@ char *pcibios_setup(char *str)
1034 1066
1035/* 1067/*
1036 * Enable memory address decoding, as appropriate, for the 1068 * Enable memory address decoding, as appropriate, for the
1037 * device described by the 'dev' struct. The I/O decoding 1069 * device described by the 'dev' struct.
1038 * is disabled, though the TILE-Gx supports I/O addressing.
1039 * 1070 *
1040 * This is called from the generic PCI layer, and can be called 1071 * This is called from the generic PCI layer, and can be called
1041 * for bridges or endpoints. 1072 * for bridges or endpoints.
@@ -1045,13 +1076,24 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
1045 return pci_enable_resources(dev, mask); 1076 return pci_enable_resources(dev, mask);
1046} 1077}
1047 1078
1048/* Called for each device after PCI setup is done. */ 1079/*
1080 * Called for each device after PCI setup is done.
1081 * We initialize the PCI device capabilities conservatively, assuming that
1082 * all devices can only address the 32-bit DMA space. The exception here is
1083 * that the device dma_offset is set to the value that matches the 64-bit
1084 * capable devices. This is OK because dma_offset is not used by legacy
1085 * dma_ops, nor by the hybrid dma_ops's streaming DMAs, which are 64-bit ops.
1086 * This implementation matches the kernel design of setting PCI devices'
1087 * coherent_dma_mask to 0xffffffffull by default, allowing the device drivers
1088 * to skip calling pci_set_consistent_dma_mask(DMA_BIT_MASK(32)).
1089 */
1049static void pcibios_fixup_final(struct pci_dev *pdev) 1090static void pcibios_fixup_final(struct pci_dev *pdev)
1050{ 1091{
1051 set_dma_ops(&pdev->dev, gx_pci_dma_map_ops); 1092 set_dma_ops(&pdev->dev, gx_legacy_pci_dma_map_ops);
1052 set_dma_offset(&pdev->dev, TILE_PCI_MEM_MAP_BASE_OFFSET); 1093 set_dma_offset(&pdev->dev, TILE_PCI_MEM_MAP_BASE_OFFSET);
1053 pdev->dev.archdata.max_direct_dma_addr = 1094 pdev->dev.archdata.max_direct_dma_addr =
1054 TILE_PCI_MAX_DIRECT_DMA_ADDRESS; 1095 TILE_PCI_MAX_DIRECT_DMA_ADDRESS;
1096 pdev->dev.coherent_dma_mask = TILE_PCI_MAX_DIRECT_DMA_ADDRESS;
1055} 1097}
1056DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_final); 1098DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_final);
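Given the conservative 32-bit default installed by this fixup, a 64-bit-capable driver raises its own masks at probe time. A hedged fragment of the standard driver-side sequence (pdev is the device being probed; error handling elided):

/* In a driver's ->probe(), with <linux/dma-mapping.h> included: */
if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0)
	dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
else
	dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));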
1057 1099
@@ -1065,19 +1107,15 @@ void __iomem *ioremap(resource_size_t phys_addr, unsigned long size)
1065 resource_size_t start; 1107 resource_size_t start;
1066 resource_size_t end; 1108 resource_size_t end;
1067 int trio_fd; 1109 int trio_fd;
1068 int i, j; 1110 int i;
1069 1111
1070 start = phys_addr; 1112 start = phys_addr;
1071 end = phys_addr + size - 1; 1113 end = phys_addr + size - 1;
1072 1114
1073 /* 1115 /*
1074 * In the following, each PCI controller's mem_resources[1] 1116 * By searching phys_addr in each controller's mem_space, we can
1075 * represents its (non-prefetchable) PCI memory resource and
1076 * mem_resources[2] refers to its prefetchable PCI memory resource.
1077 * By searching phys_addr in each controller's mem_resources[], we can
1078 * determine the controller that should accept the PCI memory access. 1117 * determine the controller that should accept the PCI memory access.
1079 */ 1118 */
1080
1081 for (i = 0; i < num_rc_controllers; i++) { 1119 for (i = 0; i < num_rc_controllers; i++) {
1082 /* 1120 /*
1083 * Skip controllers that are not properly initialized or 1121 * Skip controllers that are not properly initialized or
@@ -1086,25 +1124,18 @@ void __iomem *ioremap(resource_size_t phys_addr, unsigned long size)
1086 if (pci_controllers[i].root_bus == NULL) 1124 if (pci_controllers[i].root_bus == NULL)
1087 continue; 1125 continue;
1088 1126
1089 for (j = 1; j < 3; j++) { 1127 bar_start = pci_controllers[i].mem_space.start;
1090 bar_start = 1128 bar_end = pci_controllers[i].mem_space.end;
1091 pci_controllers[i].mem_resources[j].start;
1092 bar_end =
1093 pci_controllers[i].mem_resources[j].end;
1094 1129
1095 if ((start >= bar_start) && (end <= bar_end)) { 1130 if ((start >= bar_start) && (end <= bar_end)) {
1096 1131 controller = &pci_controllers[i];
1097 controller = &pci_controllers[i]; 1132 break;
1098
1099 goto got_it;
1100 }
1101 } 1133 }
1102 } 1134 }
1103 1135
1104 if (controller == NULL) 1136 if (controller == NULL)
1105 return NULL; 1137 return NULL;
1106 1138
1107got_it:
1108 trio_fd = controller->trio->fd; 1139 trio_fd = controller->trio->fd;
1109 1140
1110 /* Convert the resource start to the bus address offset. */ 1141 /* Convert the resource start to the bus address offset. */
@@ -1112,14 +1143,71 @@ got_it:
1112 1143
1113 offset = HV_TRIO_PIO_OFFSET(controller->pio_mem_index) + start; 1144 offset = HV_TRIO_PIO_OFFSET(controller->pio_mem_index) + start;
1114 1145
1115 /* 1146 /* We need to keep the PCI bus address's in-page offset in the VA. */
1116 * We need to keep the PCI bus address's in-page offset in the VA.
1117 */
1118 return iorpc_ioremap(trio_fd, offset, size) + 1147 return iorpc_ioremap(trio_fd, offset, size) +
1119 (phys_addr & (PAGE_SIZE - 1)); 1148 (start & (PAGE_SIZE - 1));
1120} 1149}
1121EXPORT_SYMBOL(ioremap); 1150EXPORT_SYMBOL(ioremap);
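The rewrite above replaces the per-bridge mem_resources[] walk with a single containment test against each controller's mem_space. A standalone model of that test and of the in-page-offset rule in the return statement; the window addresses below are invented.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

struct res { uint64_t start, end; };

/* Toy table standing in for pci_controllers[i].mem_space; values invented. */
static const struct res mem_space[] = {
	{ 0x100c0000000ULL, 0x100ffffffffULL },
	{ 0x500c0000000ULL, 0x500ffffffffULL },
};

static int find_controller(uint64_t phys, uint64_t size)
{
	uint64_t end = phys + size - 1;

	for (unsigned int i = 0; i < sizeof(mem_space) / sizeof(mem_space[0]); i++)
		if (phys >= mem_space[i].start && end <= mem_space[i].end)
			return (int)i;
	return -1;
}

int main(void)
{
	uint64_t phys = 0x100c0001234ULL;
	int i = find_controller(phys, 0x100);

	if (i >= 0) {
		/* Bus-address offset within the window... */
		uint64_t offset = phys - mem_space[i].start;
		/* ...and the in-page offset that must survive the remap. */
		printf("controller %d, offset %#llx, page offset %#llx\n",
		       i, (unsigned long long)offset,
		       (unsigned long long)(offset & (PAGE_SIZE - 1)));
	}
	return 0;
}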
1122 1151
1152#ifdef CONFIG_TILE_PCI_IO
1153/* Map a PCI I/O address into VA space. */
1154void __iomem *ioport_map(unsigned long port, unsigned int size)
1155{
1156 struct pci_controller *controller = NULL;
1157 resource_size_t bar_start;
1158 resource_size_t bar_end;
1159 resource_size_t offset;
1160 resource_size_t start;
1161 resource_size_t end;
1162 int trio_fd;
1163 int i;
1164
1165 start = port;
1166 end = port + size - 1;
1167
1168 /*
1169 * By searching the port in each controller's io_space, we can
1170 * determine the controller that should accept the PCI I/O access.
1171 */
1172 for (i = 0; i < num_rc_controllers; i++) {
1173 /*
1174 * Skip controllers that are not properly initialized or
1175 * have down links.
1176 */
1177 if (pci_controllers[i].root_bus == NULL)
1178 continue;
1179
1180 bar_start = pci_controllers[i].io_space.start;
1181 bar_end = pci_controllers[i].io_space.end;
1182
1183 if ((start >= bar_start) && (end <= bar_end)) {
1184 controller = &pci_controllers[i];
1185 break;
1186 }
1187 }
1188
1189 if (controller == NULL)
1190 return NULL;
1191
1192 trio_fd = controller->trio->fd;
1193
1194 /* Convert the resource start to the bus address offset. */
1195 port -= controller->io_space.start;
1196
1197 offset = HV_TRIO_PIO_OFFSET(controller->pio_io_index) + port;
1198
1199 /* We need to keep the PCI bus address's in-page offset in the VA. */
1200 return iorpc_ioremap(trio_fd, offset, size) + (port & (PAGE_SIZE - 1));
1201}
1202EXPORT_SYMBOL(ioport_map);
1203
1204void ioport_unmap(void __iomem *addr)
1205{
1206 iounmap(addr);
1207}
1208EXPORT_SYMBOL(ioport_unmap);
1209#endif
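With CONFIG_TILE_PCI_IO set, drivers consume these hooks through the usual port-I/O pattern. A hedged driver-side fragment; the BAR index and register offset are illustrative, not from any real device.

/* Driver-side sketch; assumes <linux/io.h> and a probed pdev. */
void __iomem *regs = ioport_map(pci_resource_start(pdev, 0), 16);

if (regs) {
	u8 lsr = ioread8(regs + 5);	/* e.g. a 16550-style line status */
	(void)lsr;
	ioport_unmap(regs);
}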
1210
1123void pci_iounmap(struct pci_dev *dev, void __iomem *addr) 1211void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
1124{ 1212{
1125 iounmap(addr); 1213 iounmap(addr);
@@ -1141,7 +1229,6 @@ EXPORT_SYMBOL(pci_iounmap);
1141 * offset is in bytes, from the start of config space for the 1229 * offset is in bytes, from the start of config space for the
1142 * specified bus & device. 1230 * specified bus & device.
1143 */ 1231 */
1144
1145static int tile_cfg_read(struct pci_bus *bus, unsigned int devfn, int offset, 1232static int tile_cfg_read(struct pci_bus *bus, unsigned int devfn, int offset,
1146 int size, u32 *val) 1233 int size, u32 *val)
1147{ 1234{
@@ -1191,7 +1278,6 @@ static int tile_cfg_read(struct pci_bus *bus, unsigned int devfn, int offset,
1191 * Accesses to the directly attached device have to be 1278 * Accesses to the directly attached device have to be
1192 * sent as type-0 configs. 1279 * sent as type-0 configs.
1193 */ 1280 */
1194
1195 if (busnum == (controller->first_busno + 1)) { 1281 if (busnum == (controller->first_busno + 1)) {
1196 /* 1282 /*
1197 * There is only one device off of our built-in P2P bridge. 1283 * There is only one device off of our built-in P2P bridge.
@@ -1213,9 +1299,8 @@ static int tile_cfg_read(struct pci_bus *bus, unsigned int devfn, int offset,
1213 * Note that we don't set the mac field in cfg_addr because the 1299 * Note that we don't set the mac field in cfg_addr because the
1214 * mapping is per port. 1300 * mapping is per port.
1215 */ 1301 */
1216
1217 mmio_addr = trio_context->mmio_base_pio_cfg[controller->mac] + 1302 mmio_addr = trio_context->mmio_base_pio_cfg[controller->mac] +
1218 cfg_addr.word; 1303 cfg_addr.word;
1219 1304
1220valid_device: 1305valid_device:
1221 1306
@@ -1319,7 +1404,6 @@ static int tile_cfg_write(struct pci_bus *bus, unsigned int devfn, int offset,
1319 * Accesses to the directly attached device have to be 1404 * Accesses to the directly attached device have to be
1320 * sent as type-0 configs. 1405 * sent as type-0 configs.
1321 */ 1406 */
1322
1323 if (busnum == (controller->first_busno + 1)) { 1407 if (busnum == (controller->first_busno + 1)) {
1324 /* 1408 /*
1325 * There is only one device off of our built-in P2P bridge. 1409 * There is only one device off of our built-in P2P bridge.
@@ -1341,7 +1425,6 @@ static int tile_cfg_write(struct pci_bus *bus, unsigned int devfn, int offset,
1341 * Note that we don't set the mac field in cfg_addr because the 1425 * Note that we don't set the mac field in cfg_addr because the
1342 * mapping is per port. 1426 * mapping is per port.
1343 */ 1427 */
1344
1345 mmio_addr = trio_context->mmio_base_pio_cfg[controller->mac] + 1428 mmio_addr = trio_context->mmio_base_pio_cfg[controller->mac] +
1346 cfg_addr.word; 1429 cfg_addr.word;
1347 1430
@@ -1379,11 +1462,8 @@ static struct pci_ops tile_cfg_ops = {
1379}; 1462};
1380 1463
1381 1464
1382/* 1465/* MSI support starts here. */
1383 * MSI support starts here. 1466static unsigned int tilegx_msi_startup(struct irq_data *d)
1384 */
1385static unsigned int
1386tilegx_msi_startup(struct irq_data *d)
1387{ 1467{
1388 if (d->msi_desc) 1468 if (d->msi_desc)
1389 unmask_msi_irq(d); 1469 unmask_msi_irq(d);
@@ -1391,21 +1471,18 @@ tilegx_msi_startup(struct irq_data *d)
1391 return 0; 1471 return 0;
1392} 1472}
1393 1473
1394static void 1474static void tilegx_msi_ack(struct irq_data *d)
1395tilegx_msi_ack(struct irq_data *d)
1396{ 1475{
1397 __insn_mtspr(SPR_IPI_EVENT_RESET_K, 1UL << d->irq); 1476 __insn_mtspr(SPR_IPI_EVENT_RESET_K, 1UL << d->irq);
1398} 1477}
1399 1478
1400static void 1479static void tilegx_msi_mask(struct irq_data *d)
1401tilegx_msi_mask(struct irq_data *d)
1402{ 1480{
1403 mask_msi_irq(d); 1481 mask_msi_irq(d);
1404 __insn_mtspr(SPR_IPI_MASK_SET_K, 1UL << d->irq); 1482 __insn_mtspr(SPR_IPI_MASK_SET_K, 1UL << d->irq);
1405} 1483}
1406 1484
1407static void 1485static void tilegx_msi_unmask(struct irq_data *d)
1408tilegx_msi_unmask(struct irq_data *d)
1409{ 1486{
1410 __insn_mtspr(SPR_IPI_MASK_RESET_K, 1UL << d->irq); 1487 __insn_mtspr(SPR_IPI_MASK_RESET_K, 1UL << d->irq);
1411 unmask_msi_irq(d); 1488 unmask_msi_irq(d);
@@ -1462,32 +1539,55 @@ int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
1462 trio_context = controller->trio; 1539 trio_context = controller->trio;
1463 1540
1464 /* 1541 /*
1465 * Allocate the Mem-Map that will accept the MSI write and 1542 * Allocate a scatter-queue that will accept the MSI write and
1466 * trigger the TILE-side interrupts. 1543 * trigger the TILE-side interrupts. We use the scatter-queue regions
1544 * before the mem map regions, because the latter are needed by more
1545 * applications.
1467 */ 1546 */
1468 mem_map = gxio_trio_alloc_memory_maps(trio_context, 1, 0, 0); 1547 mem_map = gxio_trio_alloc_scatter_queues(trio_context, 1, 0, 0);
1469 if (mem_map < 0) { 1548 if (mem_map >= 0) {
1470 dev_printk(KERN_INFO, &pdev->dev, 1549 TRIO_MAP_SQ_DOORBELL_FMT_t doorbell_template = {{
1471 "%s Mem-Map alloc failure. " 1550 .pop = 0,
1472 "Failed to initialize MSI interrupts. " 1551 .doorbell = 1,
1473 "Falling back to legacy interrupts.\n", 1552 }};
1474 desc->msi_attrib.is_msix ? "MSI-X" : "MSI"); 1553
1554 mem_map += TRIO_NUM_MAP_MEM_REGIONS;
1555 mem_map_base = MEM_MAP_INTR_REGIONS_BASE +
1556 mem_map * MEM_MAP_INTR_REGION_SIZE;
1557 mem_map_limit = mem_map_base + MEM_MAP_INTR_REGION_SIZE - 1;
1558
1559 msi_addr = mem_map_base + MEM_MAP_INTR_REGION_SIZE - 8;
1560 msg.data = (unsigned int)doorbell_template.word;
1561 } else {
1562 /* SQ regions are out, allocate from map mem regions. */
1563 mem_map = gxio_trio_alloc_memory_maps(trio_context, 1, 0, 0);
1564 if (mem_map < 0) {
1565 dev_printk(KERN_INFO, &pdev->dev,
1566 "%s Mem-Map alloc failure. "
1567 "Failed to initialize MSI interrupts. "
1568 "Falling back to legacy interrupts.\n",
1569 desc->msi_attrib.is_msix ? "MSI-X" : "MSI");
1570 ret = -ENOMEM;
1571 goto msi_mem_map_alloc_failure;
1572 }
1475 1573
1476 ret = -ENOMEM; 1574 mem_map_base = MEM_MAP_INTR_REGIONS_BASE +
1477 goto msi_mem_map_alloc_failure; 1575 mem_map * MEM_MAP_INTR_REGION_SIZE;
1576 mem_map_limit = mem_map_base + MEM_MAP_INTR_REGION_SIZE - 1;
1577
1578 msi_addr = mem_map_base + TRIO_MAP_MEM_REG_INT3 -
1579 TRIO_MAP_MEM_REG_INT0;
1580
1581 msg.data = mem_map;
1478 } 1582 }
1479 1583
1480 /* We try to distribute different IRQs to different tiles. */ 1584 /* We try to distribute different IRQs to different tiles. */
1481 cpu = tile_irq_cpu(irq); 1585 cpu = tile_irq_cpu(irq);
1482 1586
1483 /* 1587 /*
1484 * Now call up to the HV to configure the Mem-Map interrupt and 1588 * Now call up to the HV to configure the MSI interrupt and
1485 * set up the IPI binding. 1589 * set up the IPI binding.
1486 */ 1590 */
1487 mem_map_base = MEM_MAP_INTR_REGIONS_BASE +
1488 mem_map * MEM_MAP_INTR_REGION_SIZE;
1489 mem_map_limit = mem_map_base + MEM_MAP_INTR_REGION_SIZE - 1;
1490
1491 ret = gxio_trio_config_msi_intr(trio_context, cpu_x(cpu), cpu_y(cpu), 1591 ret = gxio_trio_config_msi_intr(trio_context, cpu_x(cpu), cpu_y(cpu),
1492 KERNEL_PL, irq, controller->mac, 1592 KERNEL_PL, irq, controller->mac,
1493 mem_map, mem_map_base, mem_map_limit, 1593 mem_map, mem_map_base, mem_map_limit,
@@ -1500,13 +1600,9 @@ int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
1500 1600
1501 irq_set_msi_desc(irq, desc); 1601 irq_set_msi_desc(irq, desc);
1502 1602
1503 msi_addr = mem_map_base + TRIO_MAP_MEM_REG_INT3 - TRIO_MAP_MEM_REG_INT0;
1504
1505 msg.address_hi = msi_addr >> 32; 1603 msg.address_hi = msi_addr >> 32;
1506 msg.address_lo = msi_addr & 0xffffffff; 1604 msg.address_lo = msi_addr & 0xffffffff;
1507 1605
1508 msg.data = mem_map;
1509
1510 write_msi_msg(irq, &msg); 1606 write_msi_msg(irq, &msg);
1511 irq_set_chip_and_handler(irq, &tilegx_msi_chip, handle_level_irq); 1607 irq_set_chip_and_handler(irq, &tilegx_msi_chip, handle_level_irq);
1512 irq_set_handler_data(irq, controller); 1608 irq_set_handler_data(irq, controller);
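The restructured MSI setup above first tries a scatter-queue doorbell and falls back to a Mem-Map interrupt region only when the SQ pool is exhausted. A compilable toy of that fallback and of the index offsetting; the stub return values and the TRIO_NUM_MAP_MEM_REGIONS value are made up.

#include <stdio.h>

/* Stubs for gxio_trio_alloc_scatter_queues()/gxio_trio_alloc_memory_maps(). */
static int alloc_scatter_queue(void) { return -1; }	/* pretend SQs are gone */
static int alloc_memory_map(void) { return 3; }

#define TRIO_NUM_MAP_MEM_REGIONS 16	/* assumed value, illustration only */

static int alloc_msi_region(void)
{
	int idx = alloc_scatter_queue();

	if (idx >= 0)		/* SQ indices are offset past the mem-map indices */
		return idx + TRIO_NUM_MAP_MEM_REGIONS;

	return alloc_memory_map();	/* < 0 here means fall back to INTx */
}

int main(void)
{
	printf("msi region index: %d\n", alloc_msi_region());
	return 0;
}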
diff --git a/arch/tile/kernel/proc.c b/arch/tile/kernel/proc.c
index dafc447b5125..681100c59fda 100644
--- a/arch/tile/kernel/proc.c
+++ b/arch/tile/kernel/proc.c
@@ -113,7 +113,6 @@ arch_initcall(proc_tile_init);
113 * Support /proc/sys/tile directory 113 * Support /proc/sys/tile directory
114 */ 114 */
115 115
116#ifndef __tilegx__ /* FIXME: GX: no support for unaligned access yet */
117static ctl_table unaligned_subtable[] = { 116static ctl_table unaligned_subtable[] = {
118 { 117 {
119 .procname = "enabled", 118 .procname = "enabled",
@@ -160,4 +159,3 @@ static int __init proc_sys_tile_init(void)
160} 159}
161 160
162arch_initcall(proc_sys_tile_init); 161arch_initcall(proc_sys_tile_init);
163#endif
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index 8ac304484f98..16ed58948757 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -33,6 +33,7 @@
33#include <asm/syscalls.h> 33#include <asm/syscalls.h>
34#include <asm/traps.h> 34#include <asm/traps.h>
35#include <asm/setup.h> 35#include <asm/setup.h>
36#include <asm/uaccess.h>
36#ifdef CONFIG_HARDWALL 37#ifdef CONFIG_HARDWALL
37#include <asm/hardwall.h> 38#include <asm/hardwall.h>
38#endif 39#endif
@@ -74,19 +75,6 @@ void arch_release_thread_info(struct thread_info *info)
74{ 75{
75 struct single_step_state *step_state = info->step_state; 76 struct single_step_state *step_state = info->step_state;
76 77
77#ifdef CONFIG_HARDWALL
78 /*
79 * We free a thread_info from the context of the task that has
80 * been scheduled next, so the original task is already dead.
81 * Calling deactivate here just frees up the data structures.
82 * If the task we're freeing held the last reference to a
83 * hardwall fd, it would have been released prior to this point
84 * anyway via exit_files(), and the hardwall_task.info pointers
85 * would be NULL by now.
86 */
87 hardwall_deactivate_all(info->task);
88#endif
89
90 if (step_state) { 78 if (step_state) {
91 79
92 /* 80 /*
@@ -160,6 +148,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
160 */ 148 */
161 task_thread_info(p)->step_state = NULL; 149 task_thread_info(p)->step_state = NULL;
162 150
151#ifdef __tilegx__
152 /*
153 * Do not clone unalign jit fixup from the parent; each thread
154 * must allocate its own on demand.
155 */
156 task_thread_info(p)->unalign_jit_base = NULL;
157#endif
158
163 /* 159 /*
164 * Copy the registers onto the kernel stack so the 160 * Copy the registers onto the kernel stack so the
165 * return-from-interrupt code will reload it into registers. 161 * return-from-interrupt code will reload it into registers.
@@ -191,16 +187,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
191 memset(&p->thread.dma_async_tlb, 0, sizeof(struct async_tlb)); 187 memset(&p->thread.dma_async_tlb, 0, sizeof(struct async_tlb));
192#endif 188#endif
193 189
194#if CHIP_HAS_SN_PROC()
195 /* Likewise, the new thread is not running static processor code. */
196 p->thread.sn_proc_running = 0;
197 memset(&p->thread.sn_async_tlb, 0, sizeof(struct async_tlb));
198#endif
199
200#if CHIP_HAS_PROC_STATUS_SPR()
201 /* New thread has its miscellaneous processor state bits clear. */ 190 /* New thread has its miscellaneous processor state bits clear. */
202 p->thread.proc_status = 0; 191 p->thread.proc_status = 0;
203#endif
204 192
205#ifdef CONFIG_HARDWALL 193#ifdef CONFIG_HARDWALL
206 /* New thread does not own any networks. */ 194 /* New thread does not own any networks. */
@@ -218,19 +206,32 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
218 return 0; 206 return 0;
219} 207}
220 208
209int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
210{
211 task_thread_info(tsk)->align_ctl = val;
212 return 0;
213}
214
215int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
216{
217 return put_user(task_thread_info(tsk)->align_ctl,
218 (unsigned int __user *)adr);
219}
220
221static struct task_struct corrupt_current = { .comm = "<corrupt>" };
222
221/* 223/*
222 * Return "current" if it looks plausible, or else a pointer to a dummy. 224 * Return "current" if it looks plausible, or else a pointer to a dummy.
223 * This can be helpful if we are just trying to emit a clean panic. 225 * This can be helpful if we are just trying to emit a clean panic.
224 */ 226 */
225struct task_struct *validate_current(void) 227struct task_struct *validate_current(void)
226{ 228{
227 static struct task_struct corrupt = { .comm = "<corrupt>" };
228 struct task_struct *tsk = current; 229 struct task_struct *tsk = current;
229 if (unlikely((unsigned long)tsk < PAGE_OFFSET || 230 if (unlikely((unsigned long)tsk < PAGE_OFFSET ||
230 (high_memory && (void *)tsk > high_memory) || 231 (high_memory && (void *)tsk > high_memory) ||
231 ((unsigned long)tsk & (__alignof__(*tsk) - 1)) != 0)) { 232 ((unsigned long)tsk & (__alignof__(*tsk) - 1)) != 0)) {
232 pr_err("Corrupt 'current' %p (sp %#lx)\n", tsk, stack_pointer); 233 pr_err("Corrupt 'current' %p (sp %#lx)\n", tsk, stack_pointer);
233 tsk = &corrupt; 234 tsk = &corrupt_current;
234 } 235 }
235 return tsk; 236 return tsk;
236} 237}
@@ -369,15 +370,11 @@ static void save_arch_state(struct thread_struct *t)
369 t->system_save[2] = __insn_mfspr(SPR_SYSTEM_SAVE_0_2); 370 t->system_save[2] = __insn_mfspr(SPR_SYSTEM_SAVE_0_2);
370 t->system_save[3] = __insn_mfspr(SPR_SYSTEM_SAVE_0_3); 371 t->system_save[3] = __insn_mfspr(SPR_SYSTEM_SAVE_0_3);
371 t->intctrl_0 = __insn_mfspr(SPR_INTCTRL_0_STATUS); 372 t->intctrl_0 = __insn_mfspr(SPR_INTCTRL_0_STATUS);
372#if CHIP_HAS_PROC_STATUS_SPR()
373 t->proc_status = __insn_mfspr(SPR_PROC_STATUS); 373 t->proc_status = __insn_mfspr(SPR_PROC_STATUS);
374#endif
375#if !CHIP_HAS_FIXED_INTVEC_BASE() 374#if !CHIP_HAS_FIXED_INTVEC_BASE()
376 t->interrupt_vector_base = __insn_mfspr(SPR_INTERRUPT_VECTOR_BASE_0); 375 t->interrupt_vector_base = __insn_mfspr(SPR_INTERRUPT_VECTOR_BASE_0);
377#endif 376#endif
378#if CHIP_HAS_TILE_RTF_HWM()
379 t->tile_rtf_hwm = __insn_mfspr(SPR_TILE_RTF_HWM); 377 t->tile_rtf_hwm = __insn_mfspr(SPR_TILE_RTF_HWM);
380#endif
381#if CHIP_HAS_DSTREAM_PF() 378#if CHIP_HAS_DSTREAM_PF()
382 t->dstream_pf = __insn_mfspr(SPR_DSTREAM_PF); 379 t->dstream_pf = __insn_mfspr(SPR_DSTREAM_PF);
383#endif 380#endif
@@ -398,15 +395,11 @@ static void restore_arch_state(const struct thread_struct *t)
398 __insn_mtspr(SPR_SYSTEM_SAVE_0_2, t->system_save[2]); 395 __insn_mtspr(SPR_SYSTEM_SAVE_0_2, t->system_save[2]);
399 __insn_mtspr(SPR_SYSTEM_SAVE_0_3, t->system_save[3]); 396 __insn_mtspr(SPR_SYSTEM_SAVE_0_3, t->system_save[3]);
400 __insn_mtspr(SPR_INTCTRL_0_STATUS, t->intctrl_0); 397 __insn_mtspr(SPR_INTCTRL_0_STATUS, t->intctrl_0);
401#if CHIP_HAS_PROC_STATUS_SPR()
402 __insn_mtspr(SPR_PROC_STATUS, t->proc_status); 398 __insn_mtspr(SPR_PROC_STATUS, t->proc_status);
403#endif
404#if !CHIP_HAS_FIXED_INTVEC_BASE() 399#if !CHIP_HAS_FIXED_INTVEC_BASE()
405 __insn_mtspr(SPR_INTERRUPT_VECTOR_BASE_0, t->interrupt_vector_base); 400 __insn_mtspr(SPR_INTERRUPT_VECTOR_BASE_0, t->interrupt_vector_base);
406#endif 401#endif
407#if CHIP_HAS_TILE_RTF_HWM()
408 __insn_mtspr(SPR_TILE_RTF_HWM, t->tile_rtf_hwm); 402 __insn_mtspr(SPR_TILE_RTF_HWM, t->tile_rtf_hwm);
409#endif
410#if CHIP_HAS_DSTREAM_PF() 403#if CHIP_HAS_DSTREAM_PF()
411 __insn_mtspr(SPR_DSTREAM_PF, t->dstream_pf); 404 __insn_mtspr(SPR_DSTREAM_PF, t->dstream_pf);
412#endif 405#endif
@@ -415,26 +408,11 @@ static void restore_arch_state(const struct thread_struct *t)
415 408
416void _prepare_arch_switch(struct task_struct *next) 409void _prepare_arch_switch(struct task_struct *next)
417{ 410{
418#if CHIP_HAS_SN_PROC()
419 int snctl;
420#endif
421#if CHIP_HAS_TILE_DMA() 411#if CHIP_HAS_TILE_DMA()
422 struct tile_dma_state *dma = &current->thread.tile_dma_state; 412 struct tile_dma_state *dma = &current->thread.tile_dma_state;
423 if (dma->enabled) 413 if (dma->enabled)
424 save_tile_dma_state(dma); 414 save_tile_dma_state(dma);
425#endif 415#endif
426#if CHIP_HAS_SN_PROC()
427 /*
428 * Suspend the static network processor if it was running.
429 * We do not suspend the fabric itself, just like we don't
430 * try to suspend the UDN.
431 */
432 snctl = __insn_mfspr(SPR_SNCTL);
433 current->thread.sn_proc_running =
434 (snctl & SPR_SNCTL__FRZPROC_MASK) == 0;
435 if (current->thread.sn_proc_running)
436 __insn_mtspr(SPR_SNCTL, snctl | SPR_SNCTL__FRZPROC_MASK);
437#endif
438} 416}
439 417
440 418
@@ -462,17 +440,6 @@ struct task_struct *__sched _switch_to(struct task_struct *prev,
462 /* Restore other arch state. */ 440 /* Restore other arch state. */
463 restore_arch_state(&next->thread); 441 restore_arch_state(&next->thread);
464 442
465#if CHIP_HAS_SN_PROC()
466 /*
467 * Restart static network processor in the new process
468 * if it was running before.
469 */
470 if (next->thread.sn_proc_running) {
471 int snctl = __insn_mfspr(SPR_SNCTL);
472 __insn_mtspr(SPR_SNCTL, snctl & ~SPR_SNCTL__FRZPROC_MASK);
473 }
474#endif
475
476#ifdef CONFIG_HARDWALL 443#ifdef CONFIG_HARDWALL
477 /* Enable or disable access to the network registers appropriately. */ 444 /* Enable or disable access to the network registers appropriately. */
478 hardwall_switch_tasks(prev, next); 445 hardwall_switch_tasks(prev, next);
@@ -514,7 +481,7 @@ int do_work_pending(struct pt_regs *regs, u32 thread_info_flags)
514 schedule(); 481 schedule();
515 return 1; 482 return 1;
516 } 483 }
517#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC() 484#if CHIP_HAS_TILE_DMA()
518 if (thread_info_flags & _TIF_ASYNC_TLB) { 485 if (thread_info_flags & _TIF_ASYNC_TLB) {
519 do_async_page_fault(regs); 486 do_async_page_fault(regs);
520 return 1; 487 return 1;
@@ -564,7 +531,15 @@ void flush_thread(void)
564 */ 531 */
565void exit_thread(void) 532void exit_thread(void)
566{ 533{
567 /* Nothing */ 534#ifdef CONFIG_HARDWALL
535 /*
536 * Remove the task from the list of tasks that are associated
537 * with any live hardwalls. (If the task that is exiting held
538 * the last reference to a hardwall fd, it would already have
539 * been released and deactivated at this point.)
540 */
541 hardwall_deactivate_all(current);
542#endif
568} 543}
569 544
570void show_regs(struct pt_regs *regs) 545void show_regs(struct pt_regs *regs)
@@ -573,23 +548,24 @@ void show_regs(struct pt_regs *regs)
573 int i; 548 int i;
574 549
575 pr_err("\n"); 550 pr_err("\n");
576 show_regs_print_info(KERN_ERR); 551 if (tsk != &corrupt_current)
552 show_regs_print_info(KERN_ERR);
577#ifdef __tilegx__ 553#ifdef __tilegx__
578 for (i = 0; i < 51; i += 3) 554 for (i = 0; i < 17; i++)
579 pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT" r%-2d: "REGFMT"\n", 555 pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
580 i, regs->regs[i], i+1, regs->regs[i+1], 556 i, regs->regs[i], i+18, regs->regs[i+18],
581 i+2, regs->regs[i+2]); 557 i+36, regs->regs[i+36]);
582 pr_err(" r51: "REGFMT" r52: "REGFMT" tp : "REGFMT"\n", 558 pr_err(" r17: "REGFMT" r35: "REGFMT" tp : "REGFMT"\n",
583 regs->regs[51], regs->regs[52], regs->tp); 559 regs->regs[17], regs->regs[35], regs->tp);
584 pr_err(" sp : "REGFMT" lr : "REGFMT"\n", regs->sp, regs->lr); 560 pr_err(" sp : "REGFMT" lr : "REGFMT"\n", regs->sp, regs->lr);
585#else 561#else
586 for (i = 0; i < 52; i += 4) 562 for (i = 0; i < 13; i++)
587 pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT 563 pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT
588 " r%-2d: "REGFMT" r%-2d: "REGFMT"\n", 564 " r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
589 i, regs->regs[i], i+1, regs->regs[i+1], 565 i, regs->regs[i], i+14, regs->regs[i+14],
590 i+2, regs->regs[i+2], i+3, regs->regs[i+3]); 566 i+27, regs->regs[i+27], i+40, regs->regs[i+40]);
591 pr_err(" r52: "REGFMT" tp : "REGFMT" sp : "REGFMT" lr : "REGFMT"\n", 567 pr_err(" r13: "REGFMT" tp : "REGFMT" sp : "REGFMT" lr : "REGFMT"\n",
592 regs->regs[52], regs->tp, regs->sp, regs->lr); 568 regs->regs[13], regs->tp, regs->sp, regs->lr);
593#endif 569#endif
594 pr_err(" pc : "REGFMT" ex1: %ld faultnum: %ld\n", 570 pr_err(" pc : "REGFMT" ex1: %ld faultnum: %ld\n",
595 regs->pc, regs->ex1, regs->faultnum); 571 regs->pc, regs->ex1, regs->faultnum);
diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c
index 0f83ed4602b2..de98c6ddf136 100644
--- a/arch/tile/kernel/ptrace.c
+++ b/arch/tile/kernel/ptrace.c
@@ -265,6 +265,21 @@ int do_syscall_trace_enter(struct pt_regs *regs)
265 265
266void do_syscall_trace_exit(struct pt_regs *regs) 266void do_syscall_trace_exit(struct pt_regs *regs)
267{ 267{
268 long errno;
269
270 /*
271 * The standard tile calling convention returns the value (or negative
272 * errno) in r0, and zero (or positive errno) in r1.
273 * It saves a couple of cycles on the hot path to do this work in
274 * registers only as we return, rather than updating the in-memory
 275 * struct pt_regs.
276 */
277 errno = (long) regs->regs[0];
278 if (errno < 0 && errno > -4096)
279 regs->regs[1] = -errno;
280 else
281 regs->regs[1] = 0;
282
268 if (test_thread_flag(TIF_SYSCALL_TRACE)) 283 if (test_thread_flag(TIF_SYSCALL_TRACE))
269 tracehook_report_syscall_exit(regs, 0); 284 tracehook_report_syscall_exit(regs, 0);
270 285
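The convention documented above is simple to model: r0 carries the return value or negative errno, r1 carries zero or the positive errno. A standalone check, using the same (-4096, 0) error window as the code:

#include <stdio.h>

/* Model of the r0/r1 syscall-return convention described above. */
static void set_return_regs(long r0, long *r1)
{
	/* r0 in (-4096, 0) encodes -errno; r1 gets the positive errno. */
	if (r0 < 0 && r0 > -4096)
		*r1 = -r0;
	else
		*r1 = 0;
}

int main(void)
{
	long r1;

	set_return_regs(-2, &r1);	/* e.g. -ENOENT */
	printf("r1 = %ld\n", r1);	/* prints 2 */
	return 0;
}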
@@ -272,7 +287,7 @@ void do_syscall_trace_exit(struct pt_regs *regs)
272 trace_sys_exit(regs, regs->regs[0]); 287 trace_sys_exit(regs, regs->regs[0]);
273} 288}
274 289
275void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code) 290void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs)
276{ 291{
277 struct siginfo info; 292 struct siginfo info;
278 293
@@ -288,5 +303,5 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, int error_code)
288/* Handle synthetic interrupt delivered only by the simulator. */ 303/* Handle synthetic interrupt delivered only by the simulator. */
289void __kprobes do_breakpoint(struct pt_regs* regs, int fault_num) 304void __kprobes do_breakpoint(struct pt_regs* regs, int fault_num)
290{ 305{
291 send_sigtrap(current, regs, fault_num); 306 send_sigtrap(current, regs);
292} 307}
diff --git a/arch/tile/kernel/reboot.c b/arch/tile/kernel/reboot.c
index d1b5c913ae72..6c5d2c070a12 100644
--- a/arch/tile/kernel/reboot.c
+++ b/arch/tile/kernel/reboot.c
@@ -27,7 +27,6 @@
27 27
28void machine_halt(void) 28void machine_halt(void)
29{ 29{
30 warn_early_printk();
31 arch_local_irq_disable_all(); 30 arch_local_irq_disable_all();
32 smp_send_stop(); 31 smp_send_stop();
33 hv_halt(); 32 hv_halt();
@@ -35,7 +34,6 @@ void machine_halt(void)
35 34
36void machine_power_off(void) 35void machine_power_off(void)
37{ 36{
38 warn_early_printk();
39 arch_local_irq_disable_all(); 37 arch_local_irq_disable_all();
40 smp_send_stop(); 38 smp_send_stop();
41 hv_power_off(); 39 hv_power_off();
diff --git a/arch/tile/kernel/regs_32.S b/arch/tile/kernel/regs_32.S
index c12280c2d904..542cae17a93a 100644
--- a/arch/tile/kernel/regs_32.S
+++ b/arch/tile/kernel/regs_32.S
@@ -20,7 +20,7 @@
20#include <asm/switch_to.h> 20#include <asm/switch_to.h>
21 21
22/* 22/*
23 * See <asm/system.h>; called with prev and next task_struct pointers. 23 * See <asm/switch_to.h>; called with prev and next task_struct pointers.
24 * "prev" is returned in r0 for _switch_to and also for ret_from_fork. 24 * "prev" is returned in r0 for _switch_to and also for ret_from_fork.
25 * 25 *
26 * We want to save pc/sp in "prev", and get the new pc/sp from "next". 26 * We want to save pc/sp in "prev", and get the new pc/sp from "next".
@@ -39,7 +39,7 @@
39 */ 39 */
40 40
41#if CALLEE_SAVED_REGS_COUNT != 24 41#if CALLEE_SAVED_REGS_COUNT != 24
42# error Mismatch between <asm/system.h> and kernel/entry.S 42# error Mismatch between <asm/switch_to.h> and kernel/entry.S
43#endif 43#endif
44#define FRAME_SIZE ((2 + CALLEE_SAVED_REGS_COUNT) * 4) 44#define FRAME_SIZE ((2 + CALLEE_SAVED_REGS_COUNT) * 4)
45 45
diff --git a/arch/tile/kernel/regs_64.S b/arch/tile/kernel/regs_64.S
index 0829fd01fa30..bbffcc6f340f 100644
--- a/arch/tile/kernel/regs_64.S
+++ b/arch/tile/kernel/regs_64.S
@@ -20,7 +20,7 @@
20#include <asm/switch_to.h> 20#include <asm/switch_to.h>
21 21
22/* 22/*
23 * See <asm/system.h>; called with prev and next task_struct pointers. 23 * See <asm/switch_to.h>; called with prev and next task_struct pointers.
24 * "prev" is returned in r0 for _switch_to and also for ret_from_fork. 24 * "prev" is returned in r0 for _switch_to and also for ret_from_fork.
25 * 25 *
26 * We want to save pc/sp in "prev", and get the new pc/sp from "next". 26 * We want to save pc/sp in "prev", and get the new pc/sp from "next".
@@ -39,7 +39,7 @@
39 */ 39 */
40 40
41#if CALLEE_SAVED_REGS_COUNT != 24 41#if CALLEE_SAVED_REGS_COUNT != 24
42# error Mismatch between <asm/system.h> and kernel/entry.S 42# error Mismatch between <asm/switch_to.h> and kernel/entry.S
43#endif 43#endif
44#define FRAME_SIZE ((2 + CALLEE_SAVED_REGS_COUNT) * 8) 44#define FRAME_SIZE ((2 + CALLEE_SAVED_REGS_COUNT) * 8)
45 45
diff --git a/arch/tile/kernel/relocate_kernel_32.S b/arch/tile/kernel/relocate_kernel_32.S
index 010b418515f8..e44fbcf8cbd5 100644
--- a/arch/tile/kernel/relocate_kernel_32.S
+++ b/arch/tile/kernel/relocate_kernel_32.S
@@ -20,15 +20,6 @@
20#include <asm/page.h> 20#include <asm/page.h>
21#include <hv/hypervisor.h> 21#include <hv/hypervisor.h>
22 22
23#define ___hvb MEM_SV_INTRPT + HV_GLUE_START_CPA
24
25#define ___hv_dispatch(f) (___hvb + (HV_DISPATCH_ENTRY_SIZE * f))
26
27#define ___hv_console_putc ___hv_dispatch(HV_DISPATCH_CONSOLE_PUTC)
28#define ___hv_halt ___hv_dispatch(HV_DISPATCH_HALT)
29#define ___hv_reexec ___hv_dispatch(HV_DISPATCH_REEXEC)
30#define ___hv_flush_remote ___hv_dispatch(HV_DISPATCH_FLUSH_REMOTE)
31
32#undef RELOCATE_NEW_KERNEL_VERBOSE 23#undef RELOCATE_NEW_KERNEL_VERBOSE
33 24
34STD_ENTRY(relocate_new_kernel) 25STD_ENTRY(relocate_new_kernel)
@@ -43,8 +34,8 @@ STD_ENTRY(relocate_new_kernel)
43 addi sp, sp, -8 34 addi sp, sp, -8
44 /* we now have a stack (whether we need one or not) */ 35 /* we now have a stack (whether we need one or not) */
45 36
46 moveli r40, lo16(___hv_console_putc) 37 moveli r40, lo16(hv_console_putc)
47 auli r40, r40, ha16(___hv_console_putc) 38 auli r40, r40, ha16(hv_console_putc)
48 39
49#ifdef RELOCATE_NEW_KERNEL_VERBOSE 40#ifdef RELOCATE_NEW_KERNEL_VERBOSE
50 moveli r0, 'r' 41 moveli r0, 'r'
@@ -86,7 +77,6 @@ STD_ENTRY(relocate_new_kernel)
86 move r30, sp 77 move r30, sp
87 addi sp, sp, -8 78 addi sp, sp, -8
88 79
89#if CHIP_HAS_CBOX_HOME_MAP()
90 /* 80 /*
91 * On TILEPro, we need to flush all tiles' caches, since we may 81 * On TILEPro, we need to flush all tiles' caches, since we may
92 * have been doing hash-for-home caching there. Note that we 82 * have been doing hash-for-home caching there. Note that we
@@ -114,15 +104,14 @@ STD_ENTRY(relocate_new_kernel)
114 } 104 }
115 { 105 {
116 move r8, zero /* asids */ 106 move r8, zero /* asids */
117 moveli r20, lo16(___hv_flush_remote) 107 moveli r20, lo16(hv_flush_remote)
118 } 108 }
119 { 109 {
120 move r9, zero /* asidcount */ 110 move r9, zero /* asidcount */
121 auli r20, r20, ha16(___hv_flush_remote) 111 auli r20, r20, ha16(hv_flush_remote)
122 } 112 }
123 113
124 jalr r20 114 jalr r20
125#endif
126 115
127 /* r33 is destination pointer, default to zero */ 116 /* r33 is destination pointer, default to zero */
128 117
@@ -175,8 +164,8 @@ STD_ENTRY(relocate_new_kernel)
175 move r0, r32 164 move r0, r32
176 moveli r1, 0 /* arg to hv_reexec is 64 bits */ 165 moveli r1, 0 /* arg to hv_reexec is 64 bits */
177 166
178 moveli r41, lo16(___hv_reexec) 167 moveli r41, lo16(hv_reexec)
179 auli r41, r41, ha16(___hv_reexec) 168 auli r41, r41, ha16(hv_reexec)
180 169
181 jalr r41 170 jalr r41
182 171
@@ -267,8 +256,8 @@ STD_ENTRY(relocate_new_kernel)
267 moveli r0, '\n' 256 moveli r0, '\n'
268 jalr r40 257 jalr r40
269.Lhalt: 258.Lhalt:
270 moveli r41, lo16(___hv_halt) 259 moveli r41, lo16(hv_halt)
271 auli r41, r41, ha16(___hv_halt) 260 auli r41, r41, ha16(hv_halt)
272 261
273 jalr r41 262 jalr r41
274 STD_ENDPROC(relocate_new_kernel) 263 STD_ENDPROC(relocate_new_kernel)
diff --git a/arch/tile/kernel/relocate_kernel_64.S b/arch/tile/kernel/relocate_kernel_64.S
index 1c09a4f5a4ea..d9d8cf6176e8 100644
--- a/arch/tile/kernel/relocate_kernel_64.S
+++ b/arch/tile/kernel/relocate_kernel_64.S
@@ -34,11 +34,11 @@ STD_ENTRY(relocate_new_kernel)
34 addi sp, sp, -8 34 addi sp, sp, -8
35 /* we now have a stack (whether we need one or not) */ 35 /* we now have a stack (whether we need one or not) */
36 36
37#ifdef RELOCATE_NEW_KERNEL_VERBOSE
37 moveli r40, hw2_last(hv_console_putc) 38 moveli r40, hw2_last(hv_console_putc)
38 shl16insli r40, r40, hw1(hv_console_putc) 39 shl16insli r40, r40, hw1(hv_console_putc)
39 shl16insli r40, r40, hw0(hv_console_putc) 40 shl16insli r40, r40, hw0(hv_console_putc)
40 41
41#ifdef RELOCATE_NEW_KERNEL_VERBOSE
42 moveli r0, 'r' 42 moveli r0, 'r'
43 jalr r40 43 jalr r40
44 44
@@ -78,7 +78,6 @@ STD_ENTRY(relocate_new_kernel)
78 move r30, sp 78 move r30, sp
79 addi sp, sp, -16 79 addi sp, sp, -16
80 80
81#if CHIP_HAS_CBOX_HOME_MAP()
82 /* 81 /*
83 * On TILE-GX, we need to flush all tiles' caches, since we may 82 * On TILE-GX, we need to flush all tiles' caches, since we may
84 * have been doing hash-for-home caching there. Note that we 83 * have been doing hash-for-home caching there. Note that we
@@ -116,7 +115,6 @@ STD_ENTRY(relocate_new_kernel)
116 shl16insli r20, r20, hw0(hv_flush_remote) 115 shl16insli r20, r20, hw0(hv_flush_remote)
117 116
118 jalr r20 117 jalr r20
119#endif
120 118
121 /* r33 is destination pointer, default to zero */ 119 /* r33 is destination pointer, default to zero */
122 120
@@ -176,10 +174,12 @@ STD_ENTRY(relocate_new_kernel)
176 174
177 /* we should not get here */ 175 /* we should not get here */
178 176
177#ifdef RELOCATE_NEW_KERNEL_VERBOSE
179 moveli r0, '?' 178 moveli r0, '?'
180 jalr r40 179 jalr r40
181 moveli r0, '\n' 180 moveli r0, '\n'
182 jalr r40 181 jalr r40
182#endif
183 183
184 j .Lhalt 184 j .Lhalt
185 185
@@ -237,7 +237,9 @@ STD_ENTRY(relocate_new_kernel)
237 j .Lloop 237 j .Lloop
238 238
239 239
240.Lerr: moveli r0, 'e' 240.Lerr:
241#ifdef RELOCATE_NEW_KERNEL_VERBOSE
242 moveli r0, 'e'
241 jalr r40 243 jalr r40
242 moveli r0, 'r' 244 moveli r0, 'r'
243 jalr r40 245 jalr r40
@@ -245,6 +247,7 @@ STD_ENTRY(relocate_new_kernel)
245 jalr r40 247 jalr r40
246 moveli r0, '\n' 248 moveli r0, '\n'
247 jalr r40 249 jalr r40
250#endif
248.Lhalt: 251.Lhalt:
249 moveli r41, hw2_last(hv_halt) 252 moveli r41, hw2_last(hv_halt)
250 shl16insli r41, r41, hw1(hv_halt) 253 shl16insli r41, r41, hw1(hv_halt)
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index eceb8344280f..4c34caea9dd3 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -154,6 +154,65 @@ static int __init setup_maxnodemem(char *str)
154} 154}
155early_param("maxnodemem", setup_maxnodemem); 155early_param("maxnodemem", setup_maxnodemem);
156 156
157struct memmap_entry {
158 u64 addr; /* start of memory segment */
159 u64 size; /* size of memory segment */
160};
161static struct memmap_entry memmap_map[64];
162static int memmap_nr;
163
164static void add_memmap_region(u64 addr, u64 size)
165{
166 if (memmap_nr >= ARRAY_SIZE(memmap_map)) {
167 pr_err("Ooops! Too many entries in the memory map!\n");
168 return;
169 }
170 memmap_map[memmap_nr].addr = addr;
171 memmap_map[memmap_nr].size = size;
172 memmap_nr++;
173}
174
175static int __init setup_memmap(char *p)
176{
177 char *oldp;
178 u64 start_at, mem_size;
179
180 if (!p)
181 return -EINVAL;
182
183 if (!strncmp(p, "exactmap", 8)) {
184 pr_err("\"memmap=exactmap\" not valid on tile\n");
185 return 0;
186 }
187
188 oldp = p;
189 mem_size = memparse(p, &p);
190 if (p == oldp)
191 return -EINVAL;
192
193 if (*p == '@') {
194 pr_err("\"memmap=nn@ss\" (force RAM) invalid on tile\n");
195 } else if (*p == '#') {
196 pr_err("\"memmap=nn#ss\" (force ACPI data) invalid on tile\n");
197 } else if (*p == '$') {
198 start_at = memparse(p+1, &p);
199 add_memmap_region(start_at, mem_size);
200 } else {
201 if (mem_size == 0)
202 return -EINVAL;
203 maxmem_pfn = (mem_size >> HPAGE_SHIFT) <<
204 (HPAGE_SHIFT - PAGE_SHIFT);
205 }
206 return *p == '\0' ? 0 : -EINVAL;
207}
208early_param("memmap", setup_memmap);
209
210static int __init setup_mem(char *str)
211{
212 return setup_maxmem(str);
213}
214early_param("mem", setup_mem); /* compatibility with x86 */
215
157static int __init setup_isolnodes(char *str) 216static int __init setup_isolnodes(char *str)
158{ 217{
159 char buf[MAX_NUMNODES * 5]; 218 char buf[MAX_NUMNODES * 5];
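For illustration, a minimal userspace sketch of the parsing the new setup_memmap() handler performs. my_memparse() here is a hypothetical stand-in for the kernel's memparse(), which also accepts lowercase suffixes; '$' marks a range to reserve, matching the add_memmap_region() path above:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for the kernel's memparse(): parse a number
 * with an optional K/M/G suffix, returning a byte count. */
static unsigned long long my_memparse(const char *p, char **end)
{
	unsigned long long v = strtoull(p, end, 0);

	switch (**end) {
	case 'G': v <<= 10; /* fall through */
	case 'M': v <<= 10; /* fall through */
	case 'K': v <<= 10; (*end)++;
	}
	return v;
}

int main(void)
{
	/* "memmap=512M$0x40000000": reserve 512 MB at physical 1 GB. */
	char arg[] = "512M$0x40000000", *p;
	unsigned long long size = my_memparse(arg, &p);

	if (*p == '$')
		printf("reserve %#llx bytes at %#llx\n",
		       size, my_memparse(p + 1, &p));
	return 0;
}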
@@ -209,7 +268,7 @@ early_param("vmalloc", parse_vmalloc);
209/* 268/*
210 * Determine for each controller where its lowmem is mapped and how much of 269 * Determine for each controller where its lowmem is mapped and how much of
211 * it is mapped there. On controller zero, the first few megabytes are 270 * it is mapped there. On controller zero, the first few megabytes are
212 * already mapped in as code at MEM_SV_INTRPT, so in principle we could 271 * already mapped in as code at MEM_SV_START, so in principle we could
213 * start our data mappings higher up, but for now we don't bother, to avoid 272 * start our data mappings higher up, but for now we don't bother, to avoid
214 * additional confusion. 273 * additional confusion.
215 * 274 *
@@ -614,11 +673,12 @@ static void __init setup_bootmem_allocator_node(int i)
614 /* 673 /*
615 * Throw away any memory aliased by the PCI region. 674 * Throw away any memory aliased by the PCI region.
616 */ 675 */
617 if (pci_reserve_start_pfn < end && pci_reserve_end_pfn > start) 676 if (pci_reserve_start_pfn < end && pci_reserve_end_pfn > start) {
618 reserve_bootmem(PFN_PHYS(pci_reserve_start_pfn), 677 start = max(pci_reserve_start_pfn, start);
619 PFN_PHYS(pci_reserve_end_pfn - 678 end = min(pci_reserve_end_pfn, end);
620 pci_reserve_start_pfn), 679 reserve_bootmem(PFN_PHYS(start), PFN_PHYS(end - start),
621 BOOTMEM_EXCLUSIVE); 680 BOOTMEM_EXCLUSIVE);
681 }
622#endif 682#endif
623} 683}
624 684
@@ -628,6 +688,31 @@ static void __init setup_bootmem_allocator(void)
628 for (i = 0; i < MAX_NUMNODES; ++i) 688 for (i = 0; i < MAX_NUMNODES; ++i)
629 setup_bootmem_allocator_node(i); 689 setup_bootmem_allocator_node(i);
630 690
691 /* Reserve any memory excluded by "memmap" arguments. */
692 for (i = 0; i < memmap_nr; ++i) {
693 struct memmap_entry *m = &memmap_map[i];
694 reserve_bootmem(m->addr, m->size, 0);
695 }
696
697#ifdef CONFIG_BLK_DEV_INITRD
698 if (initrd_start) {
699 /* Make sure the initrd memory region is not modified. */
700 if (reserve_bootmem(initrd_start, initrd_end - initrd_start,
701 BOOTMEM_EXCLUSIVE)) {
702 pr_crit("The initrd memory region has been polluted. Disabling it.\n");
703 initrd_start = 0;
704 initrd_end = 0;
705 } else {
706 /*
707 * Translate initrd_start & initrd_end from PA to VA for
708 * future access.
709 */
710 initrd_start += PAGE_OFFSET;
711 initrd_end += PAGE_OFFSET;
712 }
713 }
714#endif
715
631#ifdef CONFIG_KEXEC 716#ifdef CONFIG_KEXEC
632 if (crashk_res.start != crashk_res.end) 717 if (crashk_res.start != crashk_res.end)
633 reserve_bootmem(crashk_res.start, resource_size(&crashk_res), 0); 718 reserve_bootmem(crashk_res.start, resource_size(&crashk_res), 0);
@@ -961,9 +1046,6 @@ void setup_cpu(int boot)
961 arch_local_irq_unmask(INT_DMATLB_MISS); 1046 arch_local_irq_unmask(INT_DMATLB_MISS);
962 arch_local_irq_unmask(INT_DMATLB_ACCESS); 1047 arch_local_irq_unmask(INT_DMATLB_ACCESS);
963#endif 1048#endif
964#if CHIP_HAS_SN_PROC()
965 arch_local_irq_unmask(INT_SNITLB_MISS);
966#endif
967#ifdef __tilegx__ 1049#ifdef __tilegx__
968 arch_local_irq_unmask(INT_SINGLE_STEP_K); 1050 arch_local_irq_unmask(INT_SINGLE_STEP_K);
969#endif 1051#endif
@@ -978,10 +1060,6 @@ void setup_cpu(int boot)
978 /* Static network is not restricted. */ 1060 /* Static network is not restricted. */
979 __insn_mtspr(SPR_MPL_SN_ACCESS_SET_0, 1); 1061 __insn_mtspr(SPR_MPL_SN_ACCESS_SET_0, 1);
980#endif 1062#endif
981#if CHIP_HAS_SN_PROC()
982 __insn_mtspr(SPR_MPL_SN_NOTIFY_SET_0, 1);
983 __insn_mtspr(SPR_MPL_SN_CPL_SET_0, 1);
984#endif
985 1063
986 /* 1064 /*
987 * Set the MPL for interrupt control 0 & 1 to the corresponding 1065 * Set the MPL for interrupt control 0 & 1 to the corresponding
@@ -1029,6 +1107,10 @@ static void __init load_hv_initrd(void)
1029 int fd, rc; 1107 int fd, rc;
1030 void *initrd; 1108 void *initrd;
1031 1109
1110 /* If initrd has already been set, skip initramfs file in hvfs. */
1111 if (initrd_start)
1112 return;
1113
1032 fd = hv_fs_findfile((HV_VirtAddr) initramfs_file); 1114 fd = hv_fs_findfile((HV_VirtAddr) initramfs_file);
1033 if (fd == HV_ENOENT) { 1115 if (fd == HV_ENOENT) {
1034 if (set_initramfs_file) { 1116 if (set_initramfs_file) {
@@ -1067,6 +1149,25 @@ void __init free_initrd_mem(unsigned long begin, unsigned long end)
1067 free_bootmem(__pa(begin), end - begin); 1149 free_bootmem(__pa(begin), end - begin);
1068} 1150}
1069 1151
1152static int __init setup_initrd(char *str)
1153{
1154 char *endp;
1155 unsigned long initrd_size;
1156
1157 initrd_size = str ? simple_strtoul(str, &endp, 0) : 0;
1158 if (initrd_size == 0 || *endp != '@')
1159 return -EINVAL;
1160
1161 initrd_start = simple_strtoul(endp+1, &endp, 0);
1162 if (initrd_start == 0)
1163 return -EINVAL;
1164
1165 initrd_end = initrd_start + initrd_size;
1166
1167 return 0;
1168}
1169early_param("initrd", setup_initrd);
1170
1070#else 1171#else
1071static inline void load_hv_initrd(void) {} 1172static inline void load_hv_initrd(void) {}
1072#endif /* CONFIG_BLK_DEV_INITRD */ 1173#endif /* CONFIG_BLK_DEV_INITRD */
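As a usage illustration (assumed, based on the parser above): booting with "initrd=0x1000000@0x2000000" records a 16 MB initrd at physical address 0x2000000. Because setup_initrd() uses simple_strtoul() rather than memparse(), the size and address take plain decimal or 0x-prefixed hex with no K/M/G suffixes; load_hv_initrd() then skips the hypervisor-filesystem initramfs since initrd_start is already set.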
@@ -1134,7 +1235,7 @@ static void __init validate_va(void)
1134#ifndef __tilegx__ /* FIXME: GX: probably some validation relevant here */ 1235#ifndef __tilegx__ /* FIXME: GX: probably some validation relevant here */
1135 /* 1236 /*
1136 * Similarly, make sure we're only using allowed VAs. 1237 * Similarly, make sure we're only using allowed VAs.
1137 * We assume we can contiguously use MEM_USER_INTRPT .. MEM_HV_INTRPT, 1238 * We assume we can contiguously use MEM_USER_INTRPT .. MEM_HV_START,
1138 * and 0 .. KERNEL_HIGH_VADDR. 1239 * and 0 .. KERNEL_HIGH_VADDR.
1139 * In addition, make sure we CAN'T use the end of memory, since 1240 * In addition, make sure we CAN'T use the end of memory, since
1140 * we use the last chunk of each pgd for the pgd_list. 1241 * we use the last chunk of each pgd for the pgd_list.
@@ -1149,7 +1250,7 @@ static void __init validate_va(void)
1149 if (range.size == 0) 1250 if (range.size == 0)
1150 break; 1251 break;
1151 if (range.start <= MEM_USER_INTRPT && 1252 if (range.start <= MEM_USER_INTRPT &&
1152 range.start + range.size >= MEM_HV_INTRPT) 1253 range.start + range.size >= MEM_HV_START)
1153 user_kernel_ok = 1; 1254 user_kernel_ok = 1;
1154 if (range.start == 0) 1255 if (range.start == 0)
1155 max_va = range.size; 1256 max_va = range.size;
@@ -1183,7 +1284,6 @@ static void __init validate_va(void)
1183struct cpumask __write_once cpu_lotar_map; 1284struct cpumask __write_once cpu_lotar_map;
1184EXPORT_SYMBOL(cpu_lotar_map); 1285EXPORT_SYMBOL(cpu_lotar_map);
1185 1286
1186#if CHIP_HAS_CBOX_HOME_MAP()
1187/* 1287/*
1188 * hash_for_home_map lists all the tiles that hash-for-home data 1288 * hash_for_home_map lists all the tiles that hash-for-home data
 1189 * will be cached on. Note that this may include tiles that are not 1289 * will be cached on. Note that this may include tiles that are not
@@ -1193,7 +1293,6 @@ EXPORT_SYMBOL(cpu_lotar_map);
1193 */ 1293 */
1194struct cpumask hash_for_home_map; 1294struct cpumask hash_for_home_map;
1195EXPORT_SYMBOL(hash_for_home_map); 1295EXPORT_SYMBOL(hash_for_home_map);
1196#endif
1197 1296
1198/* 1297/*
1199 * cpu_cacheable_map lists all the cpus whose caches the hypervisor can 1298 * cpu_cacheable_map lists all the cpus whose caches the hypervisor can
@@ -1286,7 +1385,6 @@ static void __init setup_cpu_maps(void)
1286 cpu_lotar_map = *cpu_possible_mask; 1385 cpu_lotar_map = *cpu_possible_mask;
1287 } 1386 }
1288 1387
1289#if CHIP_HAS_CBOX_HOME_MAP()
1290 /* Retrieve set of CPUs used for hash-for-home caching */ 1388 /* Retrieve set of CPUs used for hash-for-home caching */
1291 rc = hv_inquire_tiles(HV_INQ_TILES_HFH_CACHE, 1389 rc = hv_inquire_tiles(HV_INQ_TILES_HFH_CACHE,
1292 (HV_VirtAddr) hash_for_home_map.bits, 1390 (HV_VirtAddr) hash_for_home_map.bits,
@@ -1294,9 +1392,6 @@ static void __init setup_cpu_maps(void)
1294 if (rc < 0) 1392 if (rc < 0)
1295 early_panic("hv_inquire_tiles(HFH_CACHE) failed: rc %d\n", rc); 1393 early_panic("hv_inquire_tiles(HFH_CACHE) failed: rc %d\n", rc);
1296 cpumask_or(&cpu_cacheable_map, cpu_possible_mask, &hash_for_home_map); 1394 cpumask_or(&cpu_cacheable_map, cpu_possible_mask, &hash_for_home_map);
1297#else
1298 cpu_cacheable_map = *cpu_possible_mask;
1299#endif
1300} 1395}
1301 1396
1302 1397
@@ -1492,7 +1587,7 @@ void __init setup_per_cpu_areas(void)
1492 1587
1493 /* Update the vmalloc mapping and page home. */ 1588 /* Update the vmalloc mapping and page home. */
1494 unsigned long addr = (unsigned long)ptr + i; 1589 unsigned long addr = (unsigned long)ptr + i;
1495 pte_t *ptep = virt_to_pte(NULL, addr); 1590 pte_t *ptep = virt_to_kpte(addr);
1496 pte_t pte = *ptep; 1591 pte_t pte = *ptep;
1497 BUG_ON(pfn != pte_pfn(pte)); 1592 BUG_ON(pfn != pte_pfn(pte));
1498 pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3); 1593 pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);
@@ -1501,12 +1596,12 @@ void __init setup_per_cpu_areas(void)
1501 1596
1502 /* Update the lowmem mapping for consistency. */ 1597 /* Update the lowmem mapping for consistency. */
1503 lowmem_va = (unsigned long)pfn_to_kaddr(pfn); 1598 lowmem_va = (unsigned long)pfn_to_kaddr(pfn);
1504 ptep = virt_to_pte(NULL, lowmem_va); 1599 ptep = virt_to_kpte(lowmem_va);
1505 if (pte_huge(*ptep)) { 1600 if (pte_huge(*ptep)) {
1506 printk(KERN_DEBUG "early shatter of huge page" 1601 printk(KERN_DEBUG "early shatter of huge page"
1507 " at %#lx\n", lowmem_va); 1602 " at %#lx\n", lowmem_va);
1508 shatter_pmd((pmd_t *)ptep); 1603 shatter_pmd((pmd_t *)ptep);
1509 ptep = virt_to_pte(NULL, lowmem_va); 1604 ptep = virt_to_kpte(lowmem_va);
1510 BUG_ON(pte_huge(*ptep)); 1605 BUG_ON(pte_huge(*ptep));
1511 } 1606 }
1512 BUG_ON(pfn != pte_pfn(*ptep)); 1607 BUG_ON(pfn != pte_pfn(*ptep));
@@ -1548,6 +1643,8 @@ insert_non_bus_resource(void)
1548{ 1643{
1549 struct resource *res = 1644 struct resource *res =
1550 kzalloc(sizeof(struct resource), GFP_ATOMIC); 1645 kzalloc(sizeof(struct resource), GFP_ATOMIC);
1646 if (!res)
1647 return NULL;
1551 res->name = "Non-Bus Physical Address Space"; 1648 res->name = "Non-Bus Physical Address Space";
1552 res->start = (1ULL << 32); 1649 res->start = (1ULL << 32);
1553 res->end = -1LL; 1650 res->end = -1LL;
@@ -1561,11 +1658,13 @@ insert_non_bus_resource(void)
1561#endif 1658#endif
1562 1659
1563static struct resource* __init 1660static struct resource* __init
1564insert_ram_resource(u64 start_pfn, u64 end_pfn) 1661insert_ram_resource(u64 start_pfn, u64 end_pfn, bool reserved)
1565{ 1662{
1566 struct resource *res = 1663 struct resource *res =
1567 kzalloc(sizeof(struct resource), GFP_ATOMIC); 1664 kzalloc(sizeof(struct resource), GFP_ATOMIC);
1568 res->name = "System RAM"; 1665 if (!res)
1666 return NULL;
1667 res->name = reserved ? "Reserved" : "System RAM";
1569 res->start = start_pfn << PAGE_SHIFT; 1668 res->start = start_pfn << PAGE_SHIFT;
1570 res->end = (end_pfn << PAGE_SHIFT) - 1; 1669 res->end = (end_pfn << PAGE_SHIFT) - 1;
1571 res->flags = IORESOURCE_BUSY | IORESOURCE_MEM; 1670 res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
@@ -1585,7 +1684,7 @@ insert_ram_resource(u64 start_pfn, u64 end_pfn)
1585static int __init request_standard_resources(void) 1684static int __init request_standard_resources(void)
1586{ 1685{
1587 int i; 1686 int i;
1588 enum { CODE_DELTA = MEM_SV_INTRPT - PAGE_OFFSET }; 1687 enum { CODE_DELTA = MEM_SV_START - PAGE_OFFSET };
1589 1688
1590#if defined(CONFIG_PCI) && !defined(__tilegx__) 1689#if defined(CONFIG_PCI) && !defined(__tilegx__)
1591 insert_non_bus_resource(); 1690 insert_non_bus_resource();
@@ -1600,11 +1699,11 @@ static int __init request_standard_resources(void)
1600 end_pfn > pci_reserve_start_pfn) { 1699 end_pfn > pci_reserve_start_pfn) {
1601 if (end_pfn > pci_reserve_end_pfn) 1700 if (end_pfn > pci_reserve_end_pfn)
1602 insert_ram_resource(pci_reserve_end_pfn, 1701 insert_ram_resource(pci_reserve_end_pfn,
1603 end_pfn); 1702 end_pfn, 0);
1604 end_pfn = pci_reserve_start_pfn; 1703 end_pfn = pci_reserve_start_pfn;
1605 } 1704 }
1606#endif 1705#endif
1607 insert_ram_resource(start_pfn, end_pfn); 1706 insert_ram_resource(start_pfn, end_pfn, 0);
1608 } 1707 }
1609 1708
1610 code_resource.start = __pa(_text - CODE_DELTA); 1709 code_resource.start = __pa(_text - CODE_DELTA);
@@ -1615,6 +1714,13 @@ static int __init request_standard_resources(void)
1615 insert_resource(&iomem_resource, &code_resource); 1714 insert_resource(&iomem_resource, &code_resource);
1616 insert_resource(&iomem_resource, &data_resource); 1715 insert_resource(&iomem_resource, &data_resource);
1617 1716
1717 /* Mark any "memmap" regions busy for the resource manager. */
1718 for (i = 0; i < memmap_nr; ++i) {
1719 struct memmap_entry *m = &memmap_map[i];
1720 insert_ram_resource(PFN_DOWN(m->addr),
1721 PFN_UP(m->addr + m->size - 1), 1);
1722 }
1723
1618#ifdef CONFIG_KEXEC 1724#ifdef CONFIG_KEXEC
1619 insert_resource(&iomem_resource, &crashk_res); 1725 insert_resource(&iomem_resource, &crashk_res);
1620#endif 1726#endif
diff --git a/arch/tile/kernel/signal.c b/arch/tile/kernel/signal.c
index 9531845bf661..2d1dbf38a9ab 100644
--- a/arch/tile/kernel/signal.c
+++ b/arch/tile/kernel/signal.c
@@ -33,6 +33,7 @@
33#include <asm/ucontext.h> 33#include <asm/ucontext.h>
34#include <asm/sigframe.h> 34#include <asm/sigframe.h>
35#include <asm/syscalls.h> 35#include <asm/syscalls.h>
36#include <asm/vdso.h>
36#include <arch/interrupts.h> 37#include <arch/interrupts.h>
37 38
38#define DEBUG_SIG 0 39#define DEBUG_SIG 0
@@ -190,7 +191,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
190 if (err) 191 if (err)
191 goto give_sigsegv; 192 goto give_sigsegv;
192 193
193 restorer = VDSO_BASE; 194 restorer = VDSO_SYM(&__vdso_rt_sigreturn);
194 if (ka->sa.sa_flags & SA_RESTORER) 195 if (ka->sa.sa_flags & SA_RESTORER)
195 restorer = (unsigned long) ka->sa.sa_restorer; 196 restorer = (unsigned long) ka->sa.sa_restorer;
196 197
diff --git a/arch/tile/kernel/single_step.c b/arch/tile/kernel/single_step.c
index 27742e87e255..de07fa7d1315 100644
--- a/arch/tile/kernel/single_step.c
+++ b/arch/tile/kernel/single_step.c
@@ -12,41 +12,30 @@
12 * more details. 12 * more details.
13 * 13 *
14 * A code-rewriter that enables instruction single-stepping. 14 * A code-rewriter that enables instruction single-stepping.
15 * Derived from iLib's single-stepping code.
16 */ 15 */
17 16
18#ifndef __tilegx__ /* Hardware support for single step unavailable. */ 17#include <linux/smp.h>
19 18#include <linux/ptrace.h>
20/* These functions are only used on the TILE platform */
21#include <linux/slab.h> 19#include <linux/slab.h>
22#include <linux/thread_info.h> 20#include <linux/thread_info.h>
23#include <linux/uaccess.h> 21#include <linux/uaccess.h>
24#include <linux/mman.h> 22#include <linux/mman.h>
25#include <linux/types.h> 23#include <linux/types.h>
26#include <linux/err.h> 24#include <linux/err.h>
25#include <linux/prctl.h>
27#include <asm/cacheflush.h> 26#include <asm/cacheflush.h>
27#include <asm/traps.h>
28#include <asm/uaccess.h>
28#include <asm/unaligned.h> 29#include <asm/unaligned.h>
29#include <arch/abi.h> 30#include <arch/abi.h>
31#include <arch/spr_def.h>
30#include <arch/opcode.h> 32#include <arch/opcode.h>
31 33
32#define signExtend17(val) sign_extend((val), 17)
33#define TILE_X1_MASK (0xffffffffULL << 31)
34
35int unaligned_printk;
36 34
37static int __init setup_unaligned_printk(char *str) 35#ifndef __tilegx__ /* Hardware support for single step unavailable. */
38{
39 long val;
40 if (strict_strtol(str, 0, &val) != 0)
41 return 0;
42 unaligned_printk = val;
43 pr_info("Printk for each unaligned data accesses is %s\n",
44 unaligned_printk ? "enabled" : "disabled");
45 return 1;
46}
47__setup("unaligned_printk=", setup_unaligned_printk);
48 36
49unsigned int unaligned_fixup_count; 37#define signExtend17(val) sign_extend((val), 17)
38#define TILE_X1_MASK (0xffffffffULL << 31)
50 39
51enum mem_op { 40enum mem_op {
52 MEMOP_NONE, 41 MEMOP_NONE,
@@ -56,12 +45,13 @@ enum mem_op {
56 MEMOP_STORE_POSTINCR 45 MEMOP_STORE_POSTINCR
57}; 46};
58 47
59static inline tile_bundle_bits set_BrOff_X1(tile_bundle_bits n, s32 offset) 48static inline tilepro_bundle_bits set_BrOff_X1(tilepro_bundle_bits n,
49 s32 offset)
60{ 50{
61 tile_bundle_bits result; 51 tilepro_bundle_bits result;
62 52
63 /* mask out the old offset */ 53 /* mask out the old offset */
64 tile_bundle_bits mask = create_BrOff_X1(-1); 54 tilepro_bundle_bits mask = create_BrOff_X1(-1);
65 result = n & (~mask); 55 result = n & (~mask);
66 56
67 /* or in the new offset */ 57 /* or in the new offset */
@@ -70,10 +60,11 @@ static inline tile_bundle_bits set_BrOff_X1(tile_bundle_bits n, s32 offset)
70 return result; 60 return result;
71} 61}
72 62
73static inline tile_bundle_bits move_X1(tile_bundle_bits n, int dest, int src) 63static inline tilepro_bundle_bits move_X1(tilepro_bundle_bits n, int dest,
64 int src)
74{ 65{
75 tile_bundle_bits result; 66 tilepro_bundle_bits result;
76 tile_bundle_bits op; 67 tilepro_bundle_bits op;
77 68
78 result = n & (~TILE_X1_MASK); 69 result = n & (~TILE_X1_MASK);
79 70
@@ -87,13 +78,13 @@ static inline tile_bundle_bits move_X1(tile_bundle_bits n, int dest, int src)
87 return result; 78 return result;
88} 79}
89 80
90static inline tile_bundle_bits nop_X1(tile_bundle_bits n) 81static inline tilepro_bundle_bits nop_X1(tilepro_bundle_bits n)
91{ 82{
92 return move_X1(n, TREG_ZERO, TREG_ZERO); 83 return move_X1(n, TREG_ZERO, TREG_ZERO);
93} 84}
94 85
95static inline tile_bundle_bits addi_X1( 86static inline tilepro_bundle_bits addi_X1(
96 tile_bundle_bits n, int dest, int src, int imm) 87 tilepro_bundle_bits n, int dest, int src, int imm)
97{ 88{
98 n &= ~TILE_X1_MASK; 89 n &= ~TILE_X1_MASK;
99 90
@@ -107,15 +98,26 @@ static inline tile_bundle_bits addi_X1(
107 return n; 98 return n;
108} 99}
109 100
110static tile_bundle_bits rewrite_load_store_unaligned( 101static tilepro_bundle_bits rewrite_load_store_unaligned(
111 struct single_step_state *state, 102 struct single_step_state *state,
112 tile_bundle_bits bundle, 103 tilepro_bundle_bits bundle,
113 struct pt_regs *regs, 104 struct pt_regs *regs,
114 enum mem_op mem_op, 105 enum mem_op mem_op,
115 int size, int sign_ext) 106 int size, int sign_ext)
116{ 107{
117 unsigned char __user *addr; 108 unsigned char __user *addr;
118 int val_reg, addr_reg, err, val; 109 int val_reg, addr_reg, err, val;
110 int align_ctl;
111
112 align_ctl = unaligned_fixup;
113 switch (task_thread_info(current)->align_ctl) {
114 case PR_UNALIGN_NOPRINT:
115 align_ctl = 1;
116 break;
117 case PR_UNALIGN_SIGBUS:
118 align_ctl = 0;
119 break;
120 }
119 121
120 /* Get address and value registers */ 122 /* Get address and value registers */
121 if (bundle & TILEPRO_BUNDLE_Y_ENCODING_MASK) { 123 if (bundle & TILEPRO_BUNDLE_Y_ENCODING_MASK) {
@@ -160,7 +162,7 @@ static tile_bundle_bits rewrite_load_store_unaligned(
160 * tilepro hardware would be doing, if it could provide us with the 162 * tilepro hardware would be doing, if it could provide us with the
161 * actual bad address in an SPR, which it doesn't. 163 * actual bad address in an SPR, which it doesn't.
162 */ 164 */
163 if (unaligned_fixup == 0) { 165 if (align_ctl == 0) {
164 siginfo_t info = { 166 siginfo_t info = {
165 .si_signo = SIGBUS, 167 .si_signo = SIGBUS,
166 .si_code = BUS_ADRALN, 168 .si_code = BUS_ADRALN,
@@ -209,14 +211,14 @@ static tile_bundle_bits rewrite_load_store_unaligned(
209 211
210 if (err) { 212 if (err) {
211 siginfo_t info = { 213 siginfo_t info = {
212 .si_signo = SIGSEGV, 214 .si_signo = SIGBUS,
213 .si_code = SEGV_MAPERR, 215 .si_code = BUS_ADRALN,
214 .si_addr = addr 216 .si_addr = addr
215 }; 217 };
216 trace_unhandled_signal("segfault", regs, 218 trace_unhandled_signal("bad address for unaligned fixup", regs,
217 (unsigned long)addr, SIGSEGV); 219 (unsigned long)addr, SIGBUS);
218 force_sig_info(info.si_signo, &info, current); 220 force_sig_info(info.si_signo, &info, current);
219 return (tile_bundle_bits) 0; 221 return (tilepro_bundle_bits) 0;
220 } 222 }
221 223
222 if (unaligned_printk || unaligned_fixup_count == 0) { 224 if (unaligned_printk || unaligned_fixup_count == 0) {
@@ -285,7 +287,7 @@ void single_step_execve(void)
285 ti->step_state = NULL; 287 ti->step_state = NULL;
286} 288}
287 289
288/** 290/*
289 * single_step_once() - entry point when single stepping has been triggered. 291 * single_step_once() - entry point when single stepping has been triggered.
290 * @regs: The machine register state 292 * @regs: The machine register state
291 * 293 *
@@ -304,20 +306,31 @@ void single_step_execve(void)
304 */ 306 */
305void single_step_once(struct pt_regs *regs) 307void single_step_once(struct pt_regs *regs)
306{ 308{
307 extern tile_bundle_bits __single_step_ill_insn; 309 extern tilepro_bundle_bits __single_step_ill_insn;
308 extern tile_bundle_bits __single_step_j_insn; 310 extern tilepro_bundle_bits __single_step_j_insn;
309 extern tile_bundle_bits __single_step_addli_insn; 311 extern tilepro_bundle_bits __single_step_addli_insn;
310 extern tile_bundle_bits __single_step_auli_insn; 312 extern tilepro_bundle_bits __single_step_auli_insn;
311 struct thread_info *info = (void *)current_thread_info(); 313 struct thread_info *info = (void *)current_thread_info();
312 struct single_step_state *state = info->step_state; 314 struct single_step_state *state = info->step_state;
313 int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP); 315 int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
314 tile_bundle_bits __user *buffer, *pc; 316 tilepro_bundle_bits __user *buffer, *pc;
315 tile_bundle_bits bundle; 317 tilepro_bundle_bits bundle;
316 int temp_reg; 318 int temp_reg;
317 int target_reg = TREG_LR; 319 int target_reg = TREG_LR;
318 int err; 320 int err;
319 enum mem_op mem_op = MEMOP_NONE; 321 enum mem_op mem_op = MEMOP_NONE;
320 int size = 0, sign_ext = 0; /* happy compiler */ 322 int size = 0, sign_ext = 0; /* happy compiler */
323 int align_ctl;
324
325 align_ctl = unaligned_fixup;
326 switch (task_thread_info(current)->align_ctl) {
327 case PR_UNALIGN_NOPRINT:
328 align_ctl = 1;
329 break;
330 case PR_UNALIGN_SIGBUS:
331 align_ctl = 0;
332 break;
333 }
321 334
322 asm( 335 asm(
323" .pushsection .rodata.single_step\n" 336" .pushsection .rodata.single_step\n"
@@ -390,7 +403,7 @@ void single_step_once(struct pt_regs *regs)
390 if (regs->faultnum == INT_SWINT_1) 403 if (regs->faultnum == INT_SWINT_1)
391 regs->pc -= 8; 404 regs->pc -= 8;
392 405
393 pc = (tile_bundle_bits __user *)(regs->pc); 406 pc = (tilepro_bundle_bits __user *)(regs->pc);
394 if (get_user(bundle, pc) != 0) { 407 if (get_user(bundle, pc) != 0) {
395 pr_err("Couldn't read instruction at %p trying to step\n", pc); 408 pr_err("Couldn't read instruction at %p trying to step\n", pc);
396 return; 409 return;
@@ -533,7 +546,6 @@ void single_step_once(struct pt_regs *regs)
533 } 546 }
534 break; 547 break;
535 548
536#if CHIP_HAS_WH64()
537 /* postincrement operations */ 549 /* postincrement operations */
538 case IMM_0_OPCODE_X1: 550 case IMM_0_OPCODE_X1:
539 switch (get_ImmOpcodeExtension_X1(bundle)) { 551 switch (get_ImmOpcodeExtension_X1(bundle)) {
@@ -568,7 +580,6 @@ void single_step_once(struct pt_regs *regs)
568 break; 580 break;
569 } 581 }
570 break; 582 break;
571#endif /* CHIP_HAS_WH64() */
572 } 583 }
573 584
574 if (state->update) { 585 if (state->update) {
@@ -627,9 +638,9 @@ void single_step_once(struct pt_regs *regs)
627 638
628 /* 639 /*
629 * Check if we need to rewrite an unaligned load/store. 640 * Check if we need to rewrite an unaligned load/store.
630 * Returning zero is a special value meaning we need to SIGSEGV. 641 * Returning zero is a special value meaning we generated a signal.
631 */ 642 */
632 if (mem_op != MEMOP_NONE && unaligned_fixup >= 0) { 643 if (mem_op != MEMOP_NONE && align_ctl >= 0) {
633 bundle = rewrite_load_store_unaligned(state, bundle, regs, 644 bundle = rewrite_load_store_unaligned(state, bundle, regs,
634 mem_op, size, sign_ext); 645 mem_op, size, sign_ext);
635 if (bundle == 0) 646 if (bundle == 0)
@@ -668,9 +679,9 @@ void single_step_once(struct pt_regs *regs)
668 } 679 }
669 680
670 /* End with a jump back to the next instruction */ 681 /* End with a jump back to the next instruction */
671 delta = ((regs->pc + TILE_BUNDLE_SIZE_IN_BYTES) - 682 delta = ((regs->pc + TILEPRO_BUNDLE_SIZE_IN_BYTES) -
672 (unsigned long)buffer) >> 683 (unsigned long)buffer) >>
673 TILE_LOG2_BUNDLE_ALIGNMENT_IN_BYTES; 684 TILEPRO_LOG2_BUNDLE_ALIGNMENT_IN_BYTES;
674 bundle = __single_step_j_insn; 685 bundle = __single_step_j_insn;
675 bundle |= create_JOffLong_X1(delta); 686 bundle |= create_JOffLong_X1(delta);
676 err |= __put_user(bundle, buffer++); 687 err |= __put_user(bundle, buffer++);
@@ -698,9 +709,6 @@ void single_step_once(struct pt_regs *regs)
698} 709}
699 710
700#else 711#else
701#include <linux/smp.h>
702#include <linux/ptrace.h>
703#include <arch/spr_def.h>
704 712
705static DEFINE_PER_CPU(unsigned long, ss_saved_pc); 713static DEFINE_PER_CPU(unsigned long, ss_saved_pc);
706 714
@@ -743,10 +751,10 @@ void gx_singlestep_handle(struct pt_regs *regs, int fault_num)
743 } else if ((*ss_pc != regs->pc) || 751 } else if ((*ss_pc != regs->pc) ||
744 (!(control & SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK))) { 752 (!(control & SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK))) {
745 753
746 ptrace_notify(SIGTRAP);
747 control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK; 754 control |= SPR_SINGLE_STEP_CONTROL_1__CANCELED_MASK;
748 control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK; 755 control |= SPR_SINGLE_STEP_CONTROL_1__INHIBIT_MASK;
749 __insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control); 756 __insn_mtspr(SPR_SINGLE_STEP_CONTROL_K, control);
757 send_sigtrap(current, regs);
750 } 758 }
751} 759}
752 760
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c
index cbc73a8b8fe1..01e8ab29f43a 100644
--- a/arch/tile/kernel/smp.c
+++ b/arch/tile/kernel/smp.c
@@ -20,8 +20,13 @@
20#include <linux/irq.h> 20#include <linux/irq.h>
21#include <linux/module.h> 21#include <linux/module.h>
22#include <asm/cacheflush.h> 22#include <asm/cacheflush.h>
23#include <asm/homecache.h>
23 24
24HV_Topology smp_topology __write_once; 25/*
26 * We write to width and height with a single store in head_NN.S,
27 * so make the variable aligned to "long".
28 */
29HV_Topology smp_topology __write_once __aligned(sizeof(long));
25EXPORT_SYMBOL(smp_topology); 30EXPORT_SYMBOL(smp_topology);
26 31
27#if CHIP_HAS_IPI() 32#if CHIP_HAS_IPI()
@@ -100,8 +105,8 @@ static void smp_start_cpu_interrupt(void)
100/* Handler to stop the current cpu. */ 105/* Handler to stop the current cpu. */
101static void smp_stop_cpu_interrupt(void) 106static void smp_stop_cpu_interrupt(void)
102{ 107{
103 set_cpu_online(smp_processor_id(), 0);
104 arch_local_irq_disable_all(); 108 arch_local_irq_disable_all();
109 set_cpu_online(smp_processor_id(), 0);
105 for (;;) 110 for (;;)
106 asm("nap; nop"); 111 asm("nap; nop");
107} 112}
@@ -167,9 +172,16 @@ static void ipi_flush_icache_range(void *info)
167void flush_icache_range(unsigned long start, unsigned long end) 172void flush_icache_range(unsigned long start, unsigned long end)
168{ 173{
169 struct ipi_flush flush = { start, end }; 174 struct ipi_flush flush = { start, end };
170 preempt_disable(); 175
171 on_each_cpu(ipi_flush_icache_range, &flush, 1); 176 /* If invoked with irqs disabled, we can not issue IPIs. */
172 preempt_enable(); 177 if (irqs_disabled())
178 flush_remote(0, HV_FLUSH_EVICT_L1I, NULL, 0, 0, 0,
179 NULL, NULL, 0);
180 else {
181 preempt_disable();
182 on_each_cpu(ipi_flush_icache_range, &flush, 1);
183 preempt_enable();
184 }
173} 185}
174 186
175 187
diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c
index a535655b7089..732e9d138661 100644
--- a/arch/tile/kernel/smpboot.c
+++ b/arch/tile/kernel/smpboot.c
@@ -142,13 +142,15 @@ static struct cpumask cpu_started;
142 */ 142 */
143static void start_secondary(void) 143static void start_secondary(void)
144{ 144{
145 int cpuid = smp_processor_id(); 145 int cpuid;
146
147 preempt_disable();
148
149 cpuid = smp_processor_id();
146 150
147 /* Set our thread pointer appropriately. */ 151 /* Set our thread pointer appropriately. */
148 set_my_cpu_offset(__per_cpu_offset[cpuid]); 152 set_my_cpu_offset(__per_cpu_offset[cpuid]);
149 153
150 preempt_disable();
151
152 /* 154 /*
153 * In large machines even this will slow us down, since we 155 * In large machines even this will slow us down, since we
 154 * will be contending for the printk spinlock. 156
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
index af8dfc9665f6..362284af3afd 100644
--- a/arch/tile/kernel/stack.c
+++ b/arch/tile/kernel/stack.c
@@ -29,6 +29,7 @@
29#include <asm/switch_to.h> 29#include <asm/switch_to.h>
30#include <asm/sigframe.h> 30#include <asm/sigframe.h>
31#include <asm/stack.h> 31#include <asm/stack.h>
32#include <asm/vdso.h>
32#include <arch/abi.h> 33#include <arch/abi.h>
33#include <arch/interrupts.h> 34#include <arch/interrupts.h>
34 35
@@ -102,9 +103,8 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
102 p->sp >= sp) { 103 p->sp >= sp) {
103 if (kbt->verbose) 104 if (kbt->verbose)
104 pr_err(" <%s while in kernel mode>\n", fault); 105 pr_err(" <%s while in kernel mode>\n", fault);
105 } else if (EX1_PL(p->ex1) == USER_PL && 106 } else if (user_mode(p) &&
106 p->pc < PAGE_OFFSET && 107 p->sp < PAGE_OFFSET && p->sp != 0) {
107 p->sp < PAGE_OFFSET) {
108 if (kbt->verbose) 108 if (kbt->verbose)
109 pr_err(" <%s while in user mode>\n", fault); 109 pr_err(" <%s while in user mode>\n", fault);
110 } else if (kbt->verbose) { 110 } else if (kbt->verbose) {
@@ -120,7 +120,7 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
120/* Is the pc pointing to a sigreturn trampoline? */ 120/* Is the pc pointing to a sigreturn trampoline? */
121static int is_sigreturn(unsigned long pc) 121static int is_sigreturn(unsigned long pc)
122{ 122{
123 return (pc == VDSO_BASE); 123 return current->mm && (pc == VDSO_SYM(&__vdso_rt_sigreturn));
124} 124}
125 125
126/* Return a pt_regs pointer for a valid signal handler frame */ 126/* Return a pt_regs pointer for a valid signal handler frame */
@@ -129,7 +129,7 @@ static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt,
129{ 129{
130 BacktraceIterator *b = &kbt->it; 130 BacktraceIterator *b = &kbt->it;
131 131
132 if (b->pc == VDSO_BASE && b->sp < PAGE_OFFSET && 132 if (is_sigreturn(b->pc) && b->sp < PAGE_OFFSET &&
133 b->sp % sizeof(long) == 0) { 133 b->sp % sizeof(long) == 0) {
134 int retval; 134 int retval;
135 pagefault_disable(); 135 pagefault_disable();
@@ -195,21 +195,21 @@ static int KBacktraceIterator_next_item_inclusive(
195 */ 195 */
196static void validate_stack(struct pt_regs *regs) 196static void validate_stack(struct pt_regs *regs)
197{ 197{
198 int cpu = smp_processor_id(); 198 int cpu = raw_smp_processor_id();
199 unsigned long ksp0 = get_current_ksp0(); 199 unsigned long ksp0 = get_current_ksp0();
200 unsigned long ksp0_base = ksp0 - THREAD_SIZE; 200 unsigned long ksp0_base = ksp0 & -THREAD_SIZE;
201 unsigned long sp = stack_pointer; 201 unsigned long sp = stack_pointer;
202 202
203 if (EX1_PL(regs->ex1) == KERNEL_PL && regs->sp >= ksp0) { 203 if (EX1_PL(regs->ex1) == KERNEL_PL && regs->sp >= ksp0) {
204 pr_err("WARNING: cpu %d: kernel stack page %#lx underrun!\n" 204 pr_err("WARNING: cpu %d: kernel stack %#lx..%#lx underrun!\n"
205 " sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n", 205 " sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
206 cpu, ksp0_base, sp, regs->sp, regs->pc, regs->lr); 206 cpu, ksp0_base, ksp0, sp, regs->sp, regs->pc, regs->lr);
207 } 207 }
208 208
209 else if (sp < ksp0_base + sizeof(struct thread_info)) { 209 else if (sp < ksp0_base + sizeof(struct thread_info)) {
210 pr_err("WARNING: cpu %d: kernel stack page %#lx overrun!\n" 210 pr_err("WARNING: cpu %d: kernel stack %#lx..%#lx overrun!\n"
211 " sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n", 211 " sp %#lx (%#lx in caller), caller pc %#lx, lr %#lx\n",
212 cpu, ksp0_base, sp, regs->sp, regs->pc, regs->lr); 212 cpu, ksp0_base, ksp0, sp, regs->sp, regs->pc, regs->lr);
213 } 213 }
214} 214}
215 215
@@ -352,6 +352,26 @@ static void describe_addr(struct KBacktraceIterator *kbt,
352} 352}
353 353
354/* 354/*
 355 * Avoid possible crash recursion during backtrace. If it happens, it
 356 * is easy to lose the actual root cause of the failure, so we put a
 357 * simple guard on all the backtrace loops.
358 */
359static bool start_backtrace(void)
360{
361 if (current->thread.in_backtrace) {
362 pr_err("Backtrace requested while in backtrace!\n");
363 return false;
364 }
365 current->thread.in_backtrace = true;
366 return true;
367}
368
369static void end_backtrace(void)
370{
371 current->thread.in_backtrace = false;
372}
373
374/*
355 * This method wraps the backtracer's more generic support. 375 * This method wraps the backtracer's more generic support.
356 * It is only invoked from the architecture-specific code; show_stack() 376 * It is only invoked from the architecture-specific code; show_stack()
357 * and dump_stack() (in entry.S) are architecture-independent entry points. 377 * and dump_stack() (in entry.S) are architecture-independent entry points.
@@ -361,6 +381,8 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
361 int i; 381 int i;
362 int have_mmap_sem = 0; 382 int have_mmap_sem = 0;
363 383
384 if (!start_backtrace())
385 return;
364 if (headers) { 386 if (headers) {
365 /* 387 /*
366 * Add a blank line since if we are called from panic(), 388 * Add a blank line since if we are called from panic(),
@@ -371,7 +393,7 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
371 pr_err("Starting stack dump of tid %d, pid %d (%s)" 393 pr_err("Starting stack dump of tid %d, pid %d (%s)"
372 " on cpu %d at cycle %lld\n", 394 " on cpu %d at cycle %lld\n",
373 kbt->task->pid, kbt->task->tgid, kbt->task->comm, 395 kbt->task->pid, kbt->task->tgid, kbt->task->comm,
374 smp_processor_id(), get_cycles()); 396 raw_smp_processor_id(), get_cycles());
375 } 397 }
376 kbt->verbose = 1; 398 kbt->verbose = 1;
377 i = 0; 399 i = 0;
@@ -402,6 +424,7 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
402 pr_err("Stack dump complete\n"); 424 pr_err("Stack dump complete\n");
403 if (have_mmap_sem) 425 if (have_mmap_sem)
404 up_read(&kbt->task->mm->mmap_sem); 426 up_read(&kbt->task->mm->mmap_sem);
427 end_backtrace();
405} 428}
406EXPORT_SYMBOL(tile_show_stack); 429EXPORT_SYMBOL(tile_show_stack);
407 430
@@ -463,6 +486,8 @@ void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
463 int skip = trace->skip; 486 int skip = trace->skip;
464 int i = 0; 487 int i = 0;
465 488
489 if (!start_backtrace())
490 goto done;
466 if (task == NULL || task == current) 491 if (task == NULL || task == current)
467 KBacktraceIterator_init_current(&kbt); 492 KBacktraceIterator_init_current(&kbt);
468 else 493 else
@@ -476,6 +501,8 @@ void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
476 break; 501 break;
477 trace->entries[i++] = kbt.it.pc; 502 trace->entries[i++] = kbt.it.pc;
478 } 503 }
504 end_backtrace();
505done:
479 trace->nr_entries = i; 506 trace->nr_entries = i;
480} 507}
481EXPORT_SYMBOL(save_stack_trace_tsk); 508EXPORT_SYMBOL(save_stack_trace_tsk);
diff --git a/arch/tile/kernel/sys.c b/arch/tile/kernel/sys.c
index b881a7be24bd..38debe706061 100644
--- a/arch/tile/kernel/sys.c
+++ b/arch/tile/kernel/sys.c
@@ -38,8 +38,10 @@
38SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, len, 38SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, len,
39 unsigned long, flags) 39 unsigned long, flags)
40{ 40{
41 /* DCACHE is not particularly effective if not bound to one cpu. */
41 if (flags & DCACHE) 42 if (flags & DCACHE)
42 homecache_evict(cpumask_of(smp_processor_id())); 43 homecache_evict(cpumask_of(raw_smp_processor_id()));
44
43 if (flags & ICACHE) 45 if (flags & ICACHE)
44 flush_remote(0, HV_FLUSH_EVICT_L1I, mm_cpumask(current->mm), 46 flush_remote(0, HV_FLUSH_EVICT_L1I, mm_cpumask(current->mm),
45 0, 0, 0, NULL, NULL, 0); 47 0, 0, 0, NULL, NULL, 0);
diff --git a/arch/tile/kernel/sysfs.c b/arch/tile/kernel/sysfs.c
index e25b0a89c18f..a3ed12f8f83b 100644
--- a/arch/tile/kernel/sysfs.c
+++ b/arch/tile/kernel/sysfs.c
@@ -157,6 +157,67 @@ hvconfig_bin_read(struct file *filp, struct kobject *kobj,
157 return count; 157 return count;
158} 158}
159 159
160static ssize_t hv_stats_show(struct device *dev,
161 struct device_attribute *attr,
162 char *page)
163{
164 int cpu = dev->id;
165 long lotar = HV_XY_TO_LOTAR(cpu_x(cpu), cpu_y(cpu));
166
167 ssize_t n = hv_confstr(HV_CONFSTR_HV_STATS,
168 (unsigned long)page, PAGE_SIZE - 1,
169 lotar, 0);
170 n = n < 0 ? 0 : min(n, (ssize_t)PAGE_SIZE - 1);
171 page[n] = '\0';
172 return n;
173}
174
175static ssize_t hv_stats_store(struct device *dev,
176 struct device_attribute *attr,
177 const char *page,
178 size_t count)
179{
180 int cpu = dev->id;
181 long lotar = HV_XY_TO_LOTAR(cpu_x(cpu), cpu_y(cpu));
182
183 ssize_t n = hv_confstr(HV_CONFSTR_HV_STATS, 0, 0, lotar, 1);
184 return n < 0 ? n : count;
185}
186
187static DEVICE_ATTR(hv_stats, 0644, hv_stats_show, hv_stats_store);
188
189static int hv_stats_device_add(struct device *dev, struct subsys_interface *sif)
190{
191 int err, cpu = dev->id;
192
193 if (!cpu_online(cpu))
194 return 0;
195
196 err = sysfs_create_file(&dev->kobj, &dev_attr_hv_stats.attr);
197
198 return err;
199}
200
201static int hv_stats_device_remove(struct device *dev,
202 struct subsys_interface *sif)
203{
204 int cpu = dev->id;
205
206 if (!cpu_online(cpu))
207 return 0;
208
209 sysfs_remove_file(&dev->kobj, &dev_attr_hv_stats.attr);
210 return 0;
211}
212
213
214static struct subsys_interface hv_stats_interface = {
215 .name = "hv_stats",
216 .subsys = &cpu_subsys,
217 .add_dev = hv_stats_device_add,
218 .remove_dev = hv_stats_device_remove,
219};
220
160static int __init create_sysfs_entries(void) 221static int __init create_sysfs_entries(void)
161{ 222{
162 int err = 0; 223 int err = 0;
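For illustration, a tiny userspace reader of the new per-cpu attribute (the path is assumed from the cpu subsys layout; writing any value back clears the statistics, per hv_stats_store() above):

#include <stdio.h>

int main(void)
{
	char buf[4096];
	size_t n;
	FILE *f = fopen("/sys/devices/system/cpu/cpu0/hv_stats", "r");

	if (!f)
		return 1;
	n = fread(buf, 1, sizeof(buf) - 1, f);
	buf[n] = '\0';
	fputs(buf, stdout);
	fclose(f);
	return 0;
}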
@@ -188,6 +249,21 @@ static int __init create_sysfs_entries(void)
188 err = sysfs_create_bin_file(hypervisor_kobj, &hvconfig_bin); 249 err = sysfs_create_bin_file(hypervisor_kobj, &hvconfig_bin);
189 } 250 }
190 251
252 if (!err) {
253 /*
254 * Don't bother adding the hv_stats files on each CPU if
255 * our hypervisor doesn't supply statistics.
256 */
257 int cpu = raw_smp_processor_id();
258 long lotar = HV_XY_TO_LOTAR(cpu_x(cpu), cpu_y(cpu));
259 char dummy;
260 ssize_t n = hv_confstr(HV_CONFSTR_HV_STATS,
261 (unsigned long) &dummy, 1,
262 lotar, 0);
263 if (n >= 0)
264 err = subsys_interface_register(&hv_stats_interface);
265 }
266
191 return err; 267 return err;
192} 268}
193subsys_initcall(create_sysfs_entries); 269subsys_initcall(create_sysfs_entries);
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index 7c353d8c2da9..5d10642db63e 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -23,8 +23,10 @@
23#include <linux/smp.h> 23#include <linux/smp.h>
24#include <linux/delay.h> 24#include <linux/delay.h>
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/timekeeper_internal.h>
26#include <asm/irq_regs.h> 27#include <asm/irq_regs.h>
27#include <asm/traps.h> 28#include <asm/traps.h>
29#include <asm/vdso.h>
28#include <hv/hypervisor.h> 30#include <hv/hypervisor.h>
29#include <arch/interrupts.h> 31#include <arch/interrupts.h>
30#include <arch/spr_def.h> 32#include <arch/spr_def.h>
@@ -110,7 +112,6 @@ void __init time_init(void)
110 setup_tile_timer(); 112 setup_tile_timer();
111} 113}
112 114
113
114/* 115/*
115 * Define the tile timer clock event device. The timer is driven by 116 * Define the tile timer clock event device. The timer is driven by
116 * the TILE_TIMER_CONTROL register, which consists of a 31-bit down 117 * the TILE_TIMER_CONTROL register, which consists of a 31-bit down
@@ -237,3 +238,37 @@ cycles_t ns2cycles(unsigned long nsecs)
237 struct clock_event_device *dev = &__raw_get_cpu_var(tile_timer); 238 struct clock_event_device *dev = &__raw_get_cpu_var(tile_timer);
238 return ((u64)nsecs * dev->mult) >> dev->shift; 239 return ((u64)nsecs * dev->mult) >> dev->shift;
239} 240}
241
242void update_vsyscall_tz(void)
243{
244 /* Userspace gettimeofday will spin while this value is odd. */
245 ++vdso_data->tz_update_count;
246 smp_wmb();
247 vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
248 vdso_data->tz_dsttime = sys_tz.tz_dsttime;
249 smp_wmb();
250 ++vdso_data->tz_update_count;
251}
252
253void update_vsyscall(struct timekeeper *tk)
254{
255 struct timespec wall_time = tk_xtime(tk);
256 struct timespec *wtm = &tk->wall_to_monotonic;
257 struct clocksource *clock = tk->clock;
258
259 if (clock != &cycle_counter_cs)
260 return;
261
262 /* Userspace gettimeofday will spin while this value is odd. */
263 ++vdso_data->tb_update_count;
264 smp_wmb();
265 vdso_data->xtime_tod_stamp = clock->cycle_last;
266 vdso_data->xtime_clock_sec = wall_time.tv_sec;
267 vdso_data->xtime_clock_nsec = wall_time.tv_nsec;
268 vdso_data->wtom_clock_sec = wtm->tv_sec;
269 vdso_data->wtom_clock_nsec = wtm->tv_nsec;
270 vdso_data->mult = clock->mult;
271 vdso_data->shift = clock->shift;
272 smp_wmb();
273 ++vdso_data->tb_update_count;
274}
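The odd/even tb_update_count/tz_update_count protocol above is a classic open-coded seqcount. A sketch of the matching vDSO-side reader, with field names mirroring vdso_data (the actual tile reader lives in the vDSO sources; smp_rmb() here pairs with the writer's smp_wmb()):

/* Spin while an update is in progress (count odd), then retry if the
 * count changed while we were reading. */
static inline void read_tz(volatile struct vdso_data *vd,
			   int *west, int *dst)
{
	unsigned count;

	do {
		while ((count = vd->tz_update_count) & 1)
			;	/* writer active */
		smp_rmb();
		*west = vd->tz_minuteswest;
		*dst  = vd->tz_dsttime;
		smp_rmb();
	} while (vd->tz_update_count != count);
}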
diff --git a/arch/tile/kernel/tlb.c b/arch/tile/kernel/tlb.c
index 3fd54d5bbd4c..f23b53515671 100644
--- a/arch/tile/kernel/tlb.c
+++ b/arch/tile/kernel/tlb.c
@@ -91,8 +91,14 @@ void flush_tlb_all(void)
91 } 91 }
92} 92}
93 93
94/*
95 * Callers need to flush the L1I themselves if necessary, e.g. for
96 * kernel module unload. Otherwise we assume callers are not using
 97 * executable pgprot_t's. Using EVICT_L1I here would mean that
 98 * dataplane cpus get an unnecessary interrupt.
99 */
94void flush_tlb_kernel_range(unsigned long start, unsigned long end) 100void flush_tlb_kernel_range(unsigned long start, unsigned long end)
95{ 101{
96 flush_remote(0, HV_FLUSH_EVICT_L1I, cpu_online_mask, 102 flush_remote(0, 0, NULL,
97 start, end - start, PAGE_SIZE, cpu_online_mask, NULL, 0); 103 start, end - start, PAGE_SIZE, cpu_online_mask, NULL, 0);
98} 104}
diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c
index 5b19a23c8908..6b603d556ca6 100644
--- a/arch/tile/kernel/traps.c
+++ b/arch/tile/kernel/traps.c
@@ -15,6 +15,7 @@
15#include <linux/sched.h> 15#include <linux/sched.h>
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/kprobes.h> 17#include <linux/kprobes.h>
18#include <linux/kdebug.h>
18#include <linux/module.h> 19#include <linux/module.h>
19#include <linux/reboot.h> 20#include <linux/reboot.h>
20#include <linux/uaccess.h> 21#include <linux/uaccess.h>
@@ -29,7 +30,7 @@
29 30
30void __init trap_init(void) 31void __init trap_init(void)
31{ 32{
32 /* Nothing needed here since we link code at .intrpt1 */ 33 /* Nothing needed here since we link code at .intrpt */
33} 34}
34 35
35int unaligned_fixup = 1; 36int unaligned_fixup = 1;
@@ -100,13 +101,7 @@ static int retry_gpv(unsigned int gpv_reason)
100 101
101#endif /* CHIP_HAS_TILE_DMA() */ 102#endif /* CHIP_HAS_TILE_DMA() */
102 103
103#ifdef __tilegx__ 104extern tile_bundle_bits bpt_code;
104#define bundle_bits tilegx_bundle_bits
105#else
106#define bundle_bits tile_bundle_bits
107#endif
108
109extern bundle_bits bpt_code;
110 105
111asm(".pushsection .rodata.bpt_code,\"a\";" 106asm(".pushsection .rodata.bpt_code,\"a\";"
112 ".align 8;" 107 ".align 8;"
@@ -114,7 +109,7 @@ asm(".pushsection .rodata.bpt_code,\"a\";"
114 ".size bpt_code,.-bpt_code;" 109 ".size bpt_code,.-bpt_code;"
115 ".popsection"); 110 ".popsection");
116 111
117static int special_ill(bundle_bits bundle, int *sigp, int *codep) 112static int special_ill(tile_bundle_bits bundle, int *sigp, int *codep)
118{ 113{
119 int sig, code, maxcode; 114 int sig, code, maxcode;
120 115
@@ -214,24 +209,73 @@ static const char *const int_name[] = {
214#endif 209#endif
215}; 210};
216 211
212static int do_bpt(struct pt_regs *regs)
213{
214 unsigned long bundle, bcode, bpt;
215
216 bundle = *(unsigned long *)instruction_pointer(regs);
217
218 /*
 219 * bpt should be { bpt; nop }, which is 0x286a44ae51485000ULL.
 220 * We encode the unused least-significant bits for other purposes.
221 */
222 bpt = bundle & ~((1ULL << 12) - 1);
223 if (bpt != TILE_BPT_BUNDLE)
224 return 0;
225
226 bcode = bundle & ((1ULL << 12) - 1);
227 /*
 228 * Notify the kprobe handlers if the instruction is likely to
 229 * pertain to them.
230 */
231 switch (bcode) {
232 /* breakpoint_insn */
233 case 0:
234 notify_die(DIE_BREAK, "debug", regs, bundle,
235 INT_ILL, SIGTRAP);
236 break;
237 /* compiled_bpt */
238 case DIE_COMPILED_BPT:
239 notify_die(DIE_COMPILED_BPT, "debug", regs, bundle,
240 INT_ILL, SIGTRAP);
241 break;
242 /* breakpoint2_insn */
243 case DIE_SSTEPBP:
244 notify_die(DIE_SSTEPBP, "single_step", regs, bundle,
245 INT_ILL, SIGTRAP);
246 break;
247 default:
248 return 0;
249 }
250
251 return 1;
252}
253
217void __kprobes do_trap(struct pt_regs *regs, int fault_num, 254void __kprobes do_trap(struct pt_regs *regs, int fault_num,
218 unsigned long reason) 255 unsigned long reason)
219{ 256{
220 siginfo_t info = { 0 }; 257 siginfo_t info = { 0 };
221 int signo, code; 258 int signo, code;
222 unsigned long address = 0; 259 unsigned long address = 0;
223 bundle_bits instr; 260 tile_bundle_bits instr;
261 int is_kernel = !user_mode(regs);
262
263 /* Handle breakpoints, etc. */
264 if (is_kernel && fault_num == INT_ILL && do_bpt(regs))
265 return;
224 266
225 /* Re-enable interrupts. */ 267 /* Re-enable interrupts, if they were previously enabled. */
226 local_irq_enable(); 268 if (!(regs->flags & PT_FLAGS_DISABLE_IRQ))
269 local_irq_enable();
227 270
228 /* 271 /*
229 * If it hits in kernel mode and we can't fix it up, just exit the 272 * If it hits in kernel mode and we can't fix it up, just exit the
230 * current process and hope for the best. 273 * current process and hope for the best.
231 */ 274 */
232 if (!user_mode(regs)) { 275 if (is_kernel) {
233 const char *name; 276 const char *name;
234 if (fixup_exception(regs)) /* only UNALIGN_DATA in practice */ 277 char buf[100];
278 if (fixup_exception(regs)) /* ILL_TRANS or UNALIGN_DATA */
235 return; 279 return;
236 if (fault_num >= 0 && 280 if (fault_num >= 0 &&
237 fault_num < sizeof(int_name)/sizeof(int_name[0]) && 281 fault_num < sizeof(int_name)/sizeof(int_name[0]) &&
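The notify_die() calls above feed the kernel die-notifier chain; a hypothetical consumer (the same pattern kprobes uses via register_die_notifier()) might look like:

#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/ptrace.h>

static int my_bpt_notify(struct notifier_block *nb, unsigned long val,
			 void *data)
{
	struct die_args *args = data;

	if (val == DIE_BREAK)
		pr_info("bpt hit at pc %#lx\n",
			instruction_pointer(args->regs));
	return NOTIFY_DONE;
}

static struct notifier_block my_bpt_nb = {
	.notifier_call = my_bpt_notify,
};
/* registered with register_die_notifier(&my_bpt_nb) at init time */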
@@ -239,10 +283,16 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
239 name = int_name[fault_num]; 283 name = int_name[fault_num];
240 else 284 else
241 name = "Unknown interrupt"; 285 name = "Unknown interrupt";
242 pr_alert("Kernel took bad trap %d (%s) at PC %#lx\n",
243 fault_num, name, regs->pc);
244 if (fault_num == INT_GPV) 286 if (fault_num == INT_GPV)
245 pr_alert("GPV_REASON is %#lx\n", reason); 287 snprintf(buf, sizeof(buf), "; GPV_REASON %#lx", reason);
288#ifdef __tilegx__
289 else if (fault_num == INT_ILL_TRANS)
290 snprintf(buf, sizeof(buf), "; address %#lx", reason);
291#endif
292 else
293 buf[0] = '\0';
294 pr_alert("Kernel took bad trap %d (%s) at PC %#lx%s\n",
295 fault_num, name, regs->pc, buf);
246 show_regs(regs); 296 show_regs(regs);
247 do_exit(SIGKILL); /* FIXME: implement i386 die() */ 297 do_exit(SIGKILL); /* FIXME: implement i386 die() */
248 return; 298 return;
@@ -324,11 +374,8 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
324 fill_ra_stack(); 374 fill_ra_stack();
325 375
326 signo = SIGSEGV; 376 signo = SIGSEGV;
377 address = reason;
327 code = SEGV_MAPERR; 378 code = SEGV_MAPERR;
328 if (reason & SPR_ILL_TRANS_REASON__I_STREAM_VA_RMASK)
329 address = regs->pc;
330 else
331 address = 0; /* FIXME: GX: single-step for address */
332 break; 379 break;
333 } 380 }
334#endif 381#endif
diff --git a/arch/tile/kernel/unaligned.c b/arch/tile/kernel/unaligned.c
new file mode 100644
index 000000000000..b425fb6a480d
--- /dev/null
+++ b/arch/tile/kernel/unaligned.c
@@ -0,0 +1,1609 @@
1/*
2 * Copyright 2013 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 *
 14 * A code-rewriter that handles the unaligned exception.
15 */
16
17#include <linux/smp.h>
18#include <linux/ptrace.h>
19#include <linux/slab.h>
20#include <linux/thread_info.h>
21#include <linux/uaccess.h>
22#include <linux/mman.h>
23#include <linux/types.h>
24#include <linux/err.h>
25#include <linux/module.h>
26#include <linux/compat.h>
27#include <linux/prctl.h>
28#include <asm/cacheflush.h>
29#include <asm/traps.h>
30#include <asm/uaccess.h>
31#include <asm/unaligned.h>
32#include <arch/abi.h>
33#include <arch/spr_def.h>
34#include <arch/opcode.h>
35
36
37/*
 38 * This file handles the unaligned exception for tile-Gx. The tilepro
 39 * unaligned exception is handled in single_step.c.
40 */
41
42int unaligned_printk;
43
44static int __init setup_unaligned_printk(char *str)
45{
46 long val;
47 if (kstrtol(str, 0, &val) != 0)
48 return 0;
49 unaligned_printk = val;
 50 pr_info("Printk for each unaligned data access is %s\n",
51 unaligned_printk ? "enabled" : "disabled");
52 return 1;
53}
54__setup("unaligned_printk=", setup_unaligned_printk);
55
56unsigned int unaligned_fixup_count;
57
58#ifdef __tilegx__
59
60/*
 61 * Unaligned-data JIT fixup code fragment. Reserved space is 128 bytes.
 62 * The 1st 64-bit word saves the fault PC address, the 2nd word is the
 63 * faulting instruction bundle, followed by 14 JIT bundles.
64 */
65
66struct unaligned_jit_fragment {
67 unsigned long pc;
68 tilegx_bundle_bits bundle;
69 tilegx_bundle_bits insn[14];
70};
71
72/*
 73 * Check if a nop or fnop is at the bundle's X0 pipeline.
74 */
75
76static bool is_bundle_x0_nop(tilegx_bundle_bits bundle)
77{
78 return (((get_UnaryOpcodeExtension_X0(bundle) ==
79 NOP_UNARY_OPCODE_X0) &&
80 (get_RRROpcodeExtension_X0(bundle) ==
81 UNARY_RRR_0_OPCODE_X0) &&
82 (get_Opcode_X0(bundle) ==
83 RRR_0_OPCODE_X0)) ||
84 ((get_UnaryOpcodeExtension_X0(bundle) ==
85 FNOP_UNARY_OPCODE_X0) &&
86 (get_RRROpcodeExtension_X0(bundle) ==
87 UNARY_RRR_0_OPCODE_X0) &&
88 (get_Opcode_X0(bundle) ==
89 RRR_0_OPCODE_X0)));
90}
91
92/*
 93 * Check if a nop or fnop is at the bundle's X1 pipeline.
94 */
95
96static bool is_bundle_x1_nop(tilegx_bundle_bits bundle)
97{
98 return (((get_UnaryOpcodeExtension_X1(bundle) ==
99 NOP_UNARY_OPCODE_X1) &&
100 (get_RRROpcodeExtension_X1(bundle) ==
101 UNARY_RRR_0_OPCODE_X1) &&
102 (get_Opcode_X1(bundle) ==
103 RRR_0_OPCODE_X1)) ||
104 ((get_UnaryOpcodeExtension_X1(bundle) ==
105 FNOP_UNARY_OPCODE_X1) &&
106 (get_RRROpcodeExtension_X1(bundle) ==
107 UNARY_RRR_0_OPCODE_X1) &&
108 (get_Opcode_X1(bundle) ==
109 RRR_0_OPCODE_X1)));
110}
111
112/*
 113 * Check if a nop or fnop is at the bundle's Y0 pipeline.
114 */
115
116static bool is_bundle_y0_nop(tilegx_bundle_bits bundle)
117{
118 return (((get_UnaryOpcodeExtension_Y0(bundle) ==
119 NOP_UNARY_OPCODE_Y0) &&
120 (get_RRROpcodeExtension_Y0(bundle) ==
121 UNARY_RRR_1_OPCODE_Y0) &&
122 (get_Opcode_Y0(bundle) ==
123 RRR_1_OPCODE_Y0)) ||
124 ((get_UnaryOpcodeExtension_Y0(bundle) ==
125 FNOP_UNARY_OPCODE_Y0) &&
126 (get_RRROpcodeExtension_Y0(bundle) ==
127 UNARY_RRR_1_OPCODE_Y0) &&
128 (get_Opcode_Y0(bundle) ==
129 RRR_1_OPCODE_Y0)));
130}
131
132/*
 133 * Check if a nop or fnop is at the bundle's Y1 pipeline.
134 */
135
136static bool is_bundle_y1_nop(tilegx_bundle_bits bundle)
137{
138 return (((get_UnaryOpcodeExtension_Y1(bundle) ==
139 NOP_UNARY_OPCODE_Y1) &&
140 (get_RRROpcodeExtension_Y1(bundle) ==
141 UNARY_RRR_1_OPCODE_Y1) &&
142 (get_Opcode_Y1(bundle) ==
143 RRR_1_OPCODE_Y1)) ||
144 ((get_UnaryOpcodeExtension_Y1(bundle) ==
145 FNOP_UNARY_OPCODE_Y1) &&
146 (get_RRROpcodeExtension_Y1(bundle) ==
147 UNARY_RRR_1_OPCODE_Y1) &&
148 (get_Opcode_Y1(bundle) ==
149 RRR_1_OPCODE_Y1)));
150}
151
152/*
153 * Test if a bundle's y0 and y1 pipelines are both nop or fnop.
154 */
155
156static bool is_y0_y1_nop(tilegx_bundle_bits bundle)
157{
158 return is_bundle_y0_nop(bundle) && is_bundle_y1_nop(bundle);
159}
160
161/*
162 * Test if a bundle's x0 and x1 pipelines are both nop or fnop.
163 */
164
165static bool is_x0_x1_nop(tilegx_bundle_bits bundle)
166{
167 return is_bundle_x0_nop(bundle) && is_bundle_x1_nop(bundle);
168}
169
170/*
 171 * Find the destination and source registers of the faulting unaligned
 172 * access instruction at X1 or Y2. Also allocate up to 3 scratch registers
 173 * (clob1, clob2 and clob3), guaranteed to differ from any register used
 174 * in the fault bundle. r_alias reports whether any instruction other
 175 * than the unaligned load/store shares a register with ra, rb or rd.
176 */
177
178static void find_regs(tilegx_bundle_bits bundle, uint64_t *rd, uint64_t *ra,
179 uint64_t *rb, uint64_t *clob1, uint64_t *clob2,
180 uint64_t *clob3, bool *r_alias)
181{
182 int i;
183 uint64_t reg;
184 uint64_t reg_map = 0, alias_reg_map = 0, map;
185 bool alias;
186
187 *ra = -1;
188 *rb = -1;
189
190 if (rd)
191 *rd = -1;
192
193 *clob1 = -1;
194 *clob2 = -1;
195 *clob3 = -1;
196 alias = false;
197
198 /*
 199 * Parse the fault bundle, find potentially used registers and mark
 200 * the corresponding bits in reg_map and alias_reg_map. These 2 bit
 201 * maps are used to find the scratch registers and determine if there
 202 * is a register alias.
203 */
204 if (bundle & TILEGX_BUNDLE_MODE_MASK) { /* Y Mode Bundle. */
205
206 reg = get_SrcA_Y2(bundle);
207 reg_map |= 1ULL << reg;
208 *ra = reg;
209 reg = get_SrcBDest_Y2(bundle);
210 reg_map |= 1ULL << reg;
211
212 if (rd) {
213 /* Load. */
214 *rd = reg;
215 alias_reg_map = (1ULL << *rd) | (1ULL << *ra);
216 } else {
217 /* Store. */
218 *rb = reg;
219 alias_reg_map = (1ULL << *ra) | (1ULL << *rb);
220 }
221
222 if (!is_bundle_y1_nop(bundle)) {
223 reg = get_SrcA_Y1(bundle);
224 reg_map |= (1ULL << reg);
225 map = (1ULL << reg);
226
227 reg = get_SrcB_Y1(bundle);
228 reg_map |= (1ULL << reg);
229 map |= (1ULL << reg);
230
231 reg = get_Dest_Y1(bundle);
232 reg_map |= (1ULL << reg);
233 map |= (1ULL << reg);
234
235 if (map & alias_reg_map)
236 alias = true;
237 }
238
239 if (!is_bundle_y0_nop(bundle)) {
240 reg = get_SrcA_Y0(bundle);
241 reg_map |= (1ULL << reg);
242 map = (1ULL << reg);
243
244 reg = get_SrcB_Y0(bundle);
245 reg_map |= (1ULL << reg);
246 map |= (1ULL << reg);
247
248 reg = get_Dest_Y0(bundle);
249 reg_map |= (1ULL << reg);
250 map |= (1ULL << reg);
251
252 if (map & alias_reg_map)
253 alias = true;
254 }
255 } else { /* X Mode Bundle. */
256
257 reg = get_SrcA_X1(bundle);
258 reg_map |= (1ULL << reg);
259 *ra = reg;
260 if (rd) {
261 /* Load. */
262 reg = get_Dest_X1(bundle);
263 reg_map |= (1ULL << reg);
264 *rd = reg;
265 alias_reg_map = (1ULL << *rd) | (1ULL << *ra);
266 } else {
267 /* Store. */
268 reg = get_SrcB_X1(bundle);
269 reg_map |= (1ULL << reg);
270 *rb = reg;
271 alias_reg_map = (1ULL << *ra) | (1ULL << *rb);
272 }
273
274 if (!is_bundle_x0_nop(bundle)) {
275 reg = get_SrcA_X0(bundle);
276 reg_map |= (1ULL << reg);
277 map = (1ULL << reg);
278
279 reg = get_SrcB_X0(bundle);
280 reg_map |= (1ULL << reg);
281 map |= (1ULL << reg);
282
283 reg = get_Dest_X0(bundle);
284 reg_map |= (1ULL << reg);
285 map |= (1ULL << reg);
286
287 if (map & alias_reg_map)
288 alias = true;
289 }
290 }
291
292 /*
293 * "alias" indicates whether the unaligned access registers collide
294 * with others in the same bundle. We simply test the all-register-
295 * operands case (RRR) and ignore the immediate case. If a bundle
296 * has no register alias, we may do the fixup in a simpler, faster
297 * manner. So if an immediate field happens to match a register, we
298 * may end up falling back to the generic handling.
299 */
300
301 *r_alias = alias;
302
303 /* Flip bits on reg_map. */
304 reg_map ^= -1ULL;
305
306 /* Scan the low 54 (TREG_SP) bits of reg_map to find 3 set bits. */
307 for (i = 0; i < TREG_SP; i++) {
308 if (reg_map & (0x1ULL << i)) {
309 if (*clob1 == -1) {
310 *clob1 = i;
311 } else if (*clob2 == -1) {
312 *clob2 = i;
313 } else if (*clob3 == -1) {
314 *clob3 = i;
315 return;
316 }
317 }
318 }
319}
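
/*
 * Illustrative sketch (assumptions: 64-bit bitmaps, TREG_SP == 54): the
 * scratch-register search above boils down to inverting the used-register
 * bitmap and picking the first three clear bits below TREG_SP:
 *
 *	static int pick_scratch_regs(uint64_t used, uint64_t *out, int want)
 *	{
 *		int i, found = 0;
 *		for (i = 0; i < 54 && found < want; i++)
 *			if (!(used & (1ULL << i)))
 *				out[found++] = i;
 *		return found;	// find_regs() above wants found == 3
 *	}
 */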
320
321/*
322 * Sanity check for registers ra, rb, rd and clob1/2/3. Return true if any
323 * of them is unexpected.
324 */
325
326static bool check_regs(uint64_t rd, uint64_t ra, uint64_t rb,
327 uint64_t clob1, uint64_t clob2, uint64_t clob3)
328{
329 bool unexpected = false;
330 if ((ra >= 56) && (ra != TREG_ZERO))
331 unexpected = true;
332
333 if ((clob1 >= 56) || (clob2 >= 56) || (clob3 >= 56))
334 unexpected = true;
335
336 if (rd != -1) {
337 if ((rd >= 56) && (rd != TREG_ZERO))
338 unexpected = true;
339 } else {
340 if ((rb >= 56) && (rb != TREG_ZERO))
341 unexpected = true;
342 }
343 return unexpected;
344}
345
346
347#define GX_INSN_X0_MASK ((1ULL << 31) - 1)
348#define GX_INSN_X1_MASK (((1ULL << 31) - 1) << 31)
349#define GX_INSN_Y0_MASK ((0xFULL << 27) | (0xFFFFFULL))
350#define GX_INSN_Y1_MASK (GX_INSN_Y0_MASK << 31)
351#define GX_INSN_Y2_MASK ((0x7FULL << 51) | (0x7FULL << 20))
352
353#ifdef __LITTLE_ENDIAN
354#define GX_INSN_BSWAP(_bundle_) (_bundle_)
355#else
356#define GX_INSN_BSWAP(_bundle_) swab64(_bundle_)
357#endif /* __LITTLE_ENDIAN */
358
359/*
360 * __JIT_CODE(.) creates template bundles in the .rodata.unalign_data
361 * section. The corresponding static function jit_x#_###(.) generates a
362 * partial or whole bundle based on the template and the given arguments.
363 */
364
365#define __JIT_CODE(_X_) \
366 asm (".pushsection .rodata.unalign_data, \"a\"\n" \
367 _X_"\n" \
368 ".popsection\n")
369
370__JIT_CODE("__unalign_jit_x1_mtspr: {mtspr 0, r0}");
371static tilegx_bundle_bits jit_x1_mtspr(int spr, int reg)
372{
373 extern tilegx_bundle_bits __unalign_jit_x1_mtspr;
374 return (GX_INSN_BSWAP(__unalign_jit_x1_mtspr) & GX_INSN_X1_MASK) |
375 create_MT_Imm14_X1(spr) | create_SrcA_X1(reg);
376}
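
/*
 * A minimal sketch of how these template helpers compose: each one masks
 * the template down to a single pipeline slot and ORs in the encoded
 * operand fields; the caller then ORs slots together into a full bundle.
 * For instance (0x2580 is a made-up SPR number, purely for illustration):
 *
 *	tilegx_bundle_bits b;
 *	b = jit_x0_fnop() | jit_x1_mtspr(0x2580, 3);	// {fnop; mtspr 0x2580, r3}
 *
 * jit_x0_fnop()/jit_x1_fnop() are defined further below.
 */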
377
378__JIT_CODE("__unalign_jit_x1_mfspr: {mfspr r0, 0}");
379static tilegx_bundle_bits jit_x1_mfspr(int reg, int spr)
380{
381 extern tilegx_bundle_bits __unalign_jit_x1_mfspr;
382 return (GX_INSN_BSWAP(__unalign_jit_x1_mfspr) & GX_INSN_X1_MASK) |
383 create_MF_Imm14_X1(spr) | create_Dest_X1(reg);
384}
385
386__JIT_CODE("__unalign_jit_x0_addi: {addi r0, r0, 0; iret}");
387static tilegx_bundle_bits jit_x0_addi(int rd, int ra, int imm8)
388{
389 extern tilegx_bundle_bits __unalign_jit_x0_addi;
390 return (GX_INSN_BSWAP(__unalign_jit_x0_addi) & GX_INSN_X0_MASK) |
391 create_Dest_X0(rd) | create_SrcA_X0(ra) |
392 create_Imm8_X0(imm8);
393}
394
395__JIT_CODE("__unalign_jit_x1_ldna: {ldna r0, r0}");
396static tilegx_bundle_bits jit_x1_ldna(int rd, int ra)
397{
398 extern tilegx_bundle_bits __unalign_jit_x1_ldna;
399 return (GX_INSN_BSWAP(__unalign_jit_x1_ldna) & GX_INSN_X1_MASK) |
400 create_Dest_X1(rd) | create_SrcA_X1(ra);
401}
402
403__JIT_CODE("__unalign_jit_x0_dblalign: {dblalign r0, r0 ,r0}");
404static tilegx_bundle_bits jit_x0_dblalign(int rd, int ra, int rb)
405{
406 extern tilegx_bundle_bits __unalign_jit_x0_dblalign;
407 return (GX_INSN_BSWAP(__unalign_jit_x0_dblalign) & GX_INSN_X0_MASK) |
408 create_Dest_X0(rd) | create_SrcA_X0(ra) |
409 create_SrcB_X0(rb);
410}
411
412__JIT_CODE("__unalign_jit_x1_iret: {iret}");
413static tilegx_bundle_bits jit_x1_iret(void)
414{
415 extern tilegx_bundle_bits __unalign_jit_x1_iret;
416 return GX_INSN_BSWAP(__unalign_jit_x1_iret) & GX_INSN_X1_MASK;
417}
418
419__JIT_CODE("__unalign_jit_x01_fnop: {fnop;fnop}");
420static tilegx_bundle_bits jit_x0_fnop(void)
421{
422 extern tilegx_bundle_bits __unalign_jit_x01_fnop;
423 return GX_INSN_BSWAP(__unalign_jit_x01_fnop) & GX_INSN_X0_MASK;
424}
425
426static tilegx_bundle_bits jit_x1_fnop(void)
427{
428 extern tilegx_bundle_bits __unalign_jit_x01_fnop;
429 return GX_INSN_BSWAP(__unalign_jit_x01_fnop) & GX_INSN_X1_MASK;
430}
431
432__JIT_CODE("__unalign_jit_y2_dummy: {fnop; fnop; ld zero, sp}");
433static tilegx_bundle_bits jit_y2_dummy(void)
434{
435 extern tilegx_bundle_bits __unalign_jit_y2_dummy;
436 return GX_INSN_BSWAP(__unalign_jit_y2_dummy) & GX_INSN_Y2_MASK;
437}
438
439static tilegx_bundle_bits jit_y1_fnop(void)
440{
441 extern tilegx_bundle_bits __unalign_jit_y2_dummy;
442 return GX_INSN_BSWAP(__unalign_jit_y2_dummy) & GX_INSN_Y1_MASK;
443}
444
445__JIT_CODE("__unalign_jit_x1_st1_add: {st1_add r1, r0, 0}");
446static tilegx_bundle_bits jit_x1_st1_add(int ra, int rb, int imm8)
447{
448 extern tilegx_bundle_bits __unalign_jit_x1_st1_add;
449 return (GX_INSN_BSWAP(__unalign_jit_x1_st1_add) &
450 (~create_SrcA_X1(-1)) &
451 GX_INSN_X1_MASK) | create_SrcA_X1(ra) |
452 create_SrcB_X1(rb) | create_Dest_Imm8_X1(imm8);
453}
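
/*
 * Note: unlike the all-r0 templates, the st1_add template above encodes
 * r1 in its SrcA field, so that field must first be cleared with
 * ~create_SrcA_X1(-1) before the real ra is ORed in. The st_add and
 * ld_add helpers below use the same trick for their nonzero fields.
 */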
454
455__JIT_CODE("__unalign_jit_x1_st: {crc32_8 r1, r0, r0; st r0, r0}");
456static tilegx_bundle_bits jit_x1_st(int ra, int rb)
457{
458 extern tilegx_bundle_bits __unalign_jit_x1_st;
459 return (GX_INSN_BSWAP(__unalign_jit_x1_st) & GX_INSN_X1_MASK) |
460 create_SrcA_X1(ra) | create_SrcB_X1(rb);
461}
462
463__JIT_CODE("__unalign_jit_x1_st_add: {st_add r1, r0, 0}");
464static tilegx_bundle_bits jit_x1_st_add(int ra, int rb, int imm8)
465{
466 extern tilegx_bundle_bits __unalign_jit_x1_st_add;
467 return (GX_INSN_BSWAP(__unalign_jit_x1_st_add) &
468 (~create_SrcA_X1(-1)) &
469 GX_INSN_X1_MASK) | create_SrcA_X1(ra) |
470 create_SrcB_X1(rb) | create_Dest_Imm8_X1(imm8);
471}
472
473__JIT_CODE("__unalign_jit_x1_ld: {crc32_8 r1, r0, r0; ld r0, r0}");
474static tilegx_bundle_bits jit_x1_ld(int rd, int ra)
475{
476 extern tilegx_bundle_bits __unalign_jit_x1_ld;
477 return (GX_INSN_BSWAP(__unalign_jit_x1_ld) & GX_INSN_X1_MASK) |
478 create_Dest_X1(rd) | create_SrcA_X1(ra);
479}
480
481__JIT_CODE("__unalign_jit_x1_ld_add: {ld_add r1, r0, 0}");
482static tilegx_bundle_bits jit_x1_ld_add(int rd, int ra, int imm8)
483{
484 extern tilegx_bundle_bits __unalign_jit_x1_ld_add;
485 return (GX_INSN_BSWAP(__unalign_jit_x1_ld_add) &
486 (~create_Dest_X1(-1)) &
487 GX_INSN_X1_MASK) | create_Dest_X1(rd) |
488 create_SrcA_X1(ra) | create_Imm8_X1(imm8);
489}
490
491__JIT_CODE("__unalign_jit_x0_bfexts: {bfexts r0, r0, 0, 0}");
492static tilegx_bundle_bits jit_x0_bfexts(int rd, int ra, int bfs, int bfe)
493{
494 extern tilegx_bundle_bits __unalign_jit_x0_bfexts;
495 return (GX_INSN_BSWAP(__unalign_jit_x0_bfexts) &
496 GX_INSN_X0_MASK) |
497 create_Dest_X0(rd) | create_SrcA_X0(ra) |
498 create_BFStart_X0(bfs) | create_BFEnd_X0(bfe);
499}
500
501__JIT_CODE("__unalign_jit_x0_bfextu: {bfextu r0, r0, 0, 0}");
502static tilegx_bundle_bits jit_x0_bfextu(int rd, int ra, int bfs, int bfe)
503{
504 extern tilegx_bundle_bits __unalign_jit_x0_bfextu;
505 return (GX_INSN_BSWAP(__unalign_jit_x0_bfextu) &
506 GX_INSN_X0_MASK) |
507 create_Dest_X0(rd) | create_SrcA_X0(ra) |
508 create_BFStart_X0(bfs) | create_BFEnd_X0(bfe);
509}
510
511__JIT_CODE("__unalign_jit_x1_addi: {bfextu r1, r1, 0, 0; addi r0, r0, 0}");
512static tilegx_bundle_bits jit_x1_addi(int rd, int ra, int imm8)
513{
514 extern tilegx_bundle_bits __unalign_jit_x1_addi;
515 return (GX_INSN_BSWAP(__unalign_jit_x1_addi) & GX_INSN_X1_MASK) |
516 create_Dest_X1(rd) | create_SrcA_X1(ra) |
517 create_Imm8_X1(imm8);
518}
519
520__JIT_CODE("__unalign_jit_x0_shrui: {shrui r0, r0, 0; iret}");
521static tilegx_bundle_bits jit_x0_shrui(int rd, int ra, int imm6)
522{
523 extern tilegx_bundle_bits __unalign_jit_x0_shrui;
524 return (GX_INSN_BSWAP(__unalign_jit_x0_shrui) &
525 GX_INSN_X0_MASK) |
526 create_Dest_X0(rd) | create_SrcA_X0(ra) |
527 create_ShAmt_X0(imm6);
528}
529
530__JIT_CODE("__unalign_jit_x0_rotli: {rotli r0, r0, 0; iret}");
531static tilegx_bundle_bits jit_x0_rotli(int rd, int ra, int imm6)
532{
533 extern tilegx_bundle_bits __unalign_jit_x0_rotli;
534 return (GX_INSN_BSWAP(__unalign_jit_x0_rotli) &
535 GX_INSN_X0_MASK) |
536 create_Dest_X0(rd) | create_SrcA_X0(ra) |
537 create_ShAmt_X0(imm6);
538}
539
540__JIT_CODE("__unalign_jit_x1_bnezt: {bnezt r0, __unalign_jit_x1_bnezt}");
541static tilegx_bundle_bits jit_x1_bnezt(int ra, int broff)
542{
543 extern tilegx_bundle_bits __unalign_jit_x1_bnezt;
544 return (GX_INSN_BSWAP(__unalign_jit_x1_bnezt) &
545 GX_INSN_X1_MASK) |
546 create_SrcA_X1(ra) | create_BrOff_X1(broff);
547}
548
549#undef __JIT_CODE
550
551/*
552 * This function generates unalign fixup JIT.
553 *
554 * We first find the unaligned load/store instruction's destination and
555 * source registers (ra, rb and rd), plus 3 scratch registers, by calling
556 * find_regs(...). The 3 scratch clobbers must not alias any register
557 * used in the fault bundle. Then we analyze the fault bundle to determine
558 * if it's a load or store, the operand width, branch or address increment,
559 * etc. Finally, the generated JIT is copied into the user-space JIT area.
560 */
561
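/*
 * In outline, the fragments generated below take one of three shapes,
 * each ending with an "iret" back to the interrupted program:
 *
 *	simple case (ra != rb, no alias/branch/link/add):
 *		rotli/st1_add byte stores, or ldna + ldna + dblalign loads;
 *	generic store:	save clobbers on the user stack, byte-store loop,
 *		restore clobbers;
 *	generic load:	save clobbers, ldna/ldna/dblalign, restore, then
 *		sign- or zero-extend sub-8-byte loads.
 *
 * If the rest of the faulting bundle did real work, it is replayed
 * inside the fragment as bundle_2.
 */
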
562static
563void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle,
564 int align_ctl)
565{
566 struct thread_info *info = current_thread_info();
567 struct unaligned_jit_fragment frag;
568 struct unaligned_jit_fragment *jit_code_area;
569 tilegx_bundle_bits bundle_2 = 0;
570 /* If bundle_2_enable is false, bundle_2 is a fnop/nop operation. */
571 bool bundle_2_enable = true;
572 uint64_t ra, rb, rd = -1, clob1, clob2, clob3;
573 /*
574 * Indicates whether the unaligned access
575 * instruction's registers collide with
576 * others in the same bundle.
577 */
578 bool alias = false;
579 bool load_n_store = true;
580 bool load_store_signed = false;
581 unsigned int load_store_size = 8;
582 bool y1_br = false; /* True for a branch in the same bundle at Y1. */
583 int y1_br_reg = 0;
584 /* True for a link operation, i.e. jalr or lnk, at Y1. */
585 bool y1_lr = false;
586 int y1_lr_reg = 0;
587 bool x1_add = false; /* True for a load/store ADD instruction at X1. */
588 int x1_add_imm8 = 0;
589 bool unexpected = false;
590 int n = 0, k;
591
592 jit_code_area =
593 (struct unaligned_jit_fragment *)(info->unalign_jit_base);
594
595 memset((void *)&frag, 0, sizeof(frag));
596
597 /* 0: X mode, Otherwise: Y mode. */
598 if (bundle & TILEGX_BUNDLE_MODE_MASK) {
599 unsigned int mod, opcode;
600
601 if (get_Opcode_Y1(bundle) == RRR_1_OPCODE_Y1 &&
602 get_RRROpcodeExtension_Y1(bundle) ==
603 UNARY_RRR_1_OPCODE_Y1) {
604
605 opcode = get_UnaryOpcodeExtension_Y1(bundle);
606
607 /*
608 * Test "jalr", "jalrp", "jr", "jrp" instruction at Y1
609 * pipeline.
610 */
611 switch (opcode) {
612 case JALR_UNARY_OPCODE_Y1:
613 case JALRP_UNARY_OPCODE_Y1:
614 y1_lr = true;
615 y1_lr_reg = 55; /* Link register. */
616 /* FALLTHROUGH */
617 case JR_UNARY_OPCODE_Y1:
618 case JRP_UNARY_OPCODE_Y1:
619 y1_br = true;
620 y1_br_reg = get_SrcA_Y1(bundle);
621 break;
622 case LNK_UNARY_OPCODE_Y1:
623 /* "lnk" at Y1 pipeline. */
624 y1_lr = true;
625 y1_lr_reg = get_Dest_Y1(bundle);
626 break;
627 }
628 }
629
630 opcode = get_Opcode_Y2(bundle);
631 mod = get_Mode(bundle);
632
633 /*
634 * bundle_2 is the bundle after turning Y2 into a dummy
635 * operation: ld zero, sp
636 */
637 bundle_2 = (bundle & (~GX_INSN_Y2_MASK)) | jit_y2_dummy();
638
639 /* Make Y1 an fnop if Y1 is a branch or lnk operation. */
640 if (y1_br || y1_lr) {
641 bundle_2 &= ~(GX_INSN_Y1_MASK);
642 bundle_2 |= jit_y1_fnop();
643 }
644
645 if (is_y0_y1_nop(bundle_2))
646 bundle_2_enable = false;
647
648 if (mod == MODE_OPCODE_YC2) {
649 /* Store. */
650 load_n_store = false;
651 load_store_size = 1 << opcode;
652 load_store_signed = false;
653 find_regs(bundle, 0, &ra, &rb, &clob1, &clob2,
654 &clob3, &alias);
655 if (load_store_size > 8)
656 unexpected = true;
657 } else {
658 /* Load. */
659 load_n_store = true;
660 if (mod == MODE_OPCODE_YB2) {
661 switch (opcode) {
662 case LD_OPCODE_Y2:
663 load_store_signed = false;
664 load_store_size = 8;
665 break;
666 case LD4S_OPCODE_Y2:
667 load_store_signed = true;
668 load_store_size = 4;
669 break;
670 case LD4U_OPCODE_Y2:
671 load_store_signed = false;
672 load_store_size = 4;
673 break;
674 default:
675 unexpected = true;
676 }
677 } else if (mod == MODE_OPCODE_YA2) {
678 if (opcode == LD2S_OPCODE_Y2) {
679 load_store_signed = true;
680 load_store_size = 2;
681 } else if (opcode == LD2U_OPCODE_Y2) {
682 load_store_signed = false;
683 load_store_size = 2;
684 } else
685 unexpected = true;
686 } else
687 unexpected = true;
688 find_regs(bundle, &rd, &ra, &rb, &clob1, &clob2,
689 &clob3, &alias);
690 }
691 } else {
692 unsigned int opcode;
693
694 /* bundle_2 is the bundle after turning X1 into an "fnop". */
695 bundle_2 = (bundle & (~GX_INSN_X1_MASK)) | jit_x1_fnop();
696
697 if (is_x0_x1_nop(bundle_2))
698 bundle_2_enable = false;
699
700 if (get_Opcode_X1(bundle) == RRR_0_OPCODE_X1) {
701 opcode = get_UnaryOpcodeExtension_X1(bundle);
702
703 if (get_RRROpcodeExtension_X1(bundle) ==
704 UNARY_RRR_0_OPCODE_X1) {
705 load_n_store = true;
706 find_regs(bundle, &rd, &ra, &rb, &clob1,
707 &clob2, &clob3, &alias);
708
709 switch (opcode) {
710 case LD_UNARY_OPCODE_X1:
711 load_store_signed = false;
712 load_store_size = 8;
713 break;
714 case LD4S_UNARY_OPCODE_X1:
715 load_store_signed = true;
716 /* FALLTHROUGH */
717 case LD4U_UNARY_OPCODE_X1:
718 load_store_size = 4;
719 break;
720
721 case LD2S_UNARY_OPCODE_X1:
722 load_store_signed = true;
723 /* FALLTHROUGH */
724 case LD2U_UNARY_OPCODE_X1:
725 load_store_size = 2;
726 break;
727 default:
728 unexpected = true;
729 }
730 } else {
731 load_n_store = false;
732 load_store_signed = false;
733 find_regs(bundle, 0, &ra, &rb,
734 &clob1, &clob2, &clob3,
735 &alias);
736
737 opcode = get_RRROpcodeExtension_X1(bundle);
738 switch (opcode) {
739 case ST_RRR_0_OPCODE_X1:
740 load_store_size = 8;
741 break;
742 case ST4_RRR_0_OPCODE_X1:
743 load_store_size = 4;
744 break;
745 case ST2_RRR_0_OPCODE_X1:
746 load_store_size = 2;
747 break;
748 default:
749 unexpected = true;
750 }
751 }
752 } else if (get_Opcode_X1(bundle) == IMM8_OPCODE_X1) {
753 load_n_store = true;
754 opcode = get_Imm8OpcodeExtension_X1(bundle);
755 switch (opcode) {
756 case LD_ADD_IMM8_OPCODE_X1:
757 load_store_size = 8;
758 break;
759
760 case LD4S_ADD_IMM8_OPCODE_X1:
761 load_store_signed = true;
762 /* FALLTHROUGH */
763 case LD4U_ADD_IMM8_OPCODE_X1:
764 load_store_size = 4;
765 break;
766
767 case LD2S_ADD_IMM8_OPCODE_X1:
768 load_store_signed = true;
769 /* FALLTHROUGH */
770 case LD2U_ADD_IMM8_OPCODE_X1:
771 load_store_size = 2;
772 break;
773
774 case ST_ADD_IMM8_OPCODE_X1:
775 load_n_store = false;
776 load_store_size = 8;
777 break;
778 case ST4_ADD_IMM8_OPCODE_X1:
779 load_n_store = false;
780 load_store_size = 4;
781 break;
782 case ST2_ADD_IMM8_OPCODE_X1:
783 load_n_store = false;
784 load_store_size = 2;
785 break;
786 default:
787 unexpected = true;
788 }
789
790 if (!unexpected) {
791 x1_add = true;
792 if (load_n_store)
793 x1_add_imm8 = get_Imm8_X1(bundle);
794 else
795 x1_add_imm8 = get_Dest_Imm8_X1(bundle);
796 }
797
798 find_regs(bundle, load_n_store ? (&rd) : NULL,
799 &ra, &rb, &clob1, &clob2, &clob3, &alias);
800 } else
801 unexpected = true;
802 }
803
804 /*
805 * Sanity-check the register numbers extracted from the fault bundle.
806 */
807 if (check_regs(rd, ra, rb, clob1, clob2, clob3))
808 unexpected = true;
809
810 /* Give warning if register ra has an aligned address. */
811 if (!unexpected)
812 WARN_ON(!((load_store_size - 1) & (regs->regs[ra])));
813
814
815 /*
816 * If the fault came from kernel space, we only need to take care of
817 * the unaligned "get_user/put_user" macros defined in "uaccess.h".
818 * Basically, we will handle bundles like this:
819 * {ld/2u/4s rd, ra; movei rx, 0} or {st/2/4 ra, rb; movei rx, 0}
820 * (Refer to file "arch/tile/include/asm/uaccess.h" for details).
821 * For either a load or a store, a byte-wise operation is performed
822 * by calling get_user() or put_user(). If the macro returns a
823 * non-zero value, set rx to that value; otherwise set rx to zero.
824 * Finally, make pc point to the next bundle and return.
825 */
826
827 if (EX1_PL(regs->ex1) != USER_PL) {
828
829 unsigned long rx = 0;
830 unsigned long x = 0, ret = 0;
831
832 if (y1_br || y1_lr || x1_add ||
833 (load_store_signed !=
834 (load_n_store && load_store_size == 4))) {
835 /* A branch, link, load/store add or bad sign-ext is unexpected. */
836 unexpected = true;
837 } else if (!unexpected) {
838 if (bundle & TILEGX_BUNDLE_MODE_MASK) {
839 /*
840 * Fault bundle is Y mode.
841 * Check if Y1 and Y0 have the form
842 * { movei rx, 0; nop/fnop }; if so,
843 * find rx.
844 */
845
846 if ((get_Opcode_Y1(bundle) == ADDI_OPCODE_Y1)
847 && (get_SrcA_Y1(bundle) == TREG_ZERO) &&
848 (get_Imm8_Y1(bundle) == 0) &&
849 is_bundle_y0_nop(bundle)) {
850 rx = get_Dest_Y1(bundle);
851 } else if ((get_Opcode_Y0(bundle) ==
852 ADDI_OPCODE_Y0) &&
853 (get_SrcA_Y0(bundle) == TREG_ZERO) &&
854 (get_Imm8_Y0(bundle) == 0) &&
855 is_bundle_y1_nop(bundle)) {
856 rx = get_Dest_Y0(bundle);
857 } else {
858 unexpected = true;
859 }
860 } else {
861 /*
862 * Fault bundle is X mode.
863 * Check if X0 is 'movei rx, 0';
864 * if so, find rx.
865 */
866
867 if ((get_Opcode_X0(bundle) == IMM8_OPCODE_X0)
868 && (get_Imm8OpcodeExtension_X0(bundle) ==
869 ADDI_IMM8_OPCODE_X0) &&
870 (get_SrcA_X0(bundle) == TREG_ZERO) &&
871 (get_Imm8_X0(bundle) == 0)) {
872 rx = get_Dest_X0(bundle);
873 } else {
874 unexpected = true;
875 }
876 }
877
878 /* rx should be less than 56. */
879 if (!unexpected && (rx >= 56))
880 unexpected = true;
881 }
882
883 if (!search_exception_tables(regs->pc)) {
884 /* No fixup in the exception tables for the pc. */
885 unexpected = true;
886 }
887
888 if (unexpected) {
889 /* Unexpected unalign kernel fault. */
890 struct task_struct *tsk = validate_current();
891
892 bust_spinlocks(1);
893
894 show_regs(regs);
895
896 if (unlikely(tsk->pid < 2)) {
897 panic("Kernel unalign fault running %s!",
898 tsk->pid ? "init" : "the idle task");
899 }
900#ifdef SUPPORT_DIE
901 die("Oops", regs);
902#endif
903 bust_spinlocks(0);
904
905 do_group_exit(SIGKILL);
906
907 } else {
908 unsigned long i, b = 0;
909 unsigned char *ptr =
910 (unsigned char *)regs->regs[ra];
911 if (load_n_store) {
912 /* Handle get_user(x, ptr). */
913 for (i = 0; i < load_store_size; i++) {
914 ret = get_user(b, ptr++);
915 if (!ret) {
916 /* Success! Update x. */
917#ifdef __LITTLE_ENDIAN
918 x |= (b << (8 * i));
919#else
920 x <<= 8;
921 x |= b;
922#endif /* __LITTLE_ENDIAN */
923 } else {
924 x = 0;
925 break;
926 }
927 }
928
929 /* Sign-extend 4-byte loads. */
930 if (load_store_size == 4)
931 x = (long)(int)x;
932
933 /* Set register rd. */
934 regs->regs[rd] = x;
935
936 /* Set register rx. */
937 regs->regs[rx] = ret;
938
939 /* Bump pc. */
940 regs->pc += 8;
941
942 } else {
943 /* Handle put_user(x, ptr). */
944 x = regs->regs[rb];
945#ifdef __LITTLE_ENDIAN
946 b = x;
947#else
948 /*
949 * Swap x so that it is stored from low
950 * to high memory, the same as in the
951 * little-endian case.
952 */
953 switch (load_store_size) {
954 case 8:
955 b = swab64(x);
956 break;
957 case 4:
958 b = swab32(x);
959 break;
960 case 2:
961 b = swab16(x);
962 break;
963 }
964#endif /* __LITTLE_ENDIAN */
965 for (i = 0; i < load_store_size; i++) {
966 ret = put_user(b, ptr++);
967 if (ret)
968 break;
969 /* Success! Shift one byte. */
970 b >>= 8;
971 }
972 /* Set register rx. */
973 regs->regs[rx] = ret;
974
975 /* Bump pc. */
976 regs->pc += 8;
977 }
978 }
979
980 unaligned_fixup_count++;
981
982 if (unaligned_printk) {
983 pr_info("%s/%d. Unalign fixup for kernel access "
984 "to userspace %lx.",
985 current->comm, current->pid, regs->regs[ra]);
986 }
987
988 /* Done! Return to the exception handler. */
989 return;
990 }
991
992 if ((align_ctl == 0) || unexpected) {
993 siginfo_t info = {
994 .si_signo = SIGBUS,
995 .si_code = BUS_ADRALN,
996 .si_addr = (unsigned char __user *)0
997 };
998 if (unaligned_printk)
999 pr_info("Unalign bundle: unexp @%llx, %llx",
1000 (unsigned long long)regs->pc,
1001 (unsigned long long)bundle);
1002
1003 if (ra < 56) {
1004 unsigned long uaa = (unsigned long)regs->regs[ra];
1005 /* Set the bus address. */
1006 info.si_addr = (unsigned char __user *)uaa;
1007 }
1008
1009 unaligned_fixup_count++;
1010
1011 trace_unhandled_signal("unaligned fixup trap", regs,
1012 (unsigned long)info.si_addr, SIGBUS);
1013 force_sig_info(info.si_signo, &info, current);
1014 return;
1015 }
1016
1017#ifdef __LITTLE_ENDIAN
1018#define UA_FIXUP_ADDR_DELTA 1
1019#define UA_FIXUP_BFEXT_START(_B_) 0
1020#define UA_FIXUP_BFEXT_END(_B_) (8 * (_B_) - 1)
1021#else /* __BIG_ENDIAN */
1022#define UA_FIXUP_ADDR_DELTA -1
1023#define UA_FIXUP_BFEXT_START(_B_) (64 - 8 * (_B_))
1024#define UA_FIXUP_BFEXT_END(_B_) 63
1025#endif /* __LITTLE_ENDIAN */
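
/*
 * Worked example of the field ranges above: for a 4-byte load,
 * UA_FIXUP_BFEXT_START(4)/UA_FIXUP_BFEXT_END(4) select bits [0, 31] on
 * little-endian and bits [32, 63] on big-endian, i.e. the 32 bits that
 * carry the loaded value in the respective byte order.
 */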
1026
1027
1028
1029 if ((ra != rb) && (rd != TREG_SP) && !alias &&
1030 !y1_br && !y1_lr && !x1_add) {
1031 /*
1032 * Simple case: ra != rb, no register alias found,
1033 * and no branch or link. This will be the majority
1034 * of cases. We can do a little better for this simple
1035 * case than with the generic scheme below.
1036 */
1037 if (!load_n_store) {
1038 /*
1039 * Simple store: ra != rb, no need for a scratch register.
1040 * Just store and rotate right, one byte at a time.
1041 */
1042#ifdef __BIG_ENDIAN
1043 frag.insn[n++] =
1044 jit_x0_addi(ra, ra, load_store_size - 1) |
1045 jit_x1_fnop();
1046#endif /* __BIG_ENDIAN */
1047 for (k = 0; k < load_store_size; k++) {
1048 /* Store a byte. */
1049 frag.insn[n++] =
1050 jit_x0_rotli(rb, rb, 56) |
1051 jit_x1_st1_add(ra, rb,
1052 UA_FIXUP_ADDR_DELTA);
1053 }
1054#ifdef __BIG_ENDIAN
1055 frag.insn[n] = jit_x1_addi(ra, ra, 1);
1056#else
1057 frag.insn[n] = jit_x1_addi(ra, ra,
1058 -1 * load_store_size);
1059#endif /* __LITTLE_ENDIAN */
1060
1061 if (load_store_size == 8) {
1062 frag.insn[n] |= jit_x0_fnop();
1063 } else if (load_store_size == 4) {
1064 frag.insn[n] |= jit_x0_rotli(rb, rb, 32);
1065 } else { /* = 2 */
1066 frag.insn[n] |= jit_x0_rotli(rb, rb, 16);
1067 }
1068 n++;
1069 if (bundle_2_enable)
1070 frag.insn[n++] = bundle_2;
1071 frag.insn[n++] = jit_x0_fnop() | jit_x1_iret();
1072 } else {
1073 if (rd == ra) {
1074 /* Use two clobber registers: clob1/2. */
1075 frag.insn[n++] =
1076 jit_x0_addi(TREG_SP, TREG_SP, -16) |
1077 jit_x1_fnop();
1078 frag.insn[n++] =
1079 jit_x0_addi(clob1, ra, 7) |
1080 jit_x1_st_add(TREG_SP, clob1, -8);
1081 frag.insn[n++] =
1082 jit_x0_addi(clob2, ra, 0) |
1083 jit_x1_st(TREG_SP, clob2);
1084 frag.insn[n++] =
1085 jit_x0_fnop() |
1086 jit_x1_ldna(rd, ra);
1087 frag.insn[n++] =
1088 jit_x0_fnop() |
1089 jit_x1_ldna(clob1, clob1);
1090 /*
1091 * Note: we must make sure that rd is not
1092 * sp. Recover clob1/2 from the stack.
1093 */
1094 frag.insn[n++] =
1095 jit_x0_dblalign(rd, clob1, clob2) |
1096 jit_x1_ld_add(clob2, TREG_SP, 8);
1097 frag.insn[n++] =
1098 jit_x0_fnop() |
1099 jit_x1_ld_add(clob1, TREG_SP, 16);
1100 } else {
1101 /* Use one clobber register: clob1 only. */
1102 frag.insn[n++] =
1103 jit_x0_addi(TREG_SP, TREG_SP, -16) |
1104 jit_x1_fnop();
1105 frag.insn[n++] =
1106 jit_x0_addi(clob1, ra, 7) |
1107 jit_x1_st(TREG_SP, clob1);
1108 frag.insn[n++] =
1109 jit_x0_fnop() |
1110 jit_x1_ldna(rd, ra);
1111 frag.insn[n++] =
1112 jit_x0_fnop() |
1113 jit_x1_ldna(clob1, clob1);
1114 /*
1115 * Note: we must make sure that rd is not
1116 * sp. Recover clob1 from the stack.
1117 */
1118 frag.insn[n++] =
1119 jit_x0_dblalign(rd, clob1, ra) |
1120 jit_x1_ld_add(clob1, TREG_SP, 16);
1121 }
1122
1123 if (bundle_2_enable)
1124 frag.insn[n++] = bundle_2;
1125 /*
1126 * For a non-8-byte load, extract the corresponding
1127 * bytes and sign- or zero-extend.
1128 */
1129 if (load_store_size == 4) {
1130 if (load_store_signed)
1131 frag.insn[n++] =
1132 jit_x0_bfexts(
1133 rd, rd,
1134 UA_FIXUP_BFEXT_START(4),
1135 UA_FIXUP_BFEXT_END(4)) |
1136 jit_x1_fnop();
1137 else
1138 frag.insn[n++] =
1139 jit_x0_bfextu(
1140 rd, rd,
1141 UA_FIXUP_BFEXT_START(4),
1142 UA_FIXUP_BFEXT_END(4)) |
1143 jit_x1_fnop();
1144 } else if (load_store_size == 2) {
1145 if (load_store_signed)
1146 frag.insn[n++] =
1147 jit_x0_bfexts(
1148 rd, rd,
1149 UA_FIXUP_BFEXT_START(2),
1150 UA_FIXUP_BFEXT_END(2)) |
1151 jit_x1_fnop();
1152 else
1153 frag.insn[n++] =
1154 jit_x0_bfextu(
1155 rd, rd,
1156 UA_FIXUP_BFEXT_START(2),
1157 UA_FIXUP_BFEXT_END(2)) |
1158 jit_x1_fnop();
1159 }
1160
1161 frag.insn[n++] =
1162 jit_x0_fnop() |
1163 jit_x1_iret();
1164 }
1165 } else if (!load_n_store) {
1166
1167 /*
1168 * Generic memory store cases: use 3 clobber registers.
1169 *
1170 * Allocate space for saving clob2, clob1 and clob3 on the
1171 * user's stack. Register clob3 points to where clob2 is saved,
1172 * followed by clob1 and clob3 from high to low memory.
1173 */
1174 frag.insn[n++] =
1175 jit_x0_addi(TREG_SP, TREG_SP, -32) |
1176 jit_x1_fnop();
1177 frag.insn[n++] =
1178 jit_x0_addi(clob3, TREG_SP, 16) |
1179 jit_x1_st_add(TREG_SP, clob3, 8);
1180#ifdef __LITTLE_ENDIAN
1181 frag.insn[n++] =
1182 jit_x0_addi(clob1, ra, 0) |
1183 jit_x1_st_add(TREG_SP, clob1, 8);
1184#else
1185 frag.insn[n++] =
1186 jit_x0_addi(clob1, ra, load_store_size - 1) |
1187 jit_x1_st_add(TREG_SP, clob1, 8);
1188#endif
1189 if (load_store_size == 8) {
1190 /*
1191 * We store one byte at a time, not for speed but for
1192 * compact code. After each store, the data source register
1193 * rotates right one byte, so it is unchanged after 8 stores.
1194 */
1195 frag.insn[n++] =
1196 jit_x0_addi(clob2, TREG_ZERO, 7) |
1197 jit_x1_st_add(TREG_SP, clob2, 16);
1198 frag.insn[n++] =
1199 jit_x0_rotli(rb, rb, 56) |
1200 jit_x1_st1_add(clob1, rb, UA_FIXUP_ADDR_DELTA);
1201 frag.insn[n++] =
1202 jit_x0_addi(clob2, clob2, -1) |
1203 jit_x1_bnezt(clob2, -1);
1204 frag.insn[n++] =
1205 jit_x0_fnop() |
1206 jit_x1_addi(clob2, y1_br_reg, 0);
1207 } else if (load_store_size == 4) {
1208 frag.insn[n++] =
1209 jit_x0_addi(clob2, TREG_ZERO, 3) |
1210 jit_x1_st_add(TREG_SP, clob2, 16);
1211 frag.insn[n++] =
1212 jit_x0_rotli(rb, rb, 56) |
1213 jit_x1_st1_add(clob1, rb, UA_FIXUP_ADDR_DELTA);
1214 frag.insn[n++] =
1215 jit_x0_addi(clob2, clob2, -1) |
1216 jit_x1_bnezt(clob2, -1);
1217 /*
1218 * Same as the 8-byte case, but we need to rotate another
1219 * 4 bytes to recover rb for the 4-byte store.
1220 */
1221 frag.insn[n++] = jit_x0_rotli(rb, rb, 32) |
1222 jit_x1_addi(clob2, y1_br_reg, 0);
1223 } else { /* =2 */
1224 frag.insn[n++] =
1225 jit_x0_addi(clob2, rb, 0) |
1226 jit_x1_st_add(TREG_SP, clob2, 16);
1227 for (k = 0; k < 2; k++) {
1228 frag.insn[n++] =
1229 jit_x0_shrui(rb, rb, 8) |
1230 jit_x1_st1_add(clob1, rb,
1231 UA_FIXUP_ADDR_DELTA);
1232 }
1233 frag.insn[n++] =
1234 jit_x0_addi(rb, clob2, 0) |
1235 jit_x1_addi(clob2, y1_br_reg, 0);
1236 }
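
/*
 * Why the rotli/st1_add pairing in the loops above works: both pipelines
 * in a bundle read the register values from before the bundle executes,
 * so st1_add stores the current low byte of rb while rotli simultaneously
 * rotates the next byte into the low position. A plain-C sketch of the
 * 8-byte case:
 *
 *	uint64_t v = rb_value;
 *	for (k = 0; k < 8; k++) {
 *		*p++ = (uint8_t)v;		// st1_add
 *		v = (v << 56) | (v >> 8);	// rotli rb, rb, 56
 *	}
 *	// v == rb_value again: rotated by 64 bits in total
 */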
1237
1238 if (bundle_2_enable)
1239 frag.insn[n++] = bundle_2;
1240
1241 if (y1_lr) {
1242 frag.insn[n++] =
1243 jit_x0_fnop() |
1244 jit_x1_mfspr(y1_lr_reg,
1245 SPR_EX_CONTEXT_0_0);
1246 }
1247 if (y1_br) {
1248 frag.insn[n++] =
1249 jit_x0_fnop() |
1250 jit_x1_mtspr(SPR_EX_CONTEXT_0_0,
1251 clob2);
1252 }
1253 if (x1_add) {
1254 frag.insn[n++] =
1255 jit_x0_addi(ra, ra, x1_add_imm8) |
1256 jit_x1_ld_add(clob2, clob3, -8);
1257 } else {
1258 frag.insn[n++] =
1259 jit_x0_fnop() |
1260 jit_x1_ld_add(clob2, clob3, -8);
1261 }
1262 frag.insn[n++] =
1263 jit_x0_fnop() |
1264 jit_x1_ld_add(clob1, clob3, -8);
1265 frag.insn[n++] = jit_x0_fnop() | jit_x1_ld(clob3, clob3);
1266 frag.insn[n++] = jit_x0_fnop() | jit_x1_iret();
1267
1268 } else {
1269 /*
1270 * Generic memory load cases.
1271 *
1272 * Allocate space for saving clob1, clob2 and clob3 on the
1273 * user's stack. Register clob3 points to where clob1 is saved,
1274 * followed by clob2 and clob3 from high to low memory.
1275 */
1276
1277 frag.insn[n++] =
1278 jit_x0_addi(TREG_SP, TREG_SP, -32) |
1279 jit_x1_fnop();
1280 frag.insn[n++] =
1281 jit_x0_addi(clob3, TREG_SP, 16) |
1282 jit_x1_st_add(TREG_SP, clob3, 8);
1283 frag.insn[n++] =
1284 jit_x0_addi(clob2, ra, 0) |
1285 jit_x1_st_add(TREG_SP, clob2, 8);
1286
1287 if (y1_br) {
1288 frag.insn[n++] =
1289 jit_x0_addi(clob1, y1_br_reg, 0) |
1290 jit_x1_st_add(TREG_SP, clob1, 16);
1291 } else {
1292 frag.insn[n++] =
1293 jit_x0_fnop() |
1294 jit_x1_st_add(TREG_SP, clob1, 16);
1295 }
1296
1297 if (bundle_2_enable)
1298 frag.insn[n++] = bundle_2;
1299
1300 if (y1_lr) {
1301 frag.insn[n++] =
1302 jit_x0_fnop() |
1303 jit_x1_mfspr(y1_lr_reg,
1304 SPR_EX_CONTEXT_0_0);
1305 }
1306
1307 if (y1_br) {
1308 frag.insn[n++] =
1309 jit_x0_fnop() |
1310 jit_x1_mtspr(SPR_EX_CONTEXT_0_0,
1311 clob1);
1312 }
1313
1314 frag.insn[n++] =
1315 jit_x0_addi(clob1, clob2, 7) |
1316 jit_x1_ldna(rd, clob2);
1317 frag.insn[n++] =
1318 jit_x0_fnop() |
1319 jit_x1_ldna(clob1, clob1);
1320 frag.insn[n++] =
1321 jit_x0_dblalign(rd, clob1, clob2) |
1322 jit_x1_ld_add(clob1, clob3, -8);
1323 if (x1_add) {
1324 frag.insn[n++] =
1325 jit_x0_addi(ra, ra, x1_add_imm8) |
1326 jit_x1_ld_add(clob2, clob3, -8);
1327 } else {
1328 frag.insn[n++] =
1329 jit_x0_fnop() |
1330 jit_x1_ld_add(clob2, clob3, -8);
1331 }
1332
1333 frag.insn[n++] =
1334 jit_x0_fnop() |
1335 jit_x1_ld(clob3, clob3);
1336
1337 if (load_store_size == 4) {
1338 if (load_store_signed)
1339 frag.insn[n++] =
1340 jit_x0_bfexts(
1341 rd, rd,
1342 UA_FIXUP_BFEXT_START(4),
1343 UA_FIXUP_BFEXT_END(4)) |
1344 jit_x1_fnop();
1345 else
1346 frag.insn[n++] =
1347 jit_x0_bfextu(
1348 rd, rd,
1349 UA_FIXUP_BFEXT_START(4),
1350 UA_FIXUP_BFEXT_END(4)) |
1351 jit_x1_fnop();
1352 } else if (load_store_size == 2) {
1353 if (load_store_signed)
1354 frag.insn[n++] =
1355 jit_x0_bfexts(
1356 rd, rd,
1357 UA_FIXUP_BFEXT_START(2),
1358 UA_FIXUP_BFEXT_END(2)) |
1359 jit_x1_fnop();
1360 else
1361 frag.insn[n++] =
1362 jit_x0_bfextu(
1363 rd, rd,
1364 UA_FIXUP_BFEXT_START(2),
1365 UA_FIXUP_BFEXT_END(2)) |
1366 jit_x1_fnop();
1367 }
1368
1369 frag.insn[n++] = jit_x0_fnop() | jit_x1_iret();
1370 }
1371
1372 /* Max JIT bundle count is 14. */
1373 WARN_ON(n > 14);
1374
1375 if (!unexpected) {
1376 int status = 0;
1377 int idx = (regs->pc >> 3) &
1378 ((1ULL << (PAGE_SHIFT - UNALIGN_JIT_SHIFT)) - 1);
1379
1380 frag.pc = regs->pc;
1381 frag.bundle = bundle;
1382
1383 if (unaligned_printk) {
1384 pr_info("%s/%d, Unalign fixup: pc=%lx "
1385 "bundle=%lx %d %d %d %d %d %d %d %d.",
1386 current->comm, current->pid,
1387 (unsigned long)frag.pc,
1388 (unsigned long)frag.bundle,
1389 (int)alias, (int)rd, (int)ra,
1390 (int)rb, (int)bundle_2_enable,
1391 (int)y1_lr, (int)y1_br, (int)x1_add);
1392
1393 for (k = 0; k < n; k += 2)
1394 pr_info("[%d] %016llx %016llx", k,
1395 (unsigned long long)frag.insn[k],
1396 (unsigned long long)frag.insn[k+1]);
1397 }
1398
1399 /* Swap the bundle byte order on big-endian systems. */
1400#ifdef __BIG_ENDIAN
1401 frag.bundle = GX_INSN_BSWAP(frag.bundle);
1402 for (k = 0; k < n; k++)
1403 frag.insn[k] = GX_INSN_BSWAP(frag.insn[k]);
1404#endif /* __BIG_ENDIAN */
1405
1406 status = copy_to_user((void __user *)&jit_code_area[idx],
1407 &frag, sizeof(frag));
1408 if (status) {
1409 /* Failed to copy the JIT code into userland; send SIGSEGV. */
1410 siginfo_t info = {
1411 .si_signo = SIGSEGV,
1412 .si_code = SEGV_MAPERR,
1413 .si_addr = (void __user *)&jit_code_area[idx]
1414 };
1415
1416 pr_warn("Unalign fixup: pid=%d %s jit_code_area=%llx",
1417 current->pid, current->comm,
1418 (unsigned long long)&jit_code_area[idx]);
1419
1420 trace_unhandled_signal("segfault in unalign fixup",
1421 regs,
1422 (unsigned long)info.si_addr,
1423 SIGSEGV);
1424 force_sig_info(info.si_signo, &info, current);
1425 return;
1426 }
1427
1428
1429 /* Do a cheaper, not strictly accurate, increment. */
1430 unaligned_fixup_count++;
1431 __flush_icache_range((unsigned long)&jit_code_area[idx],
1432 (unsigned long)&jit_code_area[idx] +
1433 sizeof(frag));
1434
1435 /* Set up SPR_EX_CONTEXT_0_0/1 for returning to the user program. */
1436 __insn_mtspr(SPR_EX_CONTEXT_0_0, regs->pc + 8);
1437 __insn_mtspr(SPR_EX_CONTEXT_0_1, PL_ICS_EX1(USER_PL, 0));
1438
1439 /* Point pc at the start of the new JIT code. */
1440 regs->pc = (unsigned long)&jit_code_area[idx].insn[0];
1441 /* Set ICS in SPR_EX_CONTEXT_K_1. */
1442 regs->ex1 = PL_ICS_EX1(USER_PL, 1);
1443 }
1444}
1445
1446
1447/*
1448 * C function to generate the unaligned-data JIT. Called from the
1449 * unaligned-data interrupt handler.
1450 *
1451 * First check if the unalign fixup is disabled, the exception did not
1452 * come from user space, or the sp register points to an unaligned
1453 * address; if so, generate a SIGBUS. Then map a page into user space as
1454 * the JIT area if it is not mapped yet. Generate the JIT code by calling
1455 * jit_bundle_gen(), and then return to the exception handler.
1456 *
1457 * The exception handler will "iret" to the newly generated JIT code after
1458 * restoring the caller-saved registers. In theory, the JIT code will
1459 * perform another "iret" to resume the user's program.
1460 */
1461
1462void do_unaligned(struct pt_regs *regs, int vecnum)
1463{
1464 tilegx_bundle_bits __user *pc;
1465 tilegx_bundle_bits bundle;
1466 struct thread_info *info = current_thread_info();
1467 int align_ctl;
1468
1469 /* Check the per-process unaligned JIT flags. */
1470 align_ctl = unaligned_fixup;
1471 switch (task_thread_info(current)->align_ctl) {
1472 case PR_UNALIGN_NOPRINT:
1473 align_ctl = 1;
1474 break;
1475 case PR_UNALIGN_SIGBUS:
1476 align_ctl = 0;
1477 break;
1478 }
1479
1480 /* Enable interrupts in order to access userland. */
1481 local_irq_enable();
1482
1483 /*
1484 * If the fault came from kernel space, there are two choices:
1485 * (a) unaligned_fixup < 1: we first apply the get/put_user exception
1486 * fixup to return -EFAULT. If there is no fixup, simply panic.
1487 * (b) unaligned_fixup >= 1: we try to fix the unaligned access if it
1488 * was triggered by the get_user/put_user() macros. Panic the
1489 * kernel if it is not fixable.
1490 */
1491
1492 if (EX1_PL(regs->ex1) != USER_PL) {
1493
1494 if (align_ctl < 1) {
1495 unaligned_fixup_count++;
1496 /* If the exception came from the kernel, try to fix it up. */
1497 if (fixup_exception(regs)) {
1498 if (unaligned_printk)
1499 pr_info("Unalign fixup: %d %llx @%llx",
1500 (int)unaligned_fixup,
1501 (unsigned long long)regs->ex1,
1502 (unsigned long long)regs->pc);
1503 return;
1504 }
1505 /* Not fixable; panic. */
1506 panic("Unalign exception in Kernel. pc=%lx",
1507 regs->pc);
1508 return;
1509 } else {
1510 /*
1511 * Try to fix the exception. If we can't, panic the
1512 * kernel.
1513 */
1514 bundle = GX_INSN_BSWAP(
1515 *((tilegx_bundle_bits *)(regs->pc)));
1516 jit_bundle_gen(regs, bundle, align_ctl);
1517 return;
1518 }
1519 }
1520
1521 /*
1522 * If the fault came from user space with ICS set, or the stack
1523 * is not aligned, trigger a SIGBUS.
1524 */
1525 if ((regs->sp & 0x7) || (regs->ex1) || (align_ctl < 0)) {
1526 siginfo_t info = {
1527 .si_signo = SIGBUS,
1528 .si_code = BUS_ADRALN,
1529 .si_addr = (unsigned char __user *)0
1530 };
1531
1532 if (unaligned_printk)
1533 pr_info("Unalign fixup: %d %llx @%llx",
1534 (int)unaligned_fixup,
1535 (unsigned long long)regs->ex1,
1536 (unsigned long long)regs->pc);
1537
1538 unaligned_fixup_count++;
1539
1540 trace_unhandled_signal("unaligned fixup trap", regs, 0, SIGBUS);
1541 force_sig_info(info.si_signo, &info, current);
1542 return;
1543 }
1544
1545
1546 /* Read the bundle that caused the exception. */
1547 pc = (tilegx_bundle_bits __user *)(regs->pc);
1548 if (get_user(bundle, pc) != 0) {
1549 /* We should probably never get here, since pc is a valid user address. */
1550 siginfo_t info = {
1551 .si_signo = SIGSEGV,
1552 .si_code = SEGV_MAPERR,
1553 .si_addr = (void __user *)pc
1554 };
1555 pr_err("Couldn't read instruction at %p trying to step\n", pc);
1556 trace_unhandled_signal("segfault in unalign fixup", regs,
1557 (unsigned long)info.si_addr, SIGSEGV);
1558 force_sig_info(info.si_signo, &info, current);
1559 return;
1560 }
1561
1562 if (!info->unalign_jit_base) {
1563 void __user *user_page;
1564
1565 /*
1566 * Allocate a page in userland.
1567 * For 64-bit processes we try to place the mapping far
1568 * from anything else that might be going on (specifically
1569 * 64 GB below the top of the user address space). If it
1570 * happens not to be possible to put it there, it's OK;
1571 * the kernel will choose another location and we'll
1572 * remember it for later.
1573 */
1574 if (is_compat_task())
1575 user_page = NULL;
1576 else
1577 user_page = (void __user *)(TASK_SIZE - (1UL << 36)) +
1578 (current->pid << PAGE_SHIFT);
1579
1580 user_page = (void __user *) vm_mmap(NULL,
1581 (unsigned long)user_page,
1582 PAGE_SIZE,
1583 PROT_EXEC | PROT_READ |
1584 PROT_WRITE,
1585#ifdef CONFIG_HOMECACHE
1586 MAP_CACHE_HOME_TASK |
1587#endif
1588 MAP_PRIVATE |
1589 MAP_ANONYMOUS,
1590 0);
1591
1592 if (IS_ERR((void __force *)user_page)) {
1593 pr_err("Out of kernel pages while trying do_mmap.\n");
1594 return;
1595 }
1596
1597 /* Save the address in the thread_info struct */
1598 info->unalign_jit_base = user_page;
1599 if (unaligned_printk)
1600 pr_info("Unalign bundle: %d:%d, allocate page @%llx",
1601 raw_smp_processor_id(), current->pid,
1602 (unsigned long long)user_page);
1603 }
1604
1605 /* Generate unalign JIT */
1606 jit_bundle_gen(regs, GX_INSN_BSWAP(bundle), align_ctl);
1607}
1608
1609#endif /* __tilegx__ */
diff --git a/arch/tile/kernel/vdso.c b/arch/tile/kernel/vdso.c
new file mode 100644
index 000000000000..1533af24106e
--- /dev/null
+++ b/arch/tile/kernel/vdso.c
@@ -0,0 +1,212 @@
1/*
2 * Copyright 2012 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15#include <linux/binfmts.h>
16#include <linux/compat.h>
17#include <linux/elf.h>
18#include <linux/mm.h>
19#include <linux/pagemap.h>
20
21#include <asm/vdso.h>
22#include <asm/mman.h>
23#include <asm/sections.h>
24
25#include <arch/sim.h>
26
27/* The alignment of the vDSO. */
28#define VDSO_ALIGNMENT PAGE_SIZE
29
30
31static unsigned int vdso_pages;
32static struct page **vdso_pagelist;
33
34#ifdef CONFIG_COMPAT
35static unsigned int vdso32_pages;
36static struct page **vdso32_pagelist;
37#endif
38static int vdso_ready;
39
40/*
41 * The vdso data page.
42 */
43static union {
44 struct vdso_data data;
45 u8 page[PAGE_SIZE];
46} vdso_data_store __page_aligned_data;
47
48struct vdso_data *vdso_data = &vdso_data_store.data;
49
50static unsigned int __read_mostly vdso_enabled = 1;
51
52static struct page **vdso_setup(void *vdso_kbase, unsigned int pages)
53{
54 int i;
55 struct page **pagelist;
56
57 pagelist = kzalloc(sizeof(struct page *) * (pages + 1), GFP_KERNEL);
58 BUG_ON(pagelist == NULL);
59 for (i = 0; i < pages - 1; i++) {
60 struct page *pg = virt_to_page(vdso_kbase + i*PAGE_SIZE);
61 ClearPageReserved(pg);
62 pagelist[i] = pg;
63 }
64 pagelist[pages - 1] = virt_to_page(vdso_data);
65 pagelist[pages] = NULL;
66
67 return pagelist;
68}
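
/*
 * The resulting NULL-terminated pagelist places the shared vdso_data
 * page after the vDSO code pages, so the data page is always the last
 * one mapped; get_datapage() in vdso/vgettimeofday.c depends on the data
 * page immediately following the code page it executes from.
 */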
69
70static int __init vdso_init(void)
71{
72 int data_pages = sizeof(vdso_data_store) >> PAGE_SHIFT;
73
74 /*
75 * We can disable vDSO support generally, but we need to retain
76 * one page to support the two-bundle (16-byte) rt_sigreturn path.
77 */
78 if (!vdso_enabled) {
79 size_t offset = (unsigned long)&__vdso_rt_sigreturn;
80 static struct page *sigret_page;
81 sigret_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
82 BUG_ON(sigret_page == NULL);
83 vdso_pagelist = &sigret_page;
84 vdso_pages = 1;
85 BUG_ON(offset >= PAGE_SIZE);
86 memcpy(page_address(sigret_page) + offset,
87 vdso_start + offset, 16);
88#ifdef CONFIG_COMPAT
89 vdso32_pages = vdso_pages;
90 vdso32_pagelist = vdso_pagelist;
91#endif
92 vdso_ready = 1;
93 return 0;
94 }
95
96 vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
97 vdso_pages += data_pages;
98 vdso_pagelist = vdso_setup(vdso_start, vdso_pages);
99
100#ifdef CONFIG_COMPAT
101 vdso32_pages = (vdso32_end - vdso32_start) >> PAGE_SHIFT;
102 vdso32_pages += data_pages;
103 vdso32_pagelist = vdso_setup(vdso32_start, vdso32_pages);
104#endif
105
106 smp_wmb();
107 vdso_ready = 1;
108
109 return 0;
110}
111arch_initcall(vdso_init);
112
113const char *arch_vma_name(struct vm_area_struct *vma)
114{
115 if (vma->vm_mm && vma->vm_start == VDSO_BASE)
116 return "[vdso]";
117#ifndef __tilegx__
118 if (vma->vm_start == MEM_USER_INTRPT)
119 return "[intrpt]";
120#endif
121 return NULL;
122}
123
124struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
125{
126 return NULL;
127}
128
129int in_gate_area(struct mm_struct *mm, unsigned long address)
130{
131 return 0;
132}
133
134int in_gate_area_no_mm(unsigned long address)
135{
136 return 0;
137}
138
139int setup_vdso_pages(void)
140{
141 struct page **pagelist;
142 unsigned long pages;
143 struct mm_struct *mm = current->mm;
144 unsigned long vdso_base = 0;
145 int retval = 0;
146
147 if (!vdso_ready)
148 return 0;
149
150 mm->context.vdso_base = 0;
151
152 pagelist = vdso_pagelist;
153 pages = vdso_pages;
154#ifdef CONFIG_COMPAT
155 if (is_compat_task()) {
156 pagelist = vdso32_pagelist;
157 pages = vdso32_pages;
158 }
159#endif
160
161 /*
162 * The vDSO had a problem and was disabled; just don't "enable" it
163 * for this process.
164 */
165 if (pages == 0)
166 return 0;
167
168 vdso_base = get_unmapped_area(NULL, vdso_base,
169 (pages << PAGE_SHIFT) +
170 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
171 0, 0);
172 if (IS_ERR_VALUE(vdso_base)) {
173 retval = vdso_base;
174 return retval;
175 }
176
177 /* Add required alignment. */
178 vdso_base = ALIGN(vdso_base, VDSO_ALIGNMENT);
179
180 /*
181 * Put vDSO base into mm struct. We need to do this before calling
182 * install_special_mapping or the perf counter mmap tracking code
183 * will fail to recognise it as a vDSO (since arch_vma_name fails).
184 */
185 mm->context.vdso_base = vdso_base;
186
187 /*
188 * Our vma flags don't include VM_WRITE, so by default the process
189 * isn't allowed to write to those pages.
190 * gdb can break that via the ptrace interface, and thus trigger COW
191 * on those pages, but it's then your responsibility never to do that
192 * on the "data" page of the vDSO, or you'll stop getting kernel
193 * updates and your nice userland gettimeofday will be totally dead.
194 * It's fine to use that for setting breakpoints in the vDSO code
195 * pages, though.
196 */
197 retval = install_special_mapping(mm, vdso_base,
198 pages << PAGE_SHIFT,
199 VM_READ|VM_EXEC |
200 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
201 pagelist);
202 if (retval)
203 mm->context.vdso_base = 0;
204
205 return retval;
206}
207
208static __init int vdso_func(char *s)
209{
210 return kstrtouint(s, 0, &vdso_enabled);
211}
212__setup("vdso=", vdso_func);
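
/*
 * Usage note: this wires up the standard "vdso=" kernel command-line
 * parameter, so booting with "vdso=0" disables the vDSO except for the
 * single rt_sigreturn page that vdso_init() retains above.
 */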
diff --git a/arch/tile/kernel/vdso/Makefile b/arch/tile/kernel/vdso/Makefile
new file mode 100644
index 000000000000..e2b7a2f4ee41
--- /dev/null
+++ b/arch/tile/kernel/vdso/Makefile
@@ -0,0 +1,118 @@
1# Symbols present in the vdso
2vdso-syms = rt_sigreturn gettimeofday
3
4# Files to link into the vdso
5obj-vdso = $(patsubst %, v%.o, $(vdso-syms))
6
7# Build rules
8targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.lds
9obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
10
11# vdso32 is only for tilegx -m32 compat tasks.
12VDSO32-$(CONFIG_COMPAT) := y
13
14obj-y += vdso.o
15obj-$(VDSO32-y) += vdso32.o
16extra-y += vdso.lds
17CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
18
19# vDSO code runs in userspace and -pg doesn't help with profiling anyway.
20CFLAGS_REMOVE_vdso.o = -pg
21CFLAGS_REMOVE_vdso32.o = -pg
22CFLAGS_REMOVE_vrt_sigreturn.o = -pg
23CFLAGS_REMOVE_vrt_sigreturn32.o = -pg
24CFLAGS_REMOVE_vgettimeofday.o = -pg
25CFLAGS_REMOVE_vgettimeofday32.o = -pg
26
27ifdef CONFIG_FEEDBACK_COLLECT
28# vDSO code runs in userspace, so don't collect feedback data.
29CFLAGS_REMOVE_vdso.o = -ffeedback-generate
30CFLAGS_REMOVE_vdso32.o = -ffeedback-generate
31CFLAGS_REMOVE_vrt_sigreturn.o = -ffeedback-generate
32CFLAGS_REMOVE_vrt_sigreturn32.o = -ffeedback-generate
33CFLAGS_REMOVE_vgettimeofday.o = -ffeedback-generate
34CFLAGS_REMOVE_vgettimeofday32.o = -ffeedback-generate
35endif
36
37# Disable gcov profiling for VDSO code
38GCOV_PROFILE := n
39
40# Force dependency
41$(obj)/vdso.o: $(obj)/vdso.so
42
43# link rule for the .so file, .lds has to be first
44SYSCFLAGS_vdso.so.dbg = $(c_flags)
45$(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso)
46 $(call if_changed,vdsold)
47
48
49# We also create a special relocatable object that should mirror the symbol
50# table and layout of the linked DSO. With ld -R we can then refer to
51# these symbols in the kernel code rather than hand-coded addresses.
52extra-y += vdso-syms.o
53$(obj)/built-in.o: $(obj)/vdso-syms.o
54$(obj)/built-in.o: ld_flags += -R $(obj)/vdso-syms.o
55
56SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
57 $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
58SYSCFLAGS_vdso_syms.o = -r
59$(obj)/vdso-syms.o: $(src)/vdso.lds $(obj)/vrt_sigreturn.o FORCE
60 $(call if_changed,vdsold)
61
62
63# strip rule for the .so file
64$(obj)/%.so: OBJCOPYFLAGS := -S
65$(obj)/%.so: $(obj)/%.so.dbg FORCE
66 $(call if_changed,objcopy)
67
68# actual build commands
69# The DSO images are built using a special linker script
70# Add -lgcc so tilepro gets static muldi3 and lshrdi3 definitions.
71# Make sure only to export the intended __vdso_xxx symbol offsets.
72quiet_cmd_vdsold = VDSOLD $@
73 cmd_vdsold = $(CC) $(KCFLAGS) -nostdlib $(SYSCFLAGS_$(@F)) \
74 -Wl,-T,$(filter-out FORCE,$^) -o $@.tmp -lgcc && \
75 $(CROSS_COMPILE)objcopy \
76 $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@
77
78# install commands for the unstripped file
79quiet_cmd_vdso_install = INSTALL $@
80 cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
81
82vdso.so: $(obj)/vdso.so.dbg
83 @mkdir -p $(MODLIB)/vdso
84 $(call cmd,vdso_install)
85
86vdso32.so: $(obj)/vdso32.so.dbg
87 $(call cmd,vdso_install)
88
89vdso_install: vdso.so
90vdso32_install: vdso32.so
91
92
93KBUILD_AFLAGS_32 := $(filter-out -m64,$(KBUILD_AFLAGS))
94KBUILD_AFLAGS_32 += -m32 -s
95KBUILD_CFLAGS_32 := $(filter-out -m64,$(KBUILD_CFLAGS))
96KBUILD_CFLAGS_32 += -m32 -fPIC -shared
97
98obj-vdso32 = $(patsubst %, v%32.o, $(vdso-syms))
99obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32))
100
101targets += $(obj-vdso32) vdso32.so vdso32.so.dbg
102
103$(obj-vdso32:%=%): KBUILD_AFLAGS = $(KBUILD_AFLAGS_32)
104$(obj-vdso32:%=%): KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
105
106$(obj)/vgettimeofday32.o: $(obj)/vgettimeofday.c
107 $(call if_changed,cc_o_c)
108
109$(obj)/vrt_sigreturn32.o: $(obj)/vrt_sigreturn.S
110 $(call if_changed,as_o_S)
111
112# Force dependency
113$(obj)/vdso32.o: $(obj)/vdso32.so
114
115SYSCFLAGS_vdso32.so.dbg = -m32 -shared -s -Wl,-soname=linux-vdso32.so.1 \
116 $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
117$(obj)/vdso32.so.dbg: $(src)/vdso.lds $(obj-vdso32)
118 $(call if_changed,vdsold)
diff --git a/arch/tile/kernel/vdso/vdso.S b/arch/tile/kernel/vdso/vdso.S
new file mode 100644
index 000000000000..3467adb41630
--- /dev/null
+++ b/arch/tile/kernel/vdso/vdso.S
@@ -0,0 +1,28 @@
1/*
2 * Copyright 2012 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15#include <linux/init.h>
16#include <linux/linkage.h>
17#include <asm/page.h>
18
19 __PAGE_ALIGNED_DATA
20
21 .global vdso_start, vdso_end
22 .align PAGE_SIZE
23vdso_start:
24 .incbin "arch/tile/kernel/vdso/vdso.so"
25 .align PAGE_SIZE
26vdso_end:
27
28 .previous
diff --git a/arch/tile/kernel/vdso/vdso.lds.S b/arch/tile/kernel/vdso/vdso.lds.S
new file mode 100644
index 000000000000..041cd6c39c83
--- /dev/null
+++ b/arch/tile/kernel/vdso/vdso.lds.S
@@ -0,0 +1,87 @@
1/*
2 * Copyright 2012 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15#define VDSO_VERSION_STRING LINUX_2.6
16
17
18OUTPUT_ARCH(tile)
19
20/* The ELF entry point can be used to set the AT_SYSINFO value. */
21ENTRY(__vdso_rt_sigreturn);
22
23
24SECTIONS
25{
26 . = SIZEOF_HEADERS;
27
28 .hash : { *(.hash) } :text
29 .gnu.hash : { *(.gnu.hash) }
30 .dynsym : { *(.dynsym) }
31 .dynstr : { *(.dynstr) }
32 .gnu.version : { *(.gnu.version) }
33 .gnu.version_d : { *(.gnu.version_d) }
34 .gnu.version_r : { *(.gnu.version_r) }
35
36 .note : { *(.note.*) } :text :note
37 .dynamic : { *(.dynamic) } :text :dynamic
38
39 .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
40 .eh_frame : { KEEP (*(.eh_frame)) } :text
41
42 .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
43
44 /*
45 * This linker script is used both with -r and with -shared.
46 * For the layouts to match, we need to skip more than enough
47 * space for the dynamic symbol table et al. If this amount
48 * is insufficient, ld -shared will barf. Just increase it here.
49 */
50 . = 0x1000;
51 .text : { *(.text .text.*) } :text
52
53 .data : {
54 *(.got.plt) *(.got)
55 *(.data .data.* .gnu.linkonce.d.*)
56 *(.dynbss)
57 *(.bss .bss.* .gnu.linkonce.b.*)
58 }
59}
60
61
62/*
63 * We must supply the ELF program headers explicitly to get just one
64 * PT_LOAD segment, and set the flags explicitly to make segments read-only.
65 */
66PHDRS
67{
68 text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */
69 dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
70 note PT_NOTE FLAGS(4); /* PF_R */
71 eh_frame_hdr PT_GNU_EH_FRAME;
72}
73
74
75/*
76 * This controls what userland symbols we export from the vDSO.
77 */
78VERSION
79{
80 VDSO_VERSION_STRING {
81 global:
82 __vdso_rt_sigreturn;
83 __vdso_gettimeofday;
84 gettimeofday;
85 local:*;
86 };
87}
diff --git a/arch/tile/kernel/vdso/vdso32.S b/arch/tile/kernel/vdso/vdso32.S
new file mode 100644
index 000000000000..1d1ac3257e11
--- /dev/null
+++ b/arch/tile/kernel/vdso/vdso32.S
@@ -0,0 +1,28 @@
1/*
2 * Copyright 2013 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15#include <linux/init.h>
16#include <linux/linkage.h>
17#include <asm/page.h>
18
19 __PAGE_ALIGNED_DATA
20
21 .global vdso32_start, vdso32_end
22 .align PAGE_SIZE
23vdso32_start:
24 .incbin "arch/tile/kernel/vdso/vdso32.so"
25 .align PAGE_SIZE
26vdso32_end:
27
28 .previous
diff --git a/arch/tile/kernel/vdso/vgettimeofday.c b/arch/tile/kernel/vdso/vgettimeofday.c
new file mode 100644
index 000000000000..51ec8e46f5f9
--- /dev/null
+++ b/arch/tile/kernel/vdso/vgettimeofday.c
@@ -0,0 +1,107 @@
1/*
2 * Copyright 2012 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15#define VDSO_BUILD /* avoid some shift warnings for -m32 in <asm/page.h> */
16#include <linux/time.h>
17#include <asm/timex.h>
18#include <asm/vdso.h>
19
20#if CHIP_HAS_SPLIT_CYCLE()
21static inline cycles_t get_cycles_inline(void)
22{
23 unsigned int high = __insn_mfspr(SPR_CYCLE_HIGH);
24 unsigned int low = __insn_mfspr(SPR_CYCLE_LOW);
25 unsigned int high2 = __insn_mfspr(SPR_CYCLE_HIGH);
26
27 while (unlikely(high != high2)) {
28 low = __insn_mfspr(SPR_CYCLE_LOW);
29 high = high2;
30 high2 = __insn_mfspr(SPR_CYCLE_HIGH);
31 }
32
33 return (((cycles_t)high) << 32) | low;
34}
35#define get_cycles get_cycles_inline
36#endif
37
38/*
39 * Find out the vDSO data page address in the process address space.
40 */
41inline unsigned long get_datapage(void)
42{
43 unsigned long ret;
44
45 /* The vDSO data page is located in the second vDSO page. */
46 asm volatile ("lnk %0" : "=r"(ret));
47 ret &= ~(PAGE_SIZE - 1);
48 ret += PAGE_SIZE;
49
50 return ret;
51}
52
53int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
54{
55 cycles_t cycles;
56 unsigned long count, sec, ns;
57 volatile struct vdso_data *vdso_data;
58
59 vdso_data = (struct vdso_data *)get_datapage();
60 /* The use of the timezone is obsolete; normally tz is NULL. */
61 if (unlikely(tz != NULL)) {
62 while (1) {
63 /* Spin until the update finishes. */
64 count = vdso_data->tz_update_count;
65 if (count & 1)
66 continue;
67
68 tz->tz_minuteswest = vdso_data->tz_minuteswest;
69 tz->tz_dsttime = vdso_data->tz_dsttime;
70
71 /* Check whether it was updated; if so, read again. */
72 if (count == vdso_data->tz_update_count)
73 break;
74 }
75 }
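
/*
 * The loop above and the one below implement a seqlock-style read: an
 * odd tz_update_count/tb_update_count means an update is in progress,
 * and a count that changed across the read means the data must be
 * re-read.
 */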
76
77 if (unlikely(tv == NULL))
78 return 0;
79
80 while (1) {
81 /* Spin until the update finishes. */
82 count = vdso_data->tb_update_count;
83 if (count & 1)
84 continue;
85
86 cycles = (get_cycles() - vdso_data->xtime_tod_stamp);
87 ns = (cycles * vdso_data->mult) >> vdso_data->shift;
88 sec = vdso_data->xtime_clock_sec;
89 ns += vdso_data->xtime_clock_nsec;
90 if (ns >= NSEC_PER_SEC) {
91 ns -= NSEC_PER_SEC;
92 sec += 1;
93 }
94
95 /* Check whether it was updated; if so, read again. */
96 if (count == vdso_data->tb_update_count)
97 break;
98 }
99
100 tv->tv_sec = sec;
101 tv->tv_usec = ns / 1000;
102
103 return 0;
104}
105
106int gettimeofday(struct timeval *tv, struct timezone *tz)
107 __attribute__((weak, alias("__vdso_gettimeofday")));
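
/*
 * A minimal user-space sketch of the fast path this provides, assuming
 * the C library (or the caller directly) resolves gettimeofday to the
 * vDSO symbol exported above; the call reads the shared data page and
 * cycle counter without entering the kernel:
 *
 *	#include <stdio.h>
 *	#include <sys/time.h>
 *
 *	int main(void)
 *	{
 *		struct timeval tv;
 *		if (gettimeofday(&tv, NULL) == 0)	// vDSO path: no syscall trap
 *			printf("%ld.%06ld\n", (long)tv.tv_sec, (long)tv.tv_usec);
 *		return 0;
 *	}
 */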
diff --git a/arch/tile/kernel/vdso/vrt_sigreturn.S b/arch/tile/kernel/vdso/vrt_sigreturn.S
new file mode 100644
index 000000000000..6326caf4a039
--- /dev/null
+++ b/arch/tile/kernel/vdso/vrt_sigreturn.S
@@ -0,0 +1,30 @@
1/*
2 * Copyright 2012 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15#include <linux/linkage.h>
16#include <arch/abi.h>
17#include <asm/unistd.h>
18
19/*
20 * Note that libc has a copy of this function that it uses to compare
21 * against the PC when a stack backtrace ends, so if this code is
22 * changed, the libc implementation(s) should also be updated.
23 */
24ENTRY(__vdso_rt_sigreturn)
25 moveli TREG_SYSCALL_NR_NAME, __NR_rt_sigreturn
26 swint1
27 /* We don't use ENDPROC to avoid tagging this symbol as FUNC,
28 * which confuses the perf tool.
29 */
30 END(__vdso_rt_sigreturn)
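
The trampoline is nothing but the rt_sigreturn system call: the kernel points the signal frame's return address at it, so "returning" from a handler traps back into the kernel to restore the interrupted context. For orientation only, a C-level equivalent using the libc syscall() wrapper (which the real vDSO of course cannot use):

#include <sys/syscall.h>
#include <unistd.h>

/* Only meaningful when reached via a kernel-built signal frame;
 * called directly it would hand the kernel a garbage frame.  It
 * never returns to its caller.
 */
static void rt_sigreturn_trampoline(void)
{
        syscall(SYS_rt_sigreturn);
}

As the comment in the source warns, the exact instruction sequence matters in practice: libc ends stack backtraces by comparing the PC against this code, so the assembly form is the real interface.
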
diff --git a/arch/tile/kernel/vmlinux.lds.S b/arch/tile/kernel/vmlinux.lds.S
index a13ed902afbb..f1819423ffc9 100644
--- a/arch/tile/kernel/vmlinux.lds.S
+++ b/arch/tile/kernel/vmlinux.lds.S
@@ -5,7 +5,7 @@
5#include <hv/hypervisor.h> 5#include <hv/hypervisor.h>
6 6
7/* Text loads starting from the supervisor interrupt vector address. */ 7/* Text loads starting from the supervisor interrupt vector address. */
8#define TEXT_OFFSET MEM_SV_INTRPT 8#define TEXT_OFFSET MEM_SV_START
9 9
10OUTPUT_ARCH(tile) 10OUTPUT_ARCH(tile)
11ENTRY(_start) 11ENTRY(_start)
@@ -13,7 +13,7 @@ jiffies = jiffies_64;
13 13
14PHDRS 14PHDRS
15{ 15{
16 intrpt1 PT_LOAD ; 16 intrpt PT_LOAD ;
17 text PT_LOAD ; 17 text PT_LOAD ;
18 data PT_LOAD ; 18 data PT_LOAD ;
19} 19}
@@ -24,14 +24,17 @@ SECTIONS
24 #define LOAD_OFFSET TEXT_OFFSET 24 #define LOAD_OFFSET TEXT_OFFSET
25 25
26 /* Interrupt vectors */ 26 /* Interrupt vectors */
27 .intrpt1 (LOAD_OFFSET) : AT ( 0 ) /* put at the start of physical memory */ 27 .intrpt (LOAD_OFFSET) : AT ( 0 ) /* put at the start of physical memory */
28 { 28 {
29 _text = .; 29 _text = .;
30 *(.intrpt1) 30 *(.intrpt)
31 } :intrpt1 =0 31 } :intrpt =0
32 32
33 /* Hypervisor call vectors */ 33 /* Hypervisor call vectors */
34 #include "hvglue.lds" 34 . = ALIGN(0x10000);
35 .hvglue : AT (ADDR(.hvglue) - LOAD_OFFSET) {
36 *(.hvglue)
37 } :NONE
35 38
36 /* Now the real code */ 39 /* Now the real code */
37 . = ALIGN(0x20000); 40 . = ALIGN(0x20000);
@@ -40,7 +43,11 @@ SECTIONS
40 HEAD_TEXT 43 HEAD_TEXT
41 SCHED_TEXT 44 SCHED_TEXT
42 LOCK_TEXT 45 LOCK_TEXT
46 KPROBES_TEXT
47 IRQENTRY_TEXT
43 __fix_text_end = .; /* tile-cpack won't rearrange before this */ 48 __fix_text_end = .; /* tile-cpack won't rearrange before this */
49 ALIGN_FUNCTION();
50 *(.hottext*)
44 TEXT_TEXT 51 TEXT_TEXT
45 *(.text.*) 52 *(.text.*)
46 *(.coldtext*) 53 *(.coldtext*)
@@ -67,20 +74,8 @@ SECTIONS
67 __init_end = .; 74 __init_end = .;
68 75
69 _sdata = .; /* Start of data section */ 76 _sdata = .; /* Start of data section */
70
71 RO_DATA_SECTION(PAGE_SIZE) 77 RO_DATA_SECTION(PAGE_SIZE)
72
73 /* initially writeable, then read-only */
74 . = ALIGN(PAGE_SIZE);
75 __w1data_begin = .;
76 .w1data : AT(ADDR(.w1data) - LOAD_OFFSET) {
77 VMLINUX_SYMBOL(__w1data_begin) = .;
78 *(.w1data)
79 VMLINUX_SYMBOL(__w1data_end) = .;
80 }
81
82 RW_DATA_SECTION(L2_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) 78 RW_DATA_SECTION(L2_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
83
84 _edata = .; 79 _edata = .;
85 80
86 EXCEPTION_TABLE(L2_CACHE_BYTES) 81 EXCEPTION_TABLE(L2_CACHE_BYTES)
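
Names such as _text, _sdata and _edata in the script above are linker-defined symbols; C code can use only their addresses. A brief sketch of the consuming side, using two symbols the script actually defines:

/* Linker-script symbols: declared as arrays so only the address is
 * usable; there is no C object behind these names.
 */
extern char _sdata[], _edata[];

static unsigned long data_section_bytes(void)
{
        return (unsigned long)(_edata - _sdata);
}
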
diff --git a/arch/tile/lib/Makefile b/arch/tile/lib/Makefile
index 985f59858234..c4211cbb2021 100644
--- a/arch/tile/lib/Makefile
+++ b/arch/tile/lib/Makefile
@@ -4,15 +4,15 @@
4 4
5lib-y = cacheflush.o checksum.o cpumask.o delay.o uaccess.o \ 5lib-y = cacheflush.o checksum.o cpumask.o delay.o uaccess.o \
6 memmove.o memcpy_$(BITS).o memchr_$(BITS).o memset_$(BITS).o \ 6 memmove.o memcpy_$(BITS).o memchr_$(BITS).o memset_$(BITS).o \
7 strchr_$(BITS).o strlen_$(BITS).o 7 strchr_$(BITS).o strlen_$(BITS).o strnlen_$(BITS).o
8
9ifeq ($(CONFIG_TILEGX),y)
10CFLAGS_REMOVE_memcpy_user_64.o = -fno-omit-frame-pointer
11lib-y += memcpy_user_64.o
12else
13lib-y += atomic_32.o atomic_asm_32.o memcpy_tile64.o
14endif
15 8
9lib-$(CONFIG_TILEGX) += memcpy_user_64.o
10lib-$(CONFIG_TILEPRO) += atomic_32.o atomic_asm_32.o
16lib-$(CONFIG_SMP) += spinlock_$(BITS).o usercopy_$(BITS).o 11lib-$(CONFIG_SMP) += spinlock_$(BITS).o usercopy_$(BITS).o
17 12
18obj-$(CONFIG_MODULES) += exports.o 13obj-$(CONFIG_MODULES) += exports.o
14
15# The finv_buffer_remote() and copy_{to,from}_user() routines can't
16# have -pg added, since they both rely on being leaf functions.
17CFLAGS_REMOVE_cacheflush.o = -pg
18CFLAGS_REMOVE_memcpy_user_64.o = -pg
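
The two CFLAGS_REMOVE lines use kbuild's per-object flag removal so these files are built without -pg. The conflict the comment describes is sketched below: -pg makes the compiler call a profiling hook on entry to every function (named __mcount here to match the export added to exports.c further down), which destroys the leaf-function property those routines depend on. Illustrative only; this is what the instrumentation amounts to, not code anyone writes by hand.

void __mcount(void);    /* profiling hook; arch asm in the real kernel */

int leaf_sum(int a, int b)
{
        /* Compiled with -pg this body would begin with a hidden
         * __mcount() call, so the function could no longer keep its
         * return address live in a register across its whole body.
         */
        return a + b;
}
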
diff --git a/arch/tile/lib/atomic_32.c b/arch/tile/lib/atomic_32.c
index f5cada70c3c8..759efa337be8 100644
--- a/arch/tile/lib/atomic_32.c
+++ b/arch/tile/lib/atomic_32.c
@@ -20,50 +20,12 @@
20#include <linux/atomic.h> 20#include <linux/atomic.h>
21#include <arch/chip.h> 21#include <arch/chip.h>
22 22
23/* See <asm/atomic_32.h> */
24#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
25
26/*
27 * A block of memory containing locks for atomic ops. Each instance of this
28 * struct will be homed on a different CPU.
29 */
30struct atomic_locks_on_cpu {
31 int lock[ATOMIC_HASH_L2_SIZE];
32} __attribute__((aligned(ATOMIC_HASH_L2_SIZE * 4)));
33
34static DEFINE_PER_CPU(struct atomic_locks_on_cpu, atomic_lock_pool);
35
36/* The locks we'll use until __init_atomic_per_cpu is called. */
37static struct atomic_locks_on_cpu __initdata initial_atomic_locks;
38
39/* Hash into this vector to get a pointer to lock for the given atomic. */
40struct atomic_locks_on_cpu *atomic_lock_ptr[ATOMIC_HASH_L1_SIZE]
41 __write_once = {
42 [0 ... ATOMIC_HASH_L1_SIZE-1] (&initial_atomic_locks)
43};
44
45#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
46
47/* This page is remapped on startup to be hash-for-home. */ 23/* This page is remapped on startup to be hash-for-home. */
48int atomic_locks[PAGE_SIZE / sizeof(int)] __page_aligned_bss; 24int atomic_locks[PAGE_SIZE / sizeof(int)] __page_aligned_bss;
49 25
50#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
51
52int *__atomic_hashed_lock(volatile void *v) 26int *__atomic_hashed_lock(volatile void *v)
53{ 27{
54 /* NOTE: this code must match "sys_cmpxchg" in kernel/intvec_32.S */ 28 /* NOTE: this code must match "sys_cmpxchg" in kernel/intvec_32.S */
55#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
56 unsigned long i =
57 (unsigned long) v & ((PAGE_SIZE-1) & -sizeof(long long));
58 unsigned long n = __insn_crc32_32(0, i);
59
60 /* Grab high bits for L1 index. */
61 unsigned long l1_index = n >> ((sizeof(n) * 8) - ATOMIC_HASH_L1_SHIFT);
62 /* Grab low bits for L2 index. */
63 unsigned long l2_index = n & (ATOMIC_HASH_L2_SIZE - 1);
64
65 return &atomic_lock_ptr[l1_index]->lock[l2_index];
66#else
67 /* 29 /*
68 * Use bits [3, 3 + ATOMIC_HASH_SHIFT) as the lock index. 30 * Use bits [3, 3 + ATOMIC_HASH_SHIFT) as the lock index.
69 * Using mm works here because atomic_locks is page aligned. 31 * Using mm works here because atomic_locks is page aligned.
@@ -72,26 +34,13 @@ int *__atomic_hashed_lock(volatile void *v)
72 (unsigned long)atomic_locks, 34 (unsigned long)atomic_locks,
73 2, (ATOMIC_HASH_SHIFT + 2) - 1); 35 2, (ATOMIC_HASH_SHIFT + 2) - 1);
74 return (int *)ptr; 36 return (int *)ptr;
75#endif
76} 37}
77 38
78#ifdef CONFIG_SMP 39#ifdef CONFIG_SMP
79/* Return whether the passed pointer is a valid atomic lock pointer. */ 40/* Return whether the passed pointer is a valid atomic lock pointer. */
80static int is_atomic_lock(int *p) 41static int is_atomic_lock(int *p)
81{ 42{
82#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
83 int i;
84 for (i = 0; i < ATOMIC_HASH_L1_SIZE; ++i) {
85
86 if (p >= &atomic_lock_ptr[i]->lock[0] &&
87 p < &atomic_lock_ptr[i]->lock[ATOMIC_HASH_L2_SIZE]) {
88 return 1;
89 }
90 }
91 return 0;
92#else
93 return p >= &atomic_locks[0] && p < &atomic_locks[ATOMIC_HASH_SIZE]; 43 return p >= &atomic_locks[0] && p < &atomic_locks[ATOMIC_HASH_SIZE];
94#endif
95} 44}
96 45
97void __atomic_fault_unlock(int *irqlock_word) 46void __atomic_fault_unlock(int *irqlock_word)
@@ -110,33 +59,32 @@ static inline int *__atomic_setup(volatile void *v)
110 return __atomic_hashed_lock(v); 59 return __atomic_hashed_lock(v);
111} 60}
112 61
113int _atomic_xchg(atomic_t *v, int n) 62int _atomic_xchg(int *v, int n)
114{ 63{
115 return __atomic_xchg(&v->counter, __atomic_setup(v), n).val; 64 return __atomic_xchg(v, __atomic_setup(v), n).val;
116} 65}
117EXPORT_SYMBOL(_atomic_xchg); 66EXPORT_SYMBOL(_atomic_xchg);
118 67
119int _atomic_xchg_add(atomic_t *v, int i) 68int _atomic_xchg_add(int *v, int i)
120{ 69{
121 return __atomic_xchg_add(&v->counter, __atomic_setup(v), i).val; 70 return __atomic_xchg_add(v, __atomic_setup(v), i).val;
122} 71}
123EXPORT_SYMBOL(_atomic_xchg_add); 72EXPORT_SYMBOL(_atomic_xchg_add);
124 73
125int _atomic_xchg_add_unless(atomic_t *v, int a, int u) 74int _atomic_xchg_add_unless(int *v, int a, int u)
126{ 75{
127 /* 76 /*
128 * Note: argument order is switched here since it is easier 77 * Note: argument order is switched here since it is easier
129 * to use the first argument consistently as the "old value" 78 * to use the first argument consistently as the "old value"
130 * in the assembly, as is done for _atomic_cmpxchg(). 79 * in the assembly, as is done for _atomic_cmpxchg().
131 */ 80 */
132 return __atomic_xchg_add_unless(&v->counter, __atomic_setup(v), u, a) 81 return __atomic_xchg_add_unless(v, __atomic_setup(v), u, a).val;
133 .val;
134} 82}
135EXPORT_SYMBOL(_atomic_xchg_add_unless); 83EXPORT_SYMBOL(_atomic_xchg_add_unless);
136 84
137int _atomic_cmpxchg(atomic_t *v, int o, int n) 85int _atomic_cmpxchg(int *v, int o, int n)
138{ 86{
139 return __atomic_cmpxchg(&v->counter, __atomic_setup(v), o, n).val; 87 return __atomic_cmpxchg(v, __atomic_setup(v), o, n).val;
140} 88}
141EXPORT_SYMBOL(_atomic_cmpxchg); 89EXPORT_SYMBOL(_atomic_cmpxchg);
142 90
@@ -159,33 +107,32 @@ unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask)
159EXPORT_SYMBOL(_atomic_xor); 107EXPORT_SYMBOL(_atomic_xor);
160 108
161 109
162u64 _atomic64_xchg(atomic64_t *v, u64 n) 110u64 _atomic64_xchg(u64 *v, u64 n)
163{ 111{
164 return __atomic64_xchg(&v->counter, __atomic_setup(v), n); 112 return __atomic64_xchg(v, __atomic_setup(v), n);
165} 113}
166EXPORT_SYMBOL(_atomic64_xchg); 114EXPORT_SYMBOL(_atomic64_xchg);
167 115
168u64 _atomic64_xchg_add(atomic64_t *v, u64 i) 116u64 _atomic64_xchg_add(u64 *v, u64 i)
169{ 117{
170 return __atomic64_xchg_add(&v->counter, __atomic_setup(v), i); 118 return __atomic64_xchg_add(v, __atomic_setup(v), i);
171} 119}
172EXPORT_SYMBOL(_atomic64_xchg_add); 120EXPORT_SYMBOL(_atomic64_xchg_add);
173 121
174u64 _atomic64_xchg_add_unless(atomic64_t *v, u64 a, u64 u) 122u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u)
175{ 123{
176 /* 124 /*
177 * Note: argument order is switched here since it is easier 125 * Note: argument order is switched here since it is easier
178 * to use the first argument consistently as the "old value" 126 * to use the first argument consistently as the "old value"
179 * in the assembly, as is done for _atomic_cmpxchg(). 127 * in the assembly, as is done for _atomic_cmpxchg().
180 */ 128 */
181 return __atomic64_xchg_add_unless(&v->counter, __atomic_setup(v), 129 return __atomic64_xchg_add_unless(v, __atomic_setup(v), u, a);
182 u, a);
183} 130}
184EXPORT_SYMBOL(_atomic64_xchg_add_unless); 131EXPORT_SYMBOL(_atomic64_xchg_add_unless);
185 132
186u64 _atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n) 133u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n)
187{ 134{
188 return __atomic64_cmpxchg(&v->counter, __atomic_setup(v), o, n); 135 return __atomic64_cmpxchg(v, __atomic_setup(v), o, n);
189} 136}
190EXPORT_SYMBOL(_atomic64_cmpxchg); 137EXPORT_SYMBOL(_atomic64_cmpxchg);
191 138
@@ -208,54 +155,8 @@ struct __get_user __atomic_bad_address(int __user *addr)
208} 155}
209 156
210 157
211#if CHIP_HAS_CBOX_HOME_MAP()
212static int __init noatomichash(char *str)
213{
214 pr_warning("noatomichash is deprecated.\n");
215 return 1;
216}
217__setup("noatomichash", noatomichash);
218#endif
219
220void __init __init_atomic_per_cpu(void) 158void __init __init_atomic_per_cpu(void)
221{ 159{
222#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
223
224 unsigned int i;
225 int actual_cpu;
226
227 /*
228 * Before this is called from setup, we just have one lock for
229 * all atomic objects/operations. Here we replace the
230 * elements of atomic_lock_ptr so that they point at per_cpu
231 * integers. This seemingly over-complex approach stems from
232 * the fact that DEFINE_PER_CPU defines an entry for each cpu
233 * in the grid, not each cpu from 0..ATOMIC_HASH_SIZE-1. But
234 * for efficient hashing of atomics to their locks we want a
235 * compile time constant power of 2 for the size of this
236 * table, so we use ATOMIC_HASH_SIZE.
237 *
238 * Here we populate atomic_lock_ptr from the per cpu
239 * atomic_lock_pool, interspersing by actual cpu so that
240 * subsequent elements are homed on consecutive cpus.
241 */
242
243 actual_cpu = cpumask_first(cpu_possible_mask);
244
245 for (i = 0; i < ATOMIC_HASH_L1_SIZE; ++i) {
246 /*
247 * Preincrement to slightly bias against using cpu 0,
248 * which has plenty of stuff homed on it already.
249 */
250 actual_cpu = cpumask_next(actual_cpu, cpu_possible_mask);
251 if (actual_cpu >= nr_cpu_ids)
252 actual_cpu = cpumask_first(cpu_possible_mask);
253
254 atomic_lock_ptr[i] = &per_cpu(atomic_lock_pool, actual_cpu);
255 }
256
257#else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
258
259 /* Validate power-of-two and "bigger than cpus" assumption */ 160 /* Validate power-of-two and "bigger than cpus" assumption */
260 BUILD_BUG_ON(ATOMIC_HASH_SIZE & (ATOMIC_HASH_SIZE-1)); 161 BUILD_BUG_ON(ATOMIC_HASH_SIZE & (ATOMIC_HASH_SIZE-1));
261 BUG_ON(ATOMIC_HASH_SIZE < nr_cpu_ids); 162 BUG_ON(ATOMIC_HASH_SIZE < nr_cpu_ids);
@@ -279,6 +180,4 @@ void __init __init_atomic_per_cpu(void)
279 * That should not produce more indices than ATOMIC_HASH_SIZE. 180 * That should not produce more indices than ATOMIC_HASH_SIZE.
280 */ 181 */
281 BUILD_BUG_ON((PAGE_SIZE >> 3) > ATOMIC_HASH_SIZE); 182 BUILD_BUG_ON((PAGE_SIZE >> 3) > ATOMIC_HASH_SIZE);
282
283#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
284} 183}
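
What survives the cleanup is the simple scheme: TILEPro lacks native fetch-and-op instructions, so each atomic operation hashes its target address into the page-sized atomic_locks[] array and performs a locked read-modify-write. A portable sketch of address-hashed locking; the pthread mutexes and helper names are stand-ins for the kernel's spinlocks and the intvec_32.S fast path:

#include <stdint.h>
#include <pthread.h>

#define NLOCKS 1024     /* power of two, >= nr_cpu_ids, like ATOMIC_HASH_SIZE */

static pthread_mutex_t hashed_locks[NLOCKS] = {
        [0 ... NLOCKS - 1] = PTHREAD_MUTEX_INITIALIZER
};

/* Hash the variable's address to a lock; drop the low bits that are
 * always zero for an aligned int, as the kernel's mm-based hash does.
 */
static pthread_mutex_t *lock_for(volatile void *v)
{
        return &hashed_locks[((uintptr_t)v >> 2) & (NLOCKS - 1)];
}

static int hashed_cmpxchg(volatile int *v, int old, int new)
{
        pthread_mutex_t *lock = lock_for(v);
        int prev;

        pthread_mutex_lock(lock);
        prev = *v;
        if (prev == old)
                *v = new;
        pthread_mutex_unlock(lock);
        return prev;
}
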
diff --git a/arch/tile/lib/atomic_asm_32.S b/arch/tile/lib/atomic_asm_32.S
index 30638042691d..6bda3132cd61 100644
--- a/arch/tile/lib/atomic_asm_32.S
+++ b/arch/tile/lib/atomic_asm_32.S
@@ -164,6 +164,7 @@ STD_ENTRY_SECTION(__atomic\name, .text.atomic)
164 STD_ENDPROC(__atomic\name) 164 STD_ENDPROC(__atomic\name)
165 .ifc \bitwidth,32 165 .ifc \bitwidth,32
166 .pushsection __ex_table,"a" 166 .pushsection __ex_table,"a"
167 .align 4
167 .word 1b, __atomic\name 168 .word 1b, __atomic\name
168 .word 2b, __atomic\name 169 .word 2b, __atomic\name
169 .word __atomic\name, __atomic_bad_address 170 .word __atomic\name, __atomic_bad_address
diff --git a/arch/tile/lib/cacheflush.c b/arch/tile/lib/cacheflush.c
index 8f8ad814b139..9c0ec22009a5 100644
--- a/arch/tile/lib/cacheflush.c
+++ b/arch/tile/lib/cacheflush.c
@@ -36,7 +36,8 @@ static inline void force_load(char *p)
36 * core (if "!hfh") or homed via hash-for-home (if "hfh"), waiting 36 * core (if "!hfh") or homed via hash-for-home (if "hfh"), waiting
37 * until the memory controller holds the flushed values. 37 * until the memory controller holds the flushed values.
38 */ 38 */
39void finv_buffer_remote(void *buffer, size_t size, int hfh) 39void __attribute__((optimize("omit-frame-pointer")))
40finv_buffer_remote(void *buffer, size_t size, int hfh)
40{ 41{
41 char *p, *base; 42 char *p, *base;
42 size_t step_size, load_count; 43 size_t step_size, load_count;
@@ -147,18 +148,21 @@ void finv_buffer_remote(void *buffer, size_t size, int hfh)
147 force_load(p); 148 force_load(p);
148 149
149 /* 150 /*
150 * Repeat, but with inv's instead of loads, to get rid of the 151 * Repeat, but with finv's instead of loads, to get rid of the
151 * data we just loaded into our own cache and the old home L3. 152 * data we just loaded into our own cache and the old home L3.
152 * No need to unroll since inv's don't target a register. 153 * No need to unroll since finv's don't target a register.
154 * The finv's are guaranteed not to actually flush the data in
155 * the buffer back to their home, since we just read it, so the
156 * lines are clean in cache; we will only invalidate those lines.
153 */ 157 */
154 p = (char *)buffer + size - 1; 158 p = (char *)buffer + size - 1;
155 __insn_inv(p); 159 __insn_finv(p);
156 p -= step_size; 160 p -= step_size;
157 p = (char *)((unsigned long)p | (step_size - 1)); 161 p = (char *)((unsigned long)p | (step_size - 1));
158 for (; p >= base; p -= step_size) 162 for (; p >= base; p -= step_size)
159 __insn_inv(p); 163 __insn_finv(p);
160 164
161 /* Wait for the load+inv's (and thus finvs) to have completed. */ 165 /* Wait for these finv's (and thus the first finvs) to be done. */
162 __insn_mf(); 166 __insn_mf();
163 167
164#ifdef __tilegx__ 168#ifdef __tilegx__
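
finv_buffer_remote() works in two passes: load one byte per cache line to pull every line of the buffer into the local cache, then finv each line so the only surviving copy is at the memory controller. A condensed sketch of that shape, assuming the tile intrinsics seen above and a 64-byte line; the real routine walks backwards with a tuned step size to dodge victim-buffer conflicts, which is omitted here:

#include <stddef.h>

#define LINE 64         /* L2 line size assumed by this sketch */

static void finv_buffer_sketch(void *buffer, size_t size)
{
        char *base = (char *)buffer;
        char *p;

        /* Pass 1: a volatile load per line fetches it locally (the
         * real code forces the load with inline asm).
         */
        for (p = base; p < base + size; p += LINE)
                (void)*(volatile char *)p;
        __insn_mf();                    /* wait for the loads */

        /* Pass 2: flush-and-invalidate each now-clean line. */
        for (p = base; p < base + size; p += LINE)
                __insn_finv(p);
        __insn_mf();                    /* wait for the finvs */
}
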
diff --git a/arch/tile/lib/exports.c b/arch/tile/lib/exports.c
index a93b02a25222..82733c87d67e 100644
--- a/arch/tile/lib/exports.c
+++ b/arch/tile/lib/exports.c
@@ -22,7 +22,6 @@ EXPORT_SYMBOL(strnlen_user_asm);
22EXPORT_SYMBOL(strncpy_from_user_asm); 22EXPORT_SYMBOL(strncpy_from_user_asm);
23EXPORT_SYMBOL(clear_user_asm); 23EXPORT_SYMBOL(clear_user_asm);
24EXPORT_SYMBOL(flush_user_asm); 24EXPORT_SYMBOL(flush_user_asm);
25EXPORT_SYMBOL(inv_user_asm);
26EXPORT_SYMBOL(finv_user_asm); 25EXPORT_SYMBOL(finv_user_asm);
27 26
28/* arch/tile/kernel/entry.S */ 27/* arch/tile/kernel/entry.S */
@@ -34,6 +33,12 @@ EXPORT_SYMBOL(dump_stack);
34/* arch/tile/kernel/head.S */ 33/* arch/tile/kernel/head.S */
35EXPORT_SYMBOL(empty_zero_page); 34EXPORT_SYMBOL(empty_zero_page);
36 35
36#ifdef CONFIG_FUNCTION_TRACER
37/* arch/tile/kernel/mcount_64.S */
38#include <asm/ftrace.h>
39EXPORT_SYMBOL(__mcount);
40#endif /* CONFIG_FUNCTION_TRACER */
41
37/* arch/tile/lib/, various memcpy files */ 42/* arch/tile/lib/, various memcpy files */
38EXPORT_SYMBOL(memcpy); 43EXPORT_SYMBOL(memcpy);
39EXPORT_SYMBOL(__copy_to_user_inatomic); 44EXPORT_SYMBOL(__copy_to_user_inatomic);
diff --git a/arch/tile/lib/memchr_64.c b/arch/tile/lib/memchr_64.c
index 6f867dbf7c56..f8196b3a950e 100644
--- a/arch/tile/lib/memchr_64.c
+++ b/arch/tile/lib/memchr_64.c
@@ -36,7 +36,7 @@ void *memchr(const void *s, int c, size_t n)
36 p = (const uint64_t *)(s_int & -8); 36 p = (const uint64_t *)(s_int & -8);
37 37
38 /* Create eight copies of the byte for which we are looking. */ 38 /* Create eight copies of the byte for which we are looking. */
39 goal = 0x0101010101010101ULL * (uint8_t) c; 39 goal = copy_byte(c);
40 40
41 /* Read the first word, but munge it so that bytes before the array 41 /* Read the first word, but munge it so that bytes before the array
42 * will not match goal. 42 * will not match goal.
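
With copy_byte() filling a word with the goal byte, memchr can test eight bytes per load. A portable restatement of the word-at-a-time match for a little-endian machine, with the classic SWAR zero-byte test standing in for the tile compare instruction, and the alignment and tail handling stripped out:

#include <stdint.h>
#include <stddef.h>

#define ONES  0x0101010101010101ULL
#define HIGHS 0x8080808080808080ULL

/* Portable broadcast; the tile version uses shufflebytes instead. */
static uint64_t broadcast(uint8_t b)
{
        return ONES * b;
}

/* Nonzero iff some byte of x is zero; the lowest set bit belongs to
 * the first such byte (higher bits may be borrow artifacts).
 */
static uint64_t has_zero_byte(uint64_t x)
{
        return (x - ONES) & ~x & HIGHS;
}

/* Sketch: s must be 8-byte aligned and n a multiple of 8. */
static const void *find_byte(const uint64_t *s, size_t n, uint8_t c)
{
        uint64_t goal = broadcast(c);
        size_t i;

        for (i = 0; i < n / 8; i++) {
                uint64_t m = has_zero_byte(s[i] ^ goal);
                if (m)          /* first match = lowest set bit */
                        return (const char *)&s[i] +
                               (__builtin_ctzll(m) >> 3);
        }
        return NULL;
}
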
diff --git a/arch/tile/lib/memcpy_32.S b/arch/tile/lib/memcpy_32.S
index 2a419a6122db..a2771ae5da53 100644
--- a/arch/tile/lib/memcpy_32.S
+++ b/arch/tile/lib/memcpy_32.S
@@ -22,14 +22,6 @@
22 22
23#include <linux/linkage.h> 23#include <linux/linkage.h>
24 24
25/* On TILE64, we wrap these functions via arch/tile/lib/memcpy_tile64.c */
26#if !CHIP_HAS_COHERENT_LOCAL_CACHE()
27#define memcpy __memcpy_asm
28#define __copy_to_user_inatomic __copy_to_user_inatomic_asm
29#define __copy_from_user_inatomic __copy_from_user_inatomic_asm
30#define __copy_from_user_zeroing __copy_from_user_zeroing_asm
31#endif
32
33#define IS_MEMCPY 0 25#define IS_MEMCPY 0
34#define IS_COPY_FROM_USER 1 26#define IS_COPY_FROM_USER 1
35#define IS_COPY_FROM_USER_ZEROING 2 27#define IS_COPY_FROM_USER_ZEROING 2
@@ -44,6 +36,7 @@
44 */ 36 */
45#define EX \ 37#define EX \
46 .pushsection __ex_table, "a"; \ 38 .pushsection __ex_table, "a"; \
39 .align 4; \
47 .word 9f, memcpy_common_fixup; \ 40 .word 9f, memcpy_common_fixup; \
48 .popsection; \ 41 .popsection; \
49 9 42 9
@@ -158,12 +151,9 @@ EX: { sw r0, r3; addi r0, r0, 4; addi r2, r2, -4 }
158 151
159 { addi r3, r1, 60; andi r9, r9, -64 } 152 { addi r3, r1, 60; andi r9, r9, -64 }
160 153
161#if CHIP_HAS_WH64()
162 /* No need to prefetch dst, we'll just do the wh64 154 /* No need to prefetch dst, we'll just do the wh64
163 * right before we copy a line. 155 * right before we copy a line.
164 */ 156 */
165#endif
166
167EX: { lw r5, r3; addi r3, r3, 64; movei r4, 1 } 157EX: { lw r5, r3; addi r3, r3, 64; movei r4, 1 }
168 /* Intentionally stall for a few cycles to leave L2 cache alone. */ 158 /* Intentionally stall for a few cycles to leave L2 cache alone. */
169 { bnzt zero, .; move r27, lr } 159 { bnzt zero, .; move r27, lr }
@@ -171,21 +161,6 @@ EX: { lw r6, r3; addi r3, r3, 64 }
171 /* Intentionally stall for a few cycles to leave L2 cache alone. */ 161 /* Intentionally stall for a few cycles to leave L2 cache alone. */
172 { bnzt zero, . } 162 { bnzt zero, . }
173EX: { lw r7, r3; addi r3, r3, 64 } 163EX: { lw r7, r3; addi r3, r3, 64 }
174#if !CHIP_HAS_WH64()
175 /* Prefetch the dest */
176 /* Intentionally stall for a few cycles to leave L2 cache alone. */
177 { bnzt zero, . }
178 /* Use a real load to cause a TLB miss if necessary. We aren't using
179 * r28, so this should be fine.
180 */
181EX: { lw r28, r9; addi r9, r9, 64 }
182 /* Intentionally stall for a few cycles to leave L2 cache alone. */
183 { bnzt zero, . }
184 { prefetch r9; addi r9, r9, 64 }
185 /* Intentionally stall for a few cycles to leave L2 cache alone. */
186 { bnzt zero, . }
187 { prefetch r9; addi r9, r9, 64 }
188#endif
189 /* Intentionally stall for a few cycles to leave L2 cache alone. */ 164 /* Intentionally stall for a few cycles to leave L2 cache alone. */
190 { bz zero, .Lbig_loop2 } 165 { bz zero, .Lbig_loop2 }
191 166
@@ -286,13 +261,8 @@ EX: { lw r7, r3; addi r3, r3, 64 }
286 /* Fill second L1D line. */ 261 /* Fill second L1D line. */
287EX: { lw r17, r17; addi r1, r1, 48; mvz r3, r13, r1 } /* r17 = WORD_4 */ 262EX: { lw r17, r17; addi r1, r1, 48; mvz r3, r13, r1 } /* r17 = WORD_4 */
288 263
289#if CHIP_HAS_WH64()
290 /* Prepare destination line for writing. */ 264 /* Prepare destination line for writing. */
291EX: { wh64 r9; addi r9, r9, 64 } 265EX: { wh64 r9; addi r9, r9, 64 }
292#else
293 /* Prefetch dest line */
294 { prefetch r9; addi r9, r9, 64 }
295#endif
296 /* Load seven words that are L1D hits to cover wh64 L2 usage. */ 266 /* Load seven words that are L1D hits to cover wh64 L2 usage. */
297 267
298 /* Load the three remaining words from the last L1D line, which 268 /* Load the three remaining words from the last L1D line, which
@@ -330,16 +300,7 @@ EX: { lw r18, r1; addi r1, r1, 4 } /* r18 = WORD_8 */
330EX: { sw r0, r16; addi r0, r0, 4; add r16, r0, r2 } /* store(WORD_0) */ 300EX: { sw r0, r16; addi r0, r0, 4; add r16, r0, r2 } /* store(WORD_0) */
331EX: { sw r0, r13; addi r0, r0, 4; andi r16, r16, -64 } /* store(WORD_1) */ 301EX: { sw r0, r13; addi r0, r0, 4; andi r16, r16, -64 } /* store(WORD_1) */
332EX: { sw r0, r14; addi r0, r0, 4; slt_u r16, r9, r16 } /* store(WORD_2) */ 302EX: { sw r0, r14; addi r0, r0, 4; slt_u r16, r9, r16 } /* store(WORD_2) */
333#if CHIP_HAS_WH64()
334EX: { sw r0, r15; addi r0, r0, 4; addi r13, sp, -64 } /* store(WORD_3) */ 303EX: { sw r0, r15; addi r0, r0, 4; addi r13, sp, -64 } /* store(WORD_3) */
335#else
336 /* Back up the r9 to a cache line we are already storing to
337 * if it gets past the end of the dest vector. Strictly speaking,
338 * we don't need to back up to the start of a cache line, but it's free
339 * and tidy, so why not?
340 */
341EX: { sw r0, r15; addi r0, r0, 4; andi r13, r0, -64 } /* store(WORD_3) */
342#endif
343 /* Store second L1D line. */ 304 /* Store second L1D line. */
344EX: { sw r0, r17; addi r0, r0, 4; mvz r9, r16, r13 }/* store(WORD_4) */ 305EX: { sw r0, r17; addi r0, r0, 4; mvz r9, r16, r13 }/* store(WORD_4) */
345EX: { sw r0, r19; addi r0, r0, 4 } /* store(WORD_5) */ 306EX: { sw r0, r19; addi r0, r0, 4 } /* store(WORD_5) */
@@ -403,7 +364,6 @@ EX: { sb r0, r3; addi r0, r0, 1; addi r2, r2, -1 }
403 364
404.Ldest_is_word_aligned: 365.Ldest_is_word_aligned:
405 366
406#if CHIP_HAS_DWORD_ALIGN()
407EX: { andi r8, r0, 63; lwadd_na r6, r1, 4} 367EX: { andi r8, r0, 63; lwadd_na r6, r1, 4}
408 { slti_u r9, r2, 64; bz r8, .Ldest_is_L2_line_aligned } 368 { slti_u r9, r2, 64; bz r8, .Ldest_is_L2_line_aligned }
409 369
@@ -511,26 +471,6 @@ EX: { swadd r0, r13, 4; addi r2, r2, -32 }
511 /* Move r1 back to the point where it corresponds to r0. */ 471 /* Move r1 back to the point where it corresponds to r0. */
512 { addi r1, r1, -4 } 472 { addi r1, r1, -4 }
513 473
514#else /* !CHIP_HAS_DWORD_ALIGN() */
515
516 /* Compute right/left shift counts and load initial source words. */
517 { andi r5, r1, -4; andi r3, r1, 3 }
518EX: { lw r6, r5; addi r5, r5, 4; shli r3, r3, 3 }
519EX: { lw r7, r5; addi r5, r5, 4; sub r4, zero, r3 }
520
521 /* Load and store one word at a time, using shifts and ORs
522 * to correct for the misaligned src.
523 */
524.Lcopy_unaligned_src_loop:
525 { shr r6, r6, r3; shl r8, r7, r4 }
526EX: { lw r7, r5; or r8, r8, r6; move r6, r7 }
527EX: { sw r0, r8; addi r0, r0, 4; addi r2, r2, -4 }
528 { addi r5, r5, 4; slti_u r8, r2, 8 }
529 { bzt r8, .Lcopy_unaligned_src_loop; addi r1, r1, 4 }
530
531 { bz r2, .Lcopy_unaligned_done }
532#endif /* !CHIP_HAS_DWORD_ALIGN() */
533
534 /* Fall through */ 474 /* Fall through */
535 475
536/* 476/*
@@ -614,5 +554,6 @@ memcpy_fixup_loop:
614 .size memcpy_common_fixup, . - memcpy_common_fixup 554 .size memcpy_common_fixup, . - memcpy_common_fixup
615 555
616 .section __ex_table,"a" 556 .section __ex_table,"a"
557 .align 4
617 .word .Lcfu, .Lcopy_from_user_fixup_zero_remainder 558 .word .Lcfu, .Lcopy_from_user_fixup_zero_remainder
618 .word .Lctu, .Lcopy_to_user_fixup_done 559 .word .Lctu, .Lcopy_to_user_fixup_done
diff --git a/arch/tile/lib/memcpy_64.c b/arch/tile/lib/memcpy_64.c
index c79b8e7c6828..4815354b8cd2 100644
--- a/arch/tile/lib/memcpy_64.c
+++ b/arch/tile/lib/memcpy_64.c
@@ -18,14 +18,17 @@
18/* EXPORT_SYMBOL() is in arch/tile/lib/exports.c since this should be asm. */ 18/* EXPORT_SYMBOL() is in arch/tile/lib/exports.c since this should be asm. */
19 19
20/* Must be 8 bytes in size. */ 20/* Must be 8 bytes in size. */
21#define word_t uint64_t 21#define op_t uint64_t
22 22
23#if CHIP_L2_LINE_SIZE() != 64 && CHIP_L2_LINE_SIZE() != 128 23/* Threshold value for when to enter the unrolled loops. */
24#error "Assumes 64 or 128 byte line size" 24#define OP_T_THRES 16
25
26#if CHIP_L2_LINE_SIZE() != 64
27#error "Assumes 64 byte line size"
25#endif 28#endif
26 29
27/* How many cache lines ahead should we prefetch? */ 30/* How many cache lines ahead should we prefetch? */
28#define PREFETCH_LINES_AHEAD 3 31#define PREFETCH_LINES_AHEAD 4
29 32
30/* 33/*
31 * Provide "base versions" of load and store for the normal code path. 34 * Provide "base versions" of load and store for the normal code path.
@@ -51,15 +54,16 @@ void *memcpy(void *__restrict dstv, const void *__restrict srcv, size_t n)
51 * macros to return a count of uncopied bytes due to mm fault. 54 * macros to return a count of uncopied bytes due to mm fault.
52 */ 55 */
53#define RETVAL 0 56#define RETVAL 0
54int USERCOPY_FUNC(void *__restrict dstv, const void *__restrict srcv, size_t n) 57int __attribute__((optimize("omit-frame-pointer")))
58USERCOPY_FUNC(void *__restrict dstv, const void *__restrict srcv, size_t n)
55#endif 59#endif
56{ 60{
57 char *__restrict dst1 = (char *)dstv; 61 char *__restrict dst1 = (char *)dstv;
58 const char *__restrict src1 = (const char *)srcv; 62 const char *__restrict src1 = (const char *)srcv;
59 const char *__restrict src1_end; 63 const char *__restrict src1_end;
60 const char *__restrict prefetch; 64 const char *__restrict prefetch;
61 word_t *__restrict dst8; /* 8-byte pointer to destination memory. */ 65 op_t *__restrict dst8; /* 8-byte pointer to destination memory. */
62 word_t final; /* Final bytes to write to trailing word, if any */ 66 op_t final; /* Final bytes to write to trailing word, if any */
63 long i; 67 long i;
64 68
65 if (n < 16) { 69 if (n < 16) {
@@ -79,104 +83,228 @@ int USERCOPY_FUNC(void *__restrict dstv, const void *__restrict srcv, size_t n)
79 for (i = 0; i < PREFETCH_LINES_AHEAD; i++) { 83 for (i = 0; i < PREFETCH_LINES_AHEAD; i++) {
80 __insn_prefetch(prefetch); 84 __insn_prefetch(prefetch);
81 prefetch += CHIP_L2_LINE_SIZE(); 85 prefetch += CHIP_L2_LINE_SIZE();
82 prefetch = (prefetch > src1_end) ? prefetch : src1; 86 prefetch = (prefetch < src1_end) ? prefetch : src1;
83 } 87 }
84 88
85 /* Copy bytes until dst is word-aligned. */ 89 /* Copy bytes until dst is word-aligned. */
86 for (; (uintptr_t)dst1 & (sizeof(word_t) - 1); n--) 90 for (; (uintptr_t)dst1 & (sizeof(op_t) - 1); n--)
87 ST1(dst1++, LD1(src1++)); 91 ST1(dst1++, LD1(src1++));
88 92
89 /* 8-byte pointer to destination memory. */ 93 /* 8-byte pointer to destination memory. */
90 dst8 = (word_t *)dst1; 94 dst8 = (op_t *)dst1;
91 95
92 if (__builtin_expect((uintptr_t)src1 & (sizeof(word_t) - 1), 0)) { 96 if (__builtin_expect((uintptr_t)src1 & (sizeof(op_t) - 1), 0)) {
93 /* 97 /* Unaligned copy. */
94 * Misaligned copy. Copy 8 bytes at a time, but don't 98
95 * bother with other fanciness. 99 op_t tmp0 = 0, tmp1 = 0, tmp2, tmp3;
96 * 100 const op_t *src8 = (const op_t *) ((uintptr_t)src1 &
97 * TODO: Consider prefetching and using wh64 as well. 101 -sizeof(op_t));
98 */ 102 const void *srci = (void *)src1;
99 103 int m;
100 /* Create an aligned src8. */ 104
101 const word_t *__restrict src8 = 105 m = (CHIP_L2_LINE_SIZE() << 2) -
102 (const word_t *)((uintptr_t)src1 & -sizeof(word_t)); 106 (((uintptr_t)dst8) & ((CHIP_L2_LINE_SIZE() << 2) - 1));
103 word_t b; 107 m = (n < m) ? n : m;
104 108 m /= sizeof(op_t);
105 word_t a = LD8(src8++); 109
106 for (; n >= sizeof(word_t); n -= sizeof(word_t)) { 110 /* Copy until 'dst' is cache-line-aligned. */
107 b = LD8(src8++); 111 n -= (sizeof(op_t) * m);
108 a = __insn_dblalign(a, b, src1); 112
109 ST8(dst8++, a); 113 switch (m % 4) {
110 a = b; 114 case 0:
115 if (__builtin_expect(!m, 0))
116 goto _M0;
117 tmp1 = LD8(src8++);
118 tmp2 = LD8(src8++);
119 goto _8B3;
120 case 2:
121 m += 2;
122 tmp3 = LD8(src8++);
123 tmp0 = LD8(src8++);
124 goto _8B1;
125 case 3:
126 m += 1;
127 tmp2 = LD8(src8++);
128 tmp3 = LD8(src8++);
129 goto _8B2;
130 case 1:
131 m--;
132 tmp0 = LD8(src8++);
133 tmp1 = LD8(src8++);
134 if (__builtin_expect(!m, 0))
135 goto _8B0;
136 }
137
138 do {
139 tmp2 = LD8(src8++);
140 tmp0 = __insn_dblalign(tmp0, tmp1, srci);
141 ST8(dst8++, tmp0);
142_8B3:
143 tmp3 = LD8(src8++);
144 tmp1 = __insn_dblalign(tmp1, tmp2, srci);
145 ST8(dst8++, tmp1);
146_8B2:
147 tmp0 = LD8(src8++);
148 tmp2 = __insn_dblalign(tmp2, tmp3, srci);
149 ST8(dst8++, tmp2);
150_8B1:
151 tmp1 = LD8(src8++);
152 tmp3 = __insn_dblalign(tmp3, tmp0, srci);
153 ST8(dst8++, tmp3);
154 m -= 4;
155 } while (m);
156
157_8B0:
158 tmp0 = __insn_dblalign(tmp0, tmp1, srci);
159 ST8(dst8++, tmp0);
160 src8--;
161
162_M0:
163 if (__builtin_expect(n >= CHIP_L2_LINE_SIZE(), 0)) {
164 op_t tmp4, tmp5, tmp6, tmp7, tmp8;
165
166 prefetch = ((const char *)src8) +
167 CHIP_L2_LINE_SIZE() * PREFETCH_LINES_AHEAD;
168
169 for (tmp0 = LD8(src8++); n >= CHIP_L2_LINE_SIZE();
170 n -= CHIP_L2_LINE_SIZE()) {
171 /* Prefetch and advance to next line to
172 prefetch, but don't go past the end. */
173 __insn_prefetch(prefetch);
174
175 /* Make sure prefetch got scheduled
176 earlier. */
177 __asm__ ("" : : : "memory");
178
179 prefetch += CHIP_L2_LINE_SIZE();
180 prefetch = (prefetch < src1_end) ? prefetch :
181 (const char *) src8;
182
183 tmp1 = LD8(src8++);
184 tmp2 = LD8(src8++);
185 tmp3 = LD8(src8++);
186 tmp4 = LD8(src8++);
187 tmp5 = LD8(src8++);
188 tmp6 = LD8(src8++);
189 tmp7 = LD8(src8++);
190 tmp8 = LD8(src8++);
191
192 tmp0 = __insn_dblalign(tmp0, tmp1, srci);
193 tmp1 = __insn_dblalign(tmp1, tmp2, srci);
194 tmp2 = __insn_dblalign(tmp2, tmp3, srci);
195 tmp3 = __insn_dblalign(tmp3, tmp4, srci);
196 tmp4 = __insn_dblalign(tmp4, tmp5, srci);
197 tmp5 = __insn_dblalign(tmp5, tmp6, srci);
198 tmp6 = __insn_dblalign(tmp6, tmp7, srci);
199 tmp7 = __insn_dblalign(tmp7, tmp8, srci);
200
201 __insn_wh64(dst8);
202
203 ST8(dst8++, tmp0);
204 ST8(dst8++, tmp1);
205 ST8(dst8++, tmp2);
206 ST8(dst8++, tmp3);
207 ST8(dst8++, tmp4);
208 ST8(dst8++, tmp5);
209 ST8(dst8++, tmp6);
210 ST8(dst8++, tmp7);
211
212 tmp0 = tmp8;
213 }
214 src8--;
215 }
216
217 /* Copy the remaining 8-byte chunks. */
218 if (n >= sizeof(op_t)) {
219 tmp0 = LD8(src8++);
220 for (; n >= sizeof(op_t); n -= sizeof(op_t)) {
221 tmp1 = LD8(src8++);
222 tmp0 = __insn_dblalign(tmp0, tmp1, srci);
223 ST8(dst8++, tmp0);
224 tmp0 = tmp1;
225 }
226 src8--;
111 } 227 }
112 228
113 if (n == 0) 229 if (n == 0)
114 return RETVAL; 230 return RETVAL;
115 231
116 b = ((const char *)src8 <= src1_end) ? *src8 : 0; 232 tmp0 = LD8(src8++);
233 tmp1 = ((const char *)src8 <= src1_end)
234 ? LD8((op_t *)src8) : 0;
235 final = __insn_dblalign(tmp0, tmp1, srci);
117 236
118 /*
119 * Final source bytes to write to trailing partial
120 * word, if any.
121 */
122 final = __insn_dblalign(a, b, src1);
123 } else { 237 } else {
124 /* Aligned copy. */ 238 /* Aligned copy. */
125 239
126 const word_t* __restrict src8 = (const word_t *)src1; 240 const op_t *__restrict src8 = (const op_t *)src1;
127 241
128 /* src8 and dst8 are both word-aligned. */ 242 /* src8 and dst8 are both word-aligned. */
129 if (n >= CHIP_L2_LINE_SIZE()) { 243 if (n >= CHIP_L2_LINE_SIZE()) {
130 /* Copy until 'dst' is cache-line-aligned. */ 244 /* Copy until 'dst' is cache-line-aligned. */
131 for (; (uintptr_t)dst8 & (CHIP_L2_LINE_SIZE() - 1); 245 for (; (uintptr_t)dst8 & (CHIP_L2_LINE_SIZE() - 1);
132 n -= sizeof(word_t)) 246 n -= sizeof(op_t))
133 ST8(dst8++, LD8(src8++)); 247 ST8(dst8++, LD8(src8++));
134 248
135 for (; n >= CHIP_L2_LINE_SIZE(); ) { 249 for (; n >= CHIP_L2_LINE_SIZE(); ) {
136 __insn_wh64(dst8); 250 op_t tmp0, tmp1, tmp2, tmp3;
251 op_t tmp4, tmp5, tmp6, tmp7;
137 252
138 /* 253 /*
139 * Prefetch and advance to next line 254 * Prefetch and advance to next line
140 * to prefetch, but don't go past the end 255 * to prefetch, but don't go past the
256 * end.
141 */ 257 */
142 __insn_prefetch(prefetch); 258 __insn_prefetch(prefetch);
259
260 /* Make sure prefetch got scheduled
261 earlier. */
262 __asm__ ("" : : : "memory");
263
143 prefetch += CHIP_L2_LINE_SIZE(); 264 prefetch += CHIP_L2_LINE_SIZE();
144 prefetch = (prefetch > src1_end) ? prefetch : 265 prefetch = (prefetch < src1_end) ? prefetch :
145 (const char *)src8; 266 (const char *)src8;
146 267
147 /* 268 /*
148 * Copy an entire cache line. Manually 269 * Do all the loads before wh64. This
149 * unrolled to avoid idiosyncracies of 270 * is necessary if [src8, src8+7] and
150 * compiler unrolling. 271 * [dst8, dst8+7] share the same cache
272 * line and dst8 <= src8, as can be
273 * the case when called from memmove,
274 * or with code tested on x86 whose
275 * memcpy always works with forward
276 * copies.
151 */ 277 */
152#define COPY_WORD(offset) ({ ST8(dst8+offset, LD8(src8+offset)); n -= 8; }) 278 tmp0 = LD8(src8++);
153 COPY_WORD(0); 279 tmp1 = LD8(src8++);
154 COPY_WORD(1); 280 tmp2 = LD8(src8++);
155 COPY_WORD(2); 281 tmp3 = LD8(src8++);
156 COPY_WORD(3); 282 tmp4 = LD8(src8++);
157 COPY_WORD(4); 283 tmp5 = LD8(src8++);
158 COPY_WORD(5); 284 tmp6 = LD8(src8++);
159 COPY_WORD(6); 285 tmp7 = LD8(src8++);
160 COPY_WORD(7); 286
161#if CHIP_L2_LINE_SIZE() == 128 287 /* wh64 and wait for tmp7 load completion. */
162 COPY_WORD(8); 288 __asm__ ("move %0, %0; wh64 %1\n"
163 COPY_WORD(9); 289 : : "r"(tmp7), "r"(dst8));
164 COPY_WORD(10);
165 COPY_WORD(11);
166 COPY_WORD(12);
167 COPY_WORD(13);
168 COPY_WORD(14);
169 COPY_WORD(15);
170#elif CHIP_L2_LINE_SIZE() != 64
171# error Fix code that assumes particular L2 cache line sizes
172#endif
173 290
174 dst8 += CHIP_L2_LINE_SIZE() / sizeof(word_t); 291 ST8(dst8++, tmp0);
175 src8 += CHIP_L2_LINE_SIZE() / sizeof(word_t); 292 ST8(dst8++, tmp1);
293 ST8(dst8++, tmp2);
294 ST8(dst8++, tmp3);
295 ST8(dst8++, tmp4);
296 ST8(dst8++, tmp5);
297 ST8(dst8++, tmp6);
298 ST8(dst8++, tmp7);
299
300 n -= CHIP_L2_LINE_SIZE();
176 } 301 }
302#if CHIP_L2_LINE_SIZE() != 64
303# error "Fix code that assumes particular L2 cache line size."
304#endif
177 } 305 }
178 306
179 for (; n >= sizeof(word_t); n -= sizeof(word_t)) 307 for (; n >= sizeof(op_t); n -= sizeof(op_t))
180 ST8(dst8++, LD8(src8++)); 308 ST8(dst8++, LD8(src8++));
181 309
182 if (__builtin_expect(n == 0, 1)) 310 if (__builtin_expect(n == 0, 1))
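
The heart of the rewrite is the aligned cache-line loop: issue all eight loads, only then wh64 the destination line, then do the eight stores. Doing the loads first keeps a forward overlapping copy (the memmove case named in the comment) from having its source zeroed by wh64 before it has been read. A sketch of the loop, assuming the tile intrinsics and 64-byte lines; clamping the prefetch pointer to the buffer end, as the real code does, is omitted:

#include <stdint.h>
#include <stddef.h>

static void copy_lines(uint64_t *dst8, const uint64_t *src8, size_t n)
{
        /* Run four lines ahead, like PREFETCH_LINES_AHEAD. */
        const char *pf = (const char *)src8 + 4 * 64;

        while (n >= 64) {
                uint64_t t0, t1, t2, t3, t4, t5, t6, t7;

                __insn_prefetch(pf);
                __asm__ ("" : : : "memory");    /* keep prefetch early */
                pf += 64;

                t0 = src8[0]; t1 = src8[1]; t2 = src8[2]; t3 = src8[3];
                t4 = src8[4]; t5 = src8[5]; t6 = src8[6]; t7 = src8[7];

                __insn_wh64(dst8);      /* claim the whole dest line */

                dst8[0] = t0; dst8[1] = t1; dst8[2] = t2; dst8[3] = t3;
                dst8[4] = t4; dst8[5] = t5; dst8[6] = t6; dst8[7] = t7;

                src8 += 8; dst8 += 8; n -= 64;
        }
}
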
diff --git a/arch/tile/lib/memcpy_tile64.c b/arch/tile/lib/memcpy_tile64.c
deleted file mode 100644
index 3bc4b4e40d93..000000000000
--- a/arch/tile/lib/memcpy_tile64.c
+++ /dev/null
@@ -1,276 +0,0 @@
1/*
2 * Copyright 2010 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15#include <linux/string.h>
16#include <linux/smp.h>
17#include <linux/module.h>
18#include <linux/uaccess.h>
19#include <asm/fixmap.h>
20#include <asm/kmap_types.h>
21#include <asm/tlbflush.h>
22#include <hv/hypervisor.h>
23#include <arch/chip.h>
24
25
26#if !CHIP_HAS_COHERENT_LOCAL_CACHE()
27
28/* Defined in memcpy.S */
29extern unsigned long __memcpy_asm(void *to, const void *from, unsigned long n);
30extern unsigned long __copy_to_user_inatomic_asm(
31 void __user *to, const void *from, unsigned long n);
32extern unsigned long __copy_from_user_inatomic_asm(
33 void *to, const void __user *from, unsigned long n);
34extern unsigned long __copy_from_user_zeroing_asm(
35 void *to, const void __user *from, unsigned long n);
36
37typedef unsigned long (*memcpy_t)(void *, const void *, unsigned long);
38
39/* Size above which to consider TLB games for performance */
40#define LARGE_COPY_CUTOFF 2048
41
42/* Communicate to the simulator what we are trying to do. */
43#define sim_allow_multiple_caching(b) \
44 __insn_mtspr(SPR_SIM_CONTROL, \
45 SIM_CONTROL_ALLOW_MULTIPLE_CACHING | ((b) << _SIM_CONTROL_OPERATOR_BITS))
46
47/*
48 * Copy memory by briefly enabling incoherent cacheline-at-a-time mode.
49 *
50 * We set up our own source and destination PTEs that we fully control.
51 * This is the only way to guarantee that we don't race with another
52 * thread that is modifying the PTE; we can't afford to try the
53 * copy_{to,from}_user() technique of catching the interrupt, since
54 * we must run with interrupts disabled to avoid the risk of some
55 * other code seeing the incoherent data in our cache. (Recall that
56 * our cache is indexed by PA, so even if the other code doesn't use
57 * our kmap_atomic virtual addresses, they'll still hit in cache using
58 * the normal VAs that aren't supposed to hit in cache.)
59 */
60static void memcpy_multicache(void *dest, const void *source,
61 pte_t dst_pte, pte_t src_pte, int len)
62{
63 int idx;
64 unsigned long flags, newsrc, newdst;
65 pmd_t *pmdp;
66 pte_t *ptep;
67 int type0, type1;
68 int cpu = get_cpu();
69
70 /*
71 * Disable interrupts so that we don't recurse into memcpy()
72 * in an interrupt handler, nor accidentally reference
73 * the PA of the source from an interrupt routine. Also
74 * notify the simulator that we're playing games so we don't
75 * generate spurious coherency warnings.
76 */
77 local_irq_save(flags);
78 sim_allow_multiple_caching(1);
79
80 /* Set up the new dest mapping */
81 type0 = kmap_atomic_idx_push();
82 idx = FIX_KMAP_BEGIN + (KM_TYPE_NR * cpu) + type0;
83 newdst = __fix_to_virt(idx) + ((unsigned long)dest & (PAGE_SIZE-1));
84 pmdp = pmd_offset(pud_offset(pgd_offset_k(newdst), newdst), newdst);
85 ptep = pte_offset_kernel(pmdp, newdst);
86 if (pte_val(*ptep) != pte_val(dst_pte)) {
87 set_pte(ptep, dst_pte);
88 local_flush_tlb_page(NULL, newdst, PAGE_SIZE);
89 }
90
91 /* Set up the new source mapping */
92 type1 = kmap_atomic_idx_push();
93 idx += (type0 - type1);
94 src_pte = hv_pte_set_nc(src_pte);
95 src_pte = hv_pte_clear_writable(src_pte); /* be paranoid */
96 newsrc = __fix_to_virt(idx) + ((unsigned long)source & (PAGE_SIZE-1));
97 pmdp = pmd_offset(pud_offset(pgd_offset_k(newsrc), newsrc), newsrc);
98 ptep = pte_offset_kernel(pmdp, newsrc);
99 __set_pte(ptep, src_pte); /* set_pte() would be confused by this */
100 local_flush_tlb_page(NULL, newsrc, PAGE_SIZE);
101
102 /* Actually move the data. */
103 __memcpy_asm((void *)newdst, (const void *)newsrc, len);
104
105 /*
106 * Remap the source as locally-cached and not OLOC'ed so that
107 * we can inval without also invaling the remote cpu's cache.
108 * This also avoids known errata with inv'ing cacheable oloc data.
109 */
110 src_pte = hv_pte_set_mode(src_pte, HV_PTE_MODE_CACHE_NO_L3);
111 src_pte = hv_pte_set_writable(src_pte); /* need write access for inv */
112 __set_pte(ptep, src_pte); /* set_pte() would be confused by this */
113 local_flush_tlb_page(NULL, newsrc, PAGE_SIZE);
114
115 /*
116 * Do the actual invalidation, covering the full L2 cache line
117 * at the end since __memcpy_asm() is somewhat aggressive.
118 */
119 __inv_buffer((void *)newsrc, len);
120
121 /*
122 * We're done: notify the simulator that all is back to normal,
123 * and re-enable interrupts and pre-emption.
124 */
125 kmap_atomic_idx_pop();
126 kmap_atomic_idx_pop();
127 sim_allow_multiple_caching(0);
128 local_irq_restore(flags);
129 put_cpu();
130}
131
132/*
133 * Identify large copies from remotely-cached memory, and copy them
134 * via memcpy_multicache() if they look good, otherwise fall back
135 * to the particular kind of copying passed as the memcpy_t function.
136 */
137static unsigned long fast_copy(void *dest, const void *source, int len,
138 memcpy_t func)
139{
140 /*
141 * Check if it's big enough to bother with. We may end up doing a
142 * small copy via TLB manipulation if we're near a page boundary,
143 * but presumably we'll make it up when we hit the second page.
144 */
145 while (len >= LARGE_COPY_CUTOFF) {
146 int copy_size, bytes_left_on_page;
147 pte_t *src_ptep, *dst_ptep;
148 pte_t src_pte, dst_pte;
149 struct page *src_page, *dst_page;
150
151 /* Is the source page oloc'ed to a remote cpu? */
152retry_source:
153 src_ptep = virt_to_pte(current->mm, (unsigned long)source);
154 if (src_ptep == NULL)
155 break;
156 src_pte = *src_ptep;
157 if (!hv_pte_get_present(src_pte) ||
158 !hv_pte_get_readable(src_pte) ||
159 hv_pte_get_mode(src_pte) != HV_PTE_MODE_CACHE_TILE_L3)
160 break;
161 if (get_remote_cache_cpu(src_pte) == smp_processor_id())
162 break;
163 src_page = pfn_to_page(pte_pfn(src_pte));
164 get_page(src_page);
165 if (pte_val(src_pte) != pte_val(*src_ptep)) {
166 put_page(src_page);
167 goto retry_source;
168 }
169 if (pte_huge(src_pte)) {
170 /* Adjust the PTE to correspond to a small page */
171 int pfn = pte_pfn(src_pte);
172 pfn += (((unsigned long)source & (HPAGE_SIZE-1))
173 >> PAGE_SHIFT);
174 src_pte = pfn_pte(pfn, src_pte);
175 src_pte = pte_mksmall(src_pte);
176 }
177
178 /* Is the destination page writable? */
179retry_dest:
180 dst_ptep = virt_to_pte(current->mm, (unsigned long)dest);
181 if (dst_ptep == NULL) {
182 put_page(src_page);
183 break;
184 }
185 dst_pte = *dst_ptep;
186 if (!hv_pte_get_present(dst_pte) ||
187 !hv_pte_get_writable(dst_pte)) {
188 put_page(src_page);
189 break;
190 }
191 dst_page = pfn_to_page(pte_pfn(dst_pte));
192 if (dst_page == src_page) {
193 /*
194 * Source and dest are on the same page; this
195 * potentially exposes us to incoherence if any
196 * part of src and dest overlap on a cache line.
197 * Just give up rather than trying to be precise.
198 */
199 put_page(src_page);
200 break;
201 }
202 get_page(dst_page);
203 if (pte_val(dst_pte) != pte_val(*dst_ptep)) {
204 put_page(dst_page);
205 goto retry_dest;
206 }
207 if (pte_huge(dst_pte)) {
208 /* Adjust the PTE to correspond to a small page */
209 int pfn = pte_pfn(dst_pte);
210 pfn += (((unsigned long)dest & (HPAGE_SIZE-1))
211 >> PAGE_SHIFT);
212 dst_pte = pfn_pte(pfn, dst_pte);
213 dst_pte = pte_mksmall(dst_pte);
214 }
215
216 /* All looks good: create a cachable PTE and copy from it */
217 copy_size = len;
218 bytes_left_on_page =
219 PAGE_SIZE - (((int)source) & (PAGE_SIZE-1));
220 if (copy_size > bytes_left_on_page)
221 copy_size = bytes_left_on_page;
222 bytes_left_on_page =
223 PAGE_SIZE - (((int)dest) & (PAGE_SIZE-1));
224 if (copy_size > bytes_left_on_page)
225 copy_size = bytes_left_on_page;
226 memcpy_multicache(dest, source, dst_pte, src_pte, copy_size);
227
228 /* Release the pages */
229 put_page(dst_page);
230 put_page(src_page);
231
232 /* Continue on the next page */
233 dest += copy_size;
234 source += copy_size;
235 len -= copy_size;
236 }
237
238 return func(dest, source, len);
239}
240
241void *memcpy(void *to, const void *from, __kernel_size_t n)
242{
243 if (n < LARGE_COPY_CUTOFF)
244 return (void *)__memcpy_asm(to, from, n);
245 else
246 return (void *)fast_copy(to, from, n, __memcpy_asm);
247}
248
249unsigned long __copy_to_user_inatomic(void __user *to, const void *from,
250 unsigned long n)
251{
252 if (n < LARGE_COPY_CUTOFF)
253 return __copy_to_user_inatomic_asm(to, from, n);
254 else
255 return fast_copy(to, from, n, __copy_to_user_inatomic_asm);
256}
257
258unsigned long __copy_from_user_inatomic(void *to, const void __user *from,
259 unsigned long n)
260{
261 if (n < LARGE_COPY_CUTOFF)
262 return __copy_from_user_inatomic_asm(to, from, n);
263 else
264 return fast_copy(to, from, n, __copy_from_user_inatomic_asm);
265}
266
267unsigned long __copy_from_user_zeroing(void *to, const void __user *from,
268 unsigned long n)
269{
270 if (n < LARGE_COPY_CUTOFF)
271 return __copy_from_user_zeroing_asm(to, from, n);
272 else
273 return fast_copy(to, from, n, __copy_from_user_zeroing_asm);
274}
275
276#endif /* !CHIP_HAS_COHERENT_LOCAL_CACHE() */
diff --git a/arch/tile/lib/memcpy_user_64.c b/arch/tile/lib/memcpy_user_64.c
index 37440caa7370..88c7016492c4 100644
--- a/arch/tile/lib/memcpy_user_64.c
+++ b/arch/tile/lib/memcpy_user_64.c
@@ -31,6 +31,7 @@
31 ".pushsection .coldtext.memcpy,\"ax\";" \ 31 ".pushsection .coldtext.memcpy,\"ax\";" \
32 "2: { move r0, %2; jrp lr };" \ 32 "2: { move r0, %2; jrp lr };" \
33 ".section __ex_table,\"a\";" \ 33 ".section __ex_table,\"a\";" \
34 ".align 8;" \
34 ".quad 1b, 2b;" \ 35 ".quad 1b, 2b;" \
35 ".popsection" \ 36 ".popsection" \
36 : "=m" (*(p)) : "r" (v), "r" (n)); \ 37 : "=m" (*(p)) : "r" (v), "r" (n)); \
@@ -43,6 +44,7 @@
43 ".pushsection .coldtext.memcpy,\"ax\";" \ 44 ".pushsection .coldtext.memcpy,\"ax\";" \
44 "2: { move r0, %2; jrp lr };" \ 45 "2: { move r0, %2; jrp lr };" \
45 ".section __ex_table,\"a\";" \ 46 ".section __ex_table,\"a\";" \
47 ".align 8;" \
46 ".quad 1b, 2b;" \ 48 ".quad 1b, 2b;" \
47 ".popsection" \ 49 ".popsection" \
48 : "=r" (__v) : "m" (*(p)), "r" (n)); \ 50 : "=r" (__v) : "m" (*(p)), "r" (n)); \
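
The new .align directives matter because the fault path treats __ex_table as an array of address pairs mapping a faulting instruction to its fixup; an unaligned entry would be read as two halves of different records. Conceptually it works as in this sketch (field names are illustrative, and the real kernel sorts the table and binary-searches it rather than scanning):

struct exception_table_entry {
        unsigned long insn;     /* address that may fault */
        unsigned long fixup;    /* where to resume if it does */
};

extern struct exception_table_entry __start___ex_table[];
extern struct exception_table_entry __stop___ex_table[];

static unsigned long find_fixup(unsigned long fault_pc)
{
        struct exception_table_entry *e;

        for (e = __start___ex_table; e < __stop___ex_table; e++)
                if (e->insn == fault_pc)
                        return e->fixup;
        return 0;       /* no fixup: a genuine kernel fault */
}
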
diff --git a/arch/tile/lib/memset_32.c b/arch/tile/lib/memset_32.c
index 57dbb3a5bff8..2042bfe6595f 100644
--- a/arch/tile/lib/memset_32.c
+++ b/arch/tile/lib/memset_32.c
@@ -12,13 +12,10 @@
12 * more details. 12 * more details.
13 */ 13 */
14 14
15#include <arch/chip.h>
16
17#include <linux/types.h> 15#include <linux/types.h>
18#include <linux/string.h> 16#include <linux/string.h>
19#include <linux/module.h> 17#include <linux/module.h>
20 18#include <arch/chip.h>
21#undef memset
22 19
23void *memset(void *s, int c, size_t n) 20void *memset(void *s, int c, size_t n)
24{ 21{
@@ -26,11 +23,7 @@ void *memset(void *s, int c, size_t n)
26 int n32; 23 int n32;
27 uint32_t v16, v32; 24 uint32_t v16, v32;
28 uint8_t *out8 = s; 25 uint8_t *out8 = s;
29#if !CHIP_HAS_WH64()
30 int ahead32;
31#else
32 int to_align32; 26 int to_align32;
33#endif
34 27
35 /* Experimentation shows that a trivial tight loop is a win up until 28 /* Experimentation shows that a trivial tight loop is a win up until
36 * around a size of 20, where writing a word at a time starts to win. 29 * around a size of 20, where writing a word at a time starts to win.
@@ -61,21 +54,6 @@ void *memset(void *s, int c, size_t n)
61 return s; 54 return s;
62 } 55 }
63 56
64#if !CHIP_HAS_WH64()
65 /* Use a spare issue slot to start prefetching the first cache
66 * line early. This instruction is free as the store can be buried
67 * in otherwise idle issue slots doing ALU ops.
68 */
69 __insn_prefetch(out8);
70
71 /* We prefetch the end so that a short memset that spans two cache
72 * lines gets some prefetching benefit. Again we believe this is free
73 * to issue.
74 */
75 __insn_prefetch(&out8[n - 1]);
76#endif /* !CHIP_HAS_WH64() */
77
78
79 /* Align 'out8'. We know n >= 3 so this won't write past the end. */ 57 /* Align 'out8'. We know n >= 3 so this won't write past the end. */
80 while (((uintptr_t) out8 & 3) != 0) { 58 while (((uintptr_t) out8 & 3) != 0) {
81 *out8++ = c; 59 *out8++ = c;
@@ -96,90 +74,6 @@ void *memset(void *s, int c, size_t n)
96 /* This must be at least 8 or the following loop doesn't work. */ 74 /* This must be at least 8 or the following loop doesn't work. */
97#define CACHE_LINE_SIZE_IN_WORDS (CHIP_L2_LINE_SIZE() / 4) 75#define CACHE_LINE_SIZE_IN_WORDS (CHIP_L2_LINE_SIZE() / 4)
98 76
99#if !CHIP_HAS_WH64()
100
101 ahead32 = CACHE_LINE_SIZE_IN_WORDS;
102
103 /* We already prefetched the first and last cache lines, so
104 * we only need to do more prefetching if we are storing
105 * to more than two cache lines.
106 */
107 if (n32 > CACHE_LINE_SIZE_IN_WORDS * 2) {
108 int i;
109
110 /* Prefetch the next several cache lines.
111 * This is the setup code for the software-pipelined
112 * loop below.
113 */
114#define MAX_PREFETCH 5
115 ahead32 = n32 & -CACHE_LINE_SIZE_IN_WORDS;
116 if (ahead32 > MAX_PREFETCH * CACHE_LINE_SIZE_IN_WORDS)
117 ahead32 = MAX_PREFETCH * CACHE_LINE_SIZE_IN_WORDS;
118
119 for (i = CACHE_LINE_SIZE_IN_WORDS;
120 i < ahead32; i += CACHE_LINE_SIZE_IN_WORDS)
121 __insn_prefetch(&out32[i]);
122 }
123
124 if (n32 > ahead32) {
125 while (1) {
126 int j;
127
128 /* Prefetch by reading one word several cache lines
129 * ahead. Since loads are non-blocking this will
130 * cause the full cache line to be read while we are
131 * finishing earlier cache lines. Using a store
132 * here causes microarchitectural performance
133 * problems where a victimizing store miss goes to
134 * the head of the retry FIFO and locks the pipe for
135 * a few cycles. So a few subsequent stores in this
136 * loop go into the retry FIFO, and then later
137 * stores see other stores to the same cache line
138 * are already in the retry FIFO and themselves go
139 * into the retry FIFO, filling it up and grinding
140 * to a halt waiting for the original miss to be
141 * satisfied.
142 */
143 __insn_prefetch(&out32[ahead32]);
144
145#if CACHE_LINE_SIZE_IN_WORDS % 4 != 0
146#error "Unhandled CACHE_LINE_SIZE_IN_WORDS"
147#endif
148
149 n32 -= CACHE_LINE_SIZE_IN_WORDS;
150
151 /* Save icache space by only partially unrolling
152 * this loop.
153 */
154 for (j = CACHE_LINE_SIZE_IN_WORDS / 4; j > 0; j--) {
155 *out32++ = v32;
156 *out32++ = v32;
157 *out32++ = v32;
158 *out32++ = v32;
159 }
160
161 /* To save compiled code size, reuse this loop even
162 * when we run out of prefetching to do by dropping
163 * ahead32 down.
164 */
165 if (n32 <= ahead32) {
166 /* Not even a full cache line left,
167 * so stop now.
168 */
169 if (n32 < CACHE_LINE_SIZE_IN_WORDS)
170 break;
171
172 /* Choose a small enough value that we don't
173 * prefetch past the end. There's no sense
174 * in touching cache lines we don't have to.
175 */
176 ahead32 = CACHE_LINE_SIZE_IN_WORDS - 1;
177 }
178 }
179 }
180
181#else /* CHIP_HAS_WH64() */
182
183 /* Determine how many words we need to emit before the 'out32' 77 /* Determine how many words we need to emit before the 'out32'
184 * pointer becomes aligned modulo the cache line size. 78 * pointer becomes aligned modulo the cache line size.
185 */ 79 */
@@ -236,8 +130,6 @@ void *memset(void *s, int c, size_t n)
236 n32 &= CACHE_LINE_SIZE_IN_WORDS - 1; 130 n32 &= CACHE_LINE_SIZE_IN_WORDS - 1;
237 } 131 }
238 132
239#endif /* CHIP_HAS_WH64() */
240
241 /* Now handle any leftover values. */ 133 /* Now handle any leftover values. */
242 if (n32 != 0) { 134 if (n32 != 0) {
243 do { 135 do {
diff --git a/arch/tile/lib/memset_64.c b/arch/tile/lib/memset_64.c
index 3873085711d5..03ef69cd73de 100644
--- a/arch/tile/lib/memset_64.c
+++ b/arch/tile/lib/memset_64.c
@@ -12,13 +12,11 @@
12 * more details. 12 * more details.
13 */ 13 */
14 14
15#include <arch/chip.h>
16
17#include <linux/types.h> 15#include <linux/types.h>
18#include <linux/string.h> 16#include <linux/string.h>
19#include <linux/module.h> 17#include <linux/module.h>
20 18#include <arch/chip.h>
21#undef memset 19#include "string-endian.h"
22 20
23void *memset(void *s, int c, size_t n) 21void *memset(void *s, int c, size_t n)
24{ 22{
@@ -70,8 +68,7 @@ void *memset(void *s, int c, size_t n)
70 n64 = n >> 3; 68 n64 = n >> 3;
71 69
72 /* Tile input byte out to 64 bits. */ 70 /* Tile input byte out to 64 bits. */
73 /* KLUDGE */ 71 v64 = copy_byte(c);
74 v64 = 0x0101010101010101ULL * (uint8_t)c;
75 72
76 /* This must be at least 8 or the following loop doesn't work. */ 73 /* This must be at least 8 or the following loop doesn't work. */
77#define CACHE_LINE_SIZE_IN_DOUBLEWORDS (CHIP_L2_LINE_SIZE() / 8) 74#define CACHE_LINE_SIZE_IN_DOUBLEWORDS (CHIP_L2_LINE_SIZE() / 8)
diff --git a/arch/tile/lib/strchr_32.c b/arch/tile/lib/strchr_32.c
index c94e6f7ae7b5..841fe6963019 100644
--- a/arch/tile/lib/strchr_32.c
+++ b/arch/tile/lib/strchr_32.c
@@ -16,8 +16,6 @@
16#include <linux/string.h> 16#include <linux/string.h>
17#include <linux/module.h> 17#include <linux/module.h>
18 18
19#undef strchr
20
21char *strchr(const char *s, int c) 19char *strchr(const char *s, int c)
22{ 20{
23 int z, g; 21 int z, g;
diff --git a/arch/tile/lib/strchr_64.c b/arch/tile/lib/strchr_64.c
index f39f9dc422b0..fe6e31c06f8d 100644
--- a/arch/tile/lib/strchr_64.c
+++ b/arch/tile/lib/strchr_64.c
@@ -26,7 +26,7 @@ char *strchr(const char *s, int c)
26 const uint64_t *p = (const uint64_t *)(s_int & -8); 26 const uint64_t *p = (const uint64_t *)(s_int & -8);
27 27
28 /* Create eight copies of the byte for which we are looking. */ 28 /* Create eight copies of the byte for which we are looking. */
29 const uint64_t goal = 0x0101010101010101ULL * (uint8_t) c; 29 const uint64_t goal = copy_byte(c);
30 30
31 /* Read the first aligned word, but force bytes before the string to 31 /* Read the first aligned word, but force bytes before the string to
32 * match neither zero nor goal (we make sure the high bit of each 32 * match neither zero nor goal (we make sure the high bit of each
diff --git a/arch/tile/lib/string-endian.h b/arch/tile/lib/string-endian.h
index c0eed7ce69c3..2e49cbfe9371 100644
--- a/arch/tile/lib/string-endian.h
+++ b/arch/tile/lib/string-endian.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved. 2 * Copyright 2013 Tilera Corporation. All Rights Reserved.
3 * 3 *
4 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License 5 * modify it under the terms of the GNU General Public License
@@ -31,3 +31,14 @@
31#define CFZ(x) __insn_clz(x) 31#define CFZ(x) __insn_clz(x)
32#define REVCZ(x) __insn_ctz(x) 32#define REVCZ(x) __insn_ctz(x)
33#endif 33#endif
34
35/*
36 * Create eight copies of the byte in a uint64_t. Byte Shuffle uses
37 * the bytes of srcB as the index into the dest vector to select a
38 * byte. With all indices of zero, the first byte is copied into all
39 * the other bytes.
40 */
41static inline uint64_t copy_byte(uint8_t byte)
42{
43 return __insn_shufflebytes(byte, 0, 0);
44}
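
The shuffle-based broadcast is equivalent to the multiply idiom it replaces throughout this series; the shuffle form simply does it in one instruction without tying up a multiplier. A quick host-side check of the multiply half (the intrinsic half can only run on tilegx):

#include <stdint.h>
#include <assert.h>

static uint64_t copy_byte_mul(uint8_t byte)
{
        /* The exact expression removed from memchr/strchr/memset. */
        return 0x0101010101010101ULL * byte;
}

int main(void)
{
        assert(copy_byte_mul(0xab) == 0xababababababababULL);
        /* On tilegx: assert(copy_byte(0xab) == copy_byte_mul(0xab)); */
        return 0;
}
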
diff --git a/arch/tile/lib/strlen_32.c b/arch/tile/lib/strlen_32.c
index 4974292a5534..f26f88e11e4a 100644
--- a/arch/tile/lib/strlen_32.c
+++ b/arch/tile/lib/strlen_32.c
@@ -16,8 +16,6 @@
16#include <linux/string.h> 16#include <linux/string.h>
17#include <linux/module.h> 17#include <linux/module.h>
18 18
19#undef strlen
20
21size_t strlen(const char *s) 19size_t strlen(const char *s)
22{ 20{
23 /* Get an aligned pointer. */ 21 /* Get an aligned pointer. */
diff --git a/arch/tile/lib/strnlen_32.c b/arch/tile/lib/strnlen_32.c
new file mode 100644
index 000000000000..1434141d9e01
--- /dev/null
+++ b/arch/tile/lib/strnlen_32.c
@@ -0,0 +1,47 @@
1/*
2 * Copyright 2013 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15#include <linux/types.h>
16#include <linux/string.h>
17#include <linux/module.h>
18
19size_t strnlen(const char *s, size_t count)
20{
21 /* Get an aligned pointer. */
22 const uintptr_t s_int = (uintptr_t) s;
23 const uint32_t *p = (const uint32_t *)(s_int & -4);
24 size_t bytes_read = sizeof(*p) - (s_int & (sizeof(*p) - 1));
25 size_t len;
26 uint32_t v, bits;
27
28 /* Avoid page fault risk by not reading any bytes when count is 0. */
29 if (count == 0)
30 return 0;
31
32 /* Read first word, but force bytes before the string to be nonzero. */
33 v = *p | ((1 << ((s_int << 3) & 31)) - 1);
34
35 while ((bits = __insn_seqb(v, 0)) == 0) {
36 if (bytes_read >= count) {
37 /* Read COUNT bytes and didn't find the terminator. */
38 return count;
39 }
40 v = *++p;
41 bytes_read += sizeof(v);
42 }
43
44 len = ((const char *) p) + (__insn_ctz(bits) >> 3) - s;
45 return (len < count ? len : count);
46}
47EXPORT_SYMBOL(strnlen);
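
The new strnlen is the usual word-at-a-time scan: align down, poison the bytes before the string start, then test four bytes per iteration. A minimal portable sketch of the same loop, assuming a little-endian machine and GCC builtins in place of __insn_seqb()/__insn_ctz():

    #include <stddef.h>
    #include <stdint.h>

    static inline uint32_t haszero32(uint32_t v)
    {
            return (v - 0x01010101u) & ~v & 0x80808080u;
    }

    size_t strnlen_sketch(const char *s, size_t count)
    {
            const uintptr_t s_int = (uintptr_t)s;
            const uint32_t *p = (const uint32_t *)(s_int & -4);
            size_t bytes_read = sizeof(*p) - (s_int & (sizeof(*p) - 1));
            size_t len;
            uint32_t v, bits;

            if (count == 0)
                    return 0;

            /* Force the bytes before the string start to 0xff so they
             * cannot look like a NUL terminator. */
            v = *p | ((1u << ((s_int << 3) & 31)) - 1);

            while ((bits = haszero32(v)) == 0) {
                    if (bytes_read >= count)
                            return count;  /* no NUL in the first count bytes */
                    v = *++p;
                    bytes_read += sizeof(v);
            }

            /* ctz(bits) / 8 is the offset of the first NUL in the word. */
            len = (const char *)p + (__builtin_ctz(bits) >> 3) - s;
            return len < count ? len : count;
    }
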
diff --git a/arch/tile/lib/strnlen_64.c b/arch/tile/lib/strnlen_64.c
new file mode 100644
index 000000000000..2e8de6a5136f
--- /dev/null
+++ b/arch/tile/lib/strnlen_64.c
@@ -0,0 +1,48 @@
1/*
2 * Copyright 2013 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15#include <linux/types.h>
16#include <linux/string.h>
17#include <linux/module.h>
18#include "string-endian.h"
19
20size_t strnlen(const char *s, size_t count)
21{
22 /* Get an aligned pointer. */
23 const uintptr_t s_int = (uintptr_t) s;
24 const uint64_t *p = (const uint64_t *)(s_int & -8);
25 size_t bytes_read = sizeof(*p) - (s_int & (sizeof(*p) - 1));
26 size_t len;
27 uint64_t v, bits;
28
29 /* Avoid page fault risk by not reading any bytes when count is 0. */
30 if (count == 0)
31 return 0;
32
33 /* Read and MASK the first word. */
34 v = *p | MASK(s_int);
35
36 while ((bits = __insn_v1cmpeqi(v, 0)) == 0) {
37 if (bytes_read >= count) {
38 /* Read COUNT bytes and didn't find the terminator. */
39 return count;
40 }
41 v = *++p;
42 bytes_read += sizeof(v);
43 }
44
45 len = ((const char *) p) + (CFZ(bits) >> 3) - s;
46 return (len < count ? len : count);
47}
48EXPORT_SYMBOL(strnlen);
diff --git a/arch/tile/lib/usercopy_32.S b/arch/tile/lib/usercopy_32.S
index b62d002af009..1bc162224638 100644
--- a/arch/tile/lib/usercopy_32.S
+++ b/arch/tile/lib/usercopy_32.S
@@ -36,6 +36,7 @@ strnlen_user_fault:
36 { move r0, zero; jrp lr } 36 { move r0, zero; jrp lr }
37 ENDPROC(strnlen_user_fault) 37 ENDPROC(strnlen_user_fault)
38 .section __ex_table,"a" 38 .section __ex_table,"a"
39 .align 4
39 .word 1b, strnlen_user_fault 40 .word 1b, strnlen_user_fault
40 .popsection 41 .popsection
41 42
@@ -47,18 +48,20 @@ strnlen_user_fault:
47 */ 48 */
48STD_ENTRY(strncpy_from_user_asm) 49STD_ENTRY(strncpy_from_user_asm)
49 { bz r2, 2f; move r3, r0 } 50 { bz r2, 2f; move r3, r0 }
501: { lb_u r4, r1; addi r1, r1, 1; addi r2, r2, -1 } 511: { lb_u r4, r1; addi r1, r1, 1; addi r2, r2, -1 }
51 { sb r0, r4; addi r0, r0, 1 } 52 { sb r0, r4; addi r0, r0, 1 }
52 bz r2, 2f 53 bz r4, 2f
53 bnzt r4, 1b 54 bnzt r2, 1b
54 addi r0, r0, -1 /* don't count the trailing NUL */ 55 { sub r0, r0, r3; jrp lr }
552: { sub r0, r0, r3; jrp lr } 562: addi r0, r0, -1 /* don't count the trailing NUL */
57 { sub r0, r0, r3; jrp lr }
56 STD_ENDPROC(strncpy_from_user_asm) 58 STD_ENDPROC(strncpy_from_user_asm)
57 .pushsection .fixup,"ax" 59 .pushsection .fixup,"ax"
58strncpy_from_user_fault: 60strncpy_from_user_fault:
59 { movei r0, -EFAULT; jrp lr } 61 { movei r0, -EFAULT; jrp lr }
60 ENDPROC(strncpy_from_user_fault) 62 ENDPROC(strncpy_from_user_fault)
61 .section __ex_table,"a" 63 .section __ex_table,"a"
64 .align 4
62 .word 1b, strncpy_from_user_fault 65 .word 1b, strncpy_from_user_fault
63 .popsection 66 .popsection
64 67
@@ -77,6 +80,7 @@ STD_ENTRY(clear_user_asm)
77 bnzt r1, 1b 80 bnzt r1, 1b
782: { move r0, r1; jrp lr } 812: { move r0, r1; jrp lr }
79 .pushsection __ex_table,"a" 82 .pushsection __ex_table,"a"
83 .align 4
80 .word 1b, 2b 84 .word 1b, 2b
81 .popsection 85 .popsection
82 86
@@ -86,6 +90,7 @@ STD_ENTRY(clear_user_asm)
862: { move r0, r1; jrp lr } 902: { move r0, r1; jrp lr }
87 STD_ENDPROC(clear_user_asm) 91 STD_ENDPROC(clear_user_asm)
88 .pushsection __ex_table,"a" 92 .pushsection __ex_table,"a"
93 .align 4
89 .word 1b, 2b 94 .word 1b, 2b
90 .popsection 95 .popsection
91 96
@@ -105,25 +110,7 @@ STD_ENTRY(flush_user_asm)
1052: { move r0, r1; jrp lr } 1102: { move r0, r1; jrp lr }
106 STD_ENDPROC(flush_user_asm) 111 STD_ENDPROC(flush_user_asm)
107 .pushsection __ex_table,"a" 112 .pushsection __ex_table,"a"
108 .word 1b, 2b 113 .align 4
109 .popsection
110
111/*
112 * inv_user_asm takes the user target address in r0 and the
113 * number of bytes to invalidate in r1.
114 * It returns the number of not inv'able bytes (hopefully zero) in r0.
115 */
116STD_ENTRY(inv_user_asm)
117 bz r1, 2f
118 { movei r2, L2_CACHE_BYTES; add r1, r0, r1 }
119 { sub r2, zero, r2; addi r1, r1, L2_CACHE_BYTES-1 }
120 { and r0, r0, r2; and r1, r1, r2 }
121 { sub r1, r1, r0 }
1221: { inv r0; addi r1, r1, -CHIP_INV_STRIDE() }
123 { addi r0, r0, CHIP_INV_STRIDE(); bnzt r1, 1b }
1242: { move r0, r1; jrp lr }
125 STD_ENDPROC(inv_user_asm)
126 .pushsection __ex_table,"a"
127 .word 1b, 2b 114 .word 1b, 2b
128 .popsection 115 .popsection
129 116
@@ -143,5 +130,6 @@ STD_ENTRY(finv_user_asm)
1432: { move r0, r1; jrp lr } 1302: { move r0, r1; jrp lr }
144 STD_ENDPROC(finv_user_asm) 131 STD_ENDPROC(finv_user_asm)
145 .pushsection __ex_table,"a" 132 .pushsection __ex_table,"a"
133 .align 4
146 .word 1b, 2b 134 .word 1b, 2b
147 .popsection 135 .popsection
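
The strncpy_from_user_asm rewrite fixes an off-by-one: the old sequence tested the remaining count (bz r2) before the copied byte (bnzt r4), so a NUL landing on the last permitted iteration was wrongly counted in the returned length. A minimal C model of the corrected control flow, with fault handling and the count == 0 entry case elided:

    #include <stddef.h>

    static long strncpy_from_user_model(char *dst, const char *src, long count)
    {
            long copied = 0;

            while (count-- > 0) {
                    char c = *src++;            /* lb_u r4, r1 */
                    dst[copied++] = c;          /* sb r0, r4 */
                    if (c == '\0')              /* test the byte first... */
                            return copied - 1;  /* don't count the trailing NUL */
            }
            return copied;  /* ...and only then the count: no NUL found */
    }
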
diff --git a/arch/tile/lib/usercopy_64.S b/arch/tile/lib/usercopy_64.S
index adb2dbbc70cd..b3b31a3306f8 100644
--- a/arch/tile/lib/usercopy_64.S
+++ b/arch/tile/lib/usercopy_64.S
@@ -36,6 +36,7 @@ strnlen_user_fault:
36 { move r0, zero; jrp lr } 36 { move r0, zero; jrp lr }
37 ENDPROC(strnlen_user_fault) 37 ENDPROC(strnlen_user_fault)
38 .section __ex_table,"a" 38 .section __ex_table,"a"
39 .align 8
39 .quad 1b, strnlen_user_fault 40 .quad 1b, strnlen_user_fault
40 .popsection 41 .popsection
41 42
@@ -47,18 +48,20 @@ strnlen_user_fault:
47 */ 48 */
48STD_ENTRY(strncpy_from_user_asm) 49STD_ENTRY(strncpy_from_user_asm)
49 { beqz r2, 2f; move r3, r0 } 50 { beqz r2, 2f; move r3, r0 }
501: { ld1u r4, r1; addi r1, r1, 1; addi r2, r2, -1 } 511: { ld1u r4, r1; addi r1, r1, 1; addi r2, r2, -1 }
51 { st1 r0, r4; addi r0, r0, 1 } 52 { st1 r0, r4; addi r0, r0, 1 }
52 beqz r2, 2f 53 beqz r4, 2f
53 bnezt r4, 1b 54 bnezt r2, 1b
54 addi r0, r0, -1 /* don't count the trailing NUL */ 55 { sub r0, r0, r3; jrp lr }
552: { sub r0, r0, r3; jrp lr } 562: addi r0, r0, -1 /* don't count the trailing NUL */
57 { sub r0, r0, r3; jrp lr }
56 STD_ENDPROC(strncpy_from_user_asm) 58 STD_ENDPROC(strncpy_from_user_asm)
57 .pushsection .fixup,"ax" 59 .pushsection .fixup,"ax"
58strncpy_from_user_fault: 60strncpy_from_user_fault:
59 { movei r0, -EFAULT; jrp lr } 61 { movei r0, -EFAULT; jrp lr }
60 ENDPROC(strncpy_from_user_fault) 62 ENDPROC(strncpy_from_user_fault)
61 .section __ex_table,"a" 63 .section __ex_table,"a"
64 .align 8
62 .quad 1b, strncpy_from_user_fault 65 .quad 1b, strncpy_from_user_fault
63 .popsection 66 .popsection
64 67
@@ -77,6 +80,7 @@ STD_ENTRY(clear_user_asm)
77 bnezt r1, 1b 80 bnezt r1, 1b
782: { move r0, r1; jrp lr } 812: { move r0, r1; jrp lr }
79 .pushsection __ex_table,"a" 82 .pushsection __ex_table,"a"
83 .align 8
80 .quad 1b, 2b 84 .quad 1b, 2b
81 .popsection 85 .popsection
82 86
@@ -86,6 +90,7 @@ STD_ENTRY(clear_user_asm)
862: { move r0, r1; jrp lr } 902: { move r0, r1; jrp lr }
87 STD_ENDPROC(clear_user_asm) 91 STD_ENDPROC(clear_user_asm)
88 .pushsection __ex_table,"a" 92 .pushsection __ex_table,"a"
93 .align 8
89 .quad 1b, 2b 94 .quad 1b, 2b
90 .popsection 95 .popsection
91 96
@@ -105,25 +110,7 @@ STD_ENTRY(flush_user_asm)
1052: { move r0, r1; jrp lr } 1102: { move r0, r1; jrp lr }
106 STD_ENDPROC(flush_user_asm) 111 STD_ENDPROC(flush_user_asm)
107 .pushsection __ex_table,"a" 112 .pushsection __ex_table,"a"
108 .quad 1b, 2b 113 .align 8
109 .popsection
110
111/*
112 * inv_user_asm takes the user target address in r0 and the
113 * number of bytes to invalidate in r1.
114 * It returns the number of not inv'able bytes (hopefully zero) in r0.
115 */
116STD_ENTRY(inv_user_asm)
117 beqz r1, 2f
118 { movei r2, L2_CACHE_BYTES; add r1, r0, r1 }
119 { sub r2, zero, r2; addi r1, r1, L2_CACHE_BYTES-1 }
120 { and r0, r0, r2; and r1, r1, r2 }
121 { sub r1, r1, r0 }
1221: { inv r0; addi r1, r1, -CHIP_INV_STRIDE() }
123 { addi r0, r0, CHIP_INV_STRIDE(); bnezt r1, 1b }
1242: { move r0, r1; jrp lr }
125 STD_ENDPROC(inv_user_asm)
126 .pushsection __ex_table,"a"
127 .quad 1b, 2b 114 .quad 1b, 2b
128 .popsection 115 .popsection
129 116
@@ -143,5 +130,6 @@ STD_ENTRY(finv_user_asm)
1432: { move r0, r1; jrp lr } 1302: { move r0, r1; jrp lr }
144 STD_ENDPROC(finv_user_asm) 131 STD_ENDPROC(finv_user_asm)
145 .pushsection __ex_table,"a" 132 .pushsection __ex_table,"a"
133 .align 8
146 .quad 1b, 2b 134 .quad 1b, 2b
147 .popsection 135 .popsection
diff --git a/arch/tile/mm/elf.c b/arch/tile/mm/elf.c
index 743c951c61b0..23f044e8a7ab 100644
--- a/arch/tile/mm/elf.c
+++ b/arch/tile/mm/elf.c
@@ -21,7 +21,8 @@
21#include <asm/pgtable.h> 21#include <asm/pgtable.h>
22#include <asm/pgalloc.h> 22#include <asm/pgalloc.h>
23#include <asm/sections.h> 23#include <asm/sections.h>
24#include <arch/sim_def.h> 24#include <asm/vdso.h>
25#include <arch/sim.h>
25 26
26/* Notify a running simulator, if any, that an exec just occurred. */ 27/* Notify a running simulator, if any, that an exec just occurred. */
27static void sim_notify_exec(const char *binary_name) 28static void sim_notify_exec(const char *binary_name)
@@ -38,21 +39,55 @@ static void sim_notify_exec(const char *binary_name)
38 39
39static int notify_exec(struct mm_struct *mm) 40static int notify_exec(struct mm_struct *mm)
40{ 41{
41 int retval = 0; /* failure */ 42 char *buf, *path;
42 43 struct vm_area_struct *vma;
43 if (mm->exe_file) { 44
44 char *buf = (char *) __get_free_page(GFP_KERNEL); 45 if (!sim_is_simulator())
45 if (buf) { 46 return 1;
46 char *path = d_path(&mm->exe_file->f_path, 47
47 buf, PAGE_SIZE); 48 if (mm->exe_file == NULL)
48 if (!IS_ERR(path)) { 49 return 0;
49 sim_notify_exec(path); 50
50 retval = 1; 51 for (vma = current->mm->mmap; ; vma = vma->vm_next) {
51 } 52 if (vma == NULL)
52 free_page((unsigned long)buf); 53 return 0;
54 if (vma->vm_file == mm->exe_file)
55 break;
56 }
57
58 buf = (char *) __get_free_page(GFP_KERNEL);
59 if (buf == NULL)
60 return 0;
61
62 path = d_path(&mm->exe_file->f_path, buf, PAGE_SIZE);
63 if (IS_ERR(path)) {
64 free_page((unsigned long)buf);
65 return 0;
66 }
67
68 /*
69 * Notify simulator of an ET_DYN object so we know the load address.
70 * The somewhat cryptic overuse of SIM_CONTROL_DLOPEN allows us
71 * to be backward-compatible with older simulator releases.
72 */
73 if (vma->vm_start == (ELF_ET_DYN_BASE & PAGE_MASK)) {
74 char buf[64];
75 int i;
76
77 snprintf(buf, sizeof(buf), "0x%lx:@", vma->vm_start);
78 for (i = 0; ; ++i) {
79 char c = buf[i];
80 __insn_mtspr(SPR_SIM_CONTROL,
81 (SIM_CONTROL_DLOPEN
82 | (c << _SIM_CONTROL_OPERATOR_BITS)));
83 if (c == '\0')
84 break;
53 } 85 }
54 } 86 }
55 return retval; 87
88 sim_notify_exec(path);
89 free_page((unsigned long)buf);
90 return 1;
56} 91}
57 92
58/* Notify a running simulator, if any, that we loaded an interpreter. */ 93/* Notify a running simulator, if any, that we loaded an interpreter. */
@@ -68,37 +103,10 @@ static void sim_notify_interp(unsigned long load_addr)
68} 103}
69 104
70 105
71/* Kernel address of page used to map read-only kernel data into userspace. */
72static void *vdso_page;
73
74/* One-entry array used for install_special_mapping. */
75static struct page *vdso_pages[1];
76
77static int __init vdso_setup(void)
78{
79 vdso_page = (void *)get_zeroed_page(GFP_ATOMIC);
80 memcpy(vdso_page, __rt_sigreturn, __rt_sigreturn_end - __rt_sigreturn);
81 vdso_pages[0] = virt_to_page(vdso_page);
82 return 0;
83}
84device_initcall(vdso_setup);
85
86const char *arch_vma_name(struct vm_area_struct *vma)
87{
88 if (vma->vm_private_data == vdso_pages)
89 return "[vdso]";
90#ifndef __tilegx__
91 if (vma->vm_start == MEM_USER_INTRPT)
92 return "[intrpt]";
93#endif
94 return NULL;
95}
96
97int arch_setup_additional_pages(struct linux_binprm *bprm, 106int arch_setup_additional_pages(struct linux_binprm *bprm,
98 int executable_stack) 107 int executable_stack)
99{ 108{
100 struct mm_struct *mm = current->mm; 109 struct mm_struct *mm = current->mm;
101 unsigned long vdso_base;
102 int retval = 0; 110 int retval = 0;
103 111
104 down_write(&mm->mmap_sem); 112 down_write(&mm->mmap_sem);
@@ -111,14 +119,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
111 if (!notify_exec(mm)) 119 if (!notify_exec(mm))
112 sim_notify_exec(bprm->filename); 120 sim_notify_exec(bprm->filename);
113 121
114 /* 122 retval = setup_vdso_pages();
115 * MAYWRITE to allow gdb to COW and set breakpoints
116 */
117 vdso_base = VDSO_BASE;
118 retval = install_special_mapping(mm, vdso_base, PAGE_SIZE,
119 VM_READ|VM_EXEC|
120 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
121 vdso_pages);
122 123
123#ifndef __tilegx__ 124#ifndef __tilegx__
124 /* 125 /*
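
The restructured notify_exec() also illustrates the d_path() calling convention: d_path() writes the name at the tail of the caller's buffer and returns a pointer into it (or an ERR_PTR), so the page is always freed through the original buffer pointer, never through the returned one. A minimal sketch of the idiom, assuming kernel context:

    static int exe_path_example(struct mm_struct *mm)
    {
            char *buf, *path;
            int found = 0;

            buf = (char *)__get_free_page(GFP_KERNEL);
            if (buf == NULL)
                    return 0;

            path = d_path(&mm->exe_file->f_path, buf, PAGE_SIZE);
            if (!IS_ERR(path)) {
                    /* use path here; it points into buf's tail */
                    found = 1;
            }
            free_page((unsigned long)buf);  /* free buf, never path */
            return found;
    }
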
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index f7f99f90cbe0..111d5a9b76f1 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -34,6 +34,7 @@
34#include <linux/hugetlb.h> 34#include <linux/hugetlb.h>
35#include <linux/syscalls.h> 35#include <linux/syscalls.h>
36#include <linux/uaccess.h> 36#include <linux/uaccess.h>
37#include <linux/kdebug.h>
37 38
38#include <asm/pgalloc.h> 39#include <asm/pgalloc.h>
39#include <asm/sections.h> 40#include <asm/sections.h>
@@ -122,10 +123,9 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
122 pmd_k = pmd_offset(pud_k, address); 123 pmd_k = pmd_offset(pud_k, address);
123 if (!pmd_present(*pmd_k)) 124 if (!pmd_present(*pmd_k))
124 return NULL; 125 return NULL;
125 if (!pmd_present(*pmd)) { 126 if (!pmd_present(*pmd))
126 set_pmd(pmd, *pmd_k); 127 set_pmd(pmd, *pmd_k);
127 arch_flush_lazy_mmu_mode(); 128 else
128 } else
129 BUG_ON(pmd_ptfn(*pmd) != pmd_ptfn(*pmd_k)); 129 BUG_ON(pmd_ptfn(*pmd) != pmd_ptfn(*pmd_k));
130 return pmd_k; 130 return pmd_k;
131} 131}
@@ -283,7 +283,7 @@ static int handle_page_fault(struct pt_regs *regs,
283 flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE | 283 flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
284 (write ? FAULT_FLAG_WRITE : 0)); 284 (write ? FAULT_FLAG_WRITE : 0));
285 285
286 is_kernel_mode = (EX1_PL(regs->ex1) != USER_PL); 286 is_kernel_mode = !user_mode(regs);
287 287
288 tsk = validate_current(); 288 tsk = validate_current();
289 289
@@ -466,28 +466,15 @@ good_area:
466 } 466 }
467 } 467 }
468 468
469#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
470 /*
471 * If this was an asynchronous fault,
472 * restart the appropriate engine.
473 */
474 switch (fault_num) {
475#if CHIP_HAS_TILE_DMA() 469#if CHIP_HAS_TILE_DMA()
470 /* If this was a DMA TLB fault, restart the DMA engine. */
471 switch (fault_num) {
476 case INT_DMATLB_MISS: 472 case INT_DMATLB_MISS:
477 case INT_DMATLB_MISS_DWNCL: 473 case INT_DMATLB_MISS_DWNCL:
478 case INT_DMATLB_ACCESS: 474 case INT_DMATLB_ACCESS:
479 case INT_DMATLB_ACCESS_DWNCL: 475 case INT_DMATLB_ACCESS_DWNCL:
480 __insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK); 476 __insn_mtspr(SPR_DMA_CTR, SPR_DMA_CTR__REQUEST_MASK);
481 break; 477 break;
482#endif
483#if CHIP_HAS_SN_PROC()
484 case INT_SNITLB_MISS:
485 case INT_SNITLB_MISS_DWNCL:
486 __insn_mtspr(SPR_SNCTL,
487 __insn_mfspr(SPR_SNCTL) &
488 ~SPR_SNCTL__FRZPROC_MASK);
489 break;
490#endif
491 } 478 }
492#endif 479#endif
493 480
@@ -722,8 +709,60 @@ void do_page_fault(struct pt_regs *regs, int fault_num,
722{ 709{
723 int is_page_fault; 710 int is_page_fault;
724 711
712#ifdef CONFIG_KPROBES
713 /*
714 * This is to notify the fault handler of the kprobes. The
715 * exception code is redundant as it is also carried in REGS,
716 * but we pass it anyhow.
717 */
718 if (notify_die(DIE_PAGE_FAULT, "page fault", regs, -1,
719 regs->faultnum, SIGSEGV) == NOTIFY_STOP)
720 return;
721#endif
722
723#ifdef __tilegx__
724 /*
725 * We don't need early do_page_fault_ics() support, since unlike
726 * Pro we don't need to worry about unlocking the atomic locks.
727 * There is only one current case in GX where we touch any memory
728 * under ICS other than our own kernel stack, and we handle that
729 * here. (If we crash due to trying to touch our own stack,
730 * we're in too much trouble for C code to help out anyway.)
731 */
732 if (write & ~1) {
733 unsigned long pc = write & ~1;
734 if (pc >= (unsigned long) __start_unalign_asm_code &&
735 pc < (unsigned long) __end_unalign_asm_code) {
736 struct thread_info *ti = current_thread_info();
737 /*
738 * Our EX_CONTEXT is still what it was from the
739 * initial unalign exception, but now we've faulted
740 * on the JIT page. We would like to complete the
741 * page fault however is appropriate, and then retry
742 * the instruction that caused the unalign exception.
743 * Our state has been "corrupted" by setting the low
744 * bit in "sp", and stashing r0..r3 in the
745 * thread_info area, so we revert all of that, then
746 * continue as if this were a normal page fault.
747 */
748 regs->sp &= ~1UL;
749 regs->regs[0] = ti->unalign_jit_tmp[0];
750 regs->regs[1] = ti->unalign_jit_tmp[1];
751 regs->regs[2] = ti->unalign_jit_tmp[2];
752 regs->regs[3] = ti->unalign_jit_tmp[3];
753 write &= 1;
754 } else {
755 pr_alert("%s/%d: ICS set at page fault at %#lx: %#lx\n",
756 current->comm, current->pid, pc, address);
757 show_regs(regs);
758 do_group_exit(SIGKILL);
759 return;
760 }
761 }
762#else
725 /* This case should have been handled by do_page_fault_ics(). */ 763 /* This case should have been handled by do_page_fault_ics(). */
726 BUG_ON(write & ~1); 764 BUG_ON(write & ~1);
765#endif
727 766
728#if CHIP_HAS_TILE_DMA() 767#if CHIP_HAS_TILE_DMA()
729 /* 768 /*
@@ -752,10 +791,6 @@ void do_page_fault(struct pt_regs *regs, int fault_num,
752 case INT_DMATLB_MISS: 791 case INT_DMATLB_MISS:
753 case INT_DMATLB_MISS_DWNCL: 792 case INT_DMATLB_MISS_DWNCL:
754#endif 793#endif
755#if CHIP_HAS_SN_PROC()
756 case INT_SNITLB_MISS:
757 case INT_SNITLB_MISS_DWNCL:
758#endif
759 is_page_fault = 1; 794 is_page_fault = 1;
760 break; 795 break;
761 796
@@ -771,8 +806,8 @@ void do_page_fault(struct pt_regs *regs, int fault_num,
771 panic("Bad fault number %d in do_page_fault", fault_num); 806 panic("Bad fault number %d in do_page_fault", fault_num);
772 } 807 }
773 808
774#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC() 809#if CHIP_HAS_TILE_DMA()
775 if (EX1_PL(regs->ex1) != USER_PL) { 810 if (!user_mode(regs)) {
776 struct async_tlb *async; 811 struct async_tlb *async;
777 switch (fault_num) { 812 switch (fault_num) {
778#if CHIP_HAS_TILE_DMA() 813#if CHIP_HAS_TILE_DMA()
@@ -783,12 +818,6 @@ void do_page_fault(struct pt_regs *regs, int fault_num,
783 async = &current->thread.dma_async_tlb; 818 async = &current->thread.dma_async_tlb;
784 break; 819 break;
785#endif 820#endif
786#if CHIP_HAS_SN_PROC()
787 case INT_SNITLB_MISS:
788 case INT_SNITLB_MISS_DWNCL:
789 async = &current->thread.sn_async_tlb;
790 break;
791#endif
792 default: 821 default:
793 async = NULL; 822 async = NULL;
794 } 823 }
@@ -821,14 +850,22 @@ void do_page_fault(struct pt_regs *regs, int fault_num,
821} 850}
822 851
823 852
824#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC() 853#if CHIP_HAS_TILE_DMA()
825/* 854/*
826 * Check an async_tlb structure to see if a deferred fault is waiting, 855 * This routine effectively re-issues asynchronous page faults
827 * and if so pass it to the page-fault code. 856 * when we are returning to user space.
828 */ 857 */
829static void handle_async_page_fault(struct pt_regs *regs, 858void do_async_page_fault(struct pt_regs *regs)
830 struct async_tlb *async)
831{ 859{
860 struct async_tlb *async = &current->thread.dma_async_tlb;
861
862 /*
863 * Clear thread flag early. If we re-interrupt while processing
864 * code here, we will reset it and recall this routine before
865 * returning to user space.
866 */
867 clear_thread_flag(TIF_ASYNC_TLB);
868
832 if (async->fault_num) { 869 if (async->fault_num) {
833 /* 870 /*
834 * Clear async->fault_num before calling the page-fault 871 * Clear async->fault_num before calling the page-fault
@@ -842,35 +879,15 @@ static void handle_async_page_fault(struct pt_regs *regs,
842 async->address, async->is_write); 879 async->address, async->is_write);
843 } 880 }
844} 881}
845 882#endif /* CHIP_HAS_TILE_DMA() */
846/*
847 * This routine effectively re-issues asynchronous page faults
848 * when we are returning to user space.
849 */
850void do_async_page_fault(struct pt_regs *regs)
851{
852 /*
853 * Clear thread flag early. If we re-interrupt while processing
854 * code here, we will reset it and recall this routine before
855 * returning to user space.
856 */
857 clear_thread_flag(TIF_ASYNC_TLB);
858
859#if CHIP_HAS_TILE_DMA()
860 handle_async_page_fault(regs, &current->thread.dma_async_tlb);
861#endif
862#if CHIP_HAS_SN_PROC()
863 handle_async_page_fault(regs, &current->thread.sn_async_tlb);
864#endif
865}
866#endif /* CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC() */
867 883
868 884
869void vmalloc_sync_all(void) 885void vmalloc_sync_all(void)
870{ 886{
871#ifdef __tilegx__ 887#ifdef __tilegx__
872 /* Currently all L1 kernel pmd's are static and shared. */ 888 /* Currently all L1 kernel pmd's are static and shared. */
873 BUG_ON(pgd_index(VMALLOC_END) != pgd_index(VMALLOC_START)); 889 BUILD_BUG_ON(pgd_index(VMALLOC_END - PAGE_SIZE) !=
890 pgd_index(VMALLOC_START));
874#else 891#else
875 /* 892 /*
876 * Note that races in the updates of insync and start aren't 893 * Note that races in the updates of insync and start aren't
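
On tilegx the write argument to do_page_fault() is overloaded: bit 0 remains the write flag, and any higher bits carry the faulting PC from the low-level unalign handler (instructions are at least 2-byte aligned, so the bit is free). A minimal sketch of that pack/unpack idiom:

    #include <stdint.h>

    static inline uintptr_t pack_pc_write(uintptr_t pc, int is_write)
    {
            return (pc & ~(uintptr_t)1) | (is_write & 1);
    }

    static inline uintptr_t unpack_pc(uintptr_t write)
    {
            return write & ~(uintptr_t)1;   /* zero when no PC was packed */
    }

    static inline int unpack_is_write(uintptr_t write)
    {
            return (int)(write & 1);
    }
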
diff --git a/arch/tile/mm/highmem.c b/arch/tile/mm/highmem.c
index 347d123b14be..0dc218294770 100644
--- a/arch/tile/mm/highmem.c
+++ b/arch/tile/mm/highmem.c
@@ -114,7 +114,6 @@ static void kmap_atomic_register(struct page *page, int type,
114 114
115 list_add(&amp->list, &amp_list); 115 list_add(&amp->list, &amp_list);
116 set_pte(ptep, pteval); 116 set_pte(ptep, pteval);
117 arch_flush_lazy_mmu_mode();
118 117
119 spin_unlock(&amp_lock); 118 spin_unlock(&amp_lock);
120 homecache_kpte_unlock(flags); 119 homecache_kpte_unlock(flags);
@@ -259,7 +258,6 @@ void __kunmap_atomic(void *kvaddr)
259 BUG_ON(vaddr >= (unsigned long)high_memory); 258 BUG_ON(vaddr >= (unsigned long)high_memory);
260 } 259 }
261 260
262 arch_flush_lazy_mmu_mode();
263 pagefault_enable(); 261 pagefault_enable();
264} 262}
265EXPORT_SYMBOL(__kunmap_atomic); 263EXPORT_SYMBOL(__kunmap_atomic);
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
index 1ae911939a18..004ba568d93f 100644
--- a/arch/tile/mm/homecache.c
+++ b/arch/tile/mm/homecache.c
@@ -43,12 +43,9 @@
43#include "migrate.h" 43#include "migrate.h"
44 44
45 45
46#if CHIP_HAS_COHERENT_LOCAL_CACHE()
47
48/* 46/*
49 * The noallocl2 option suppresses all use of the L2 cache to cache 47 * The noallocl2 option suppresses all use of the L2 cache to cache
50 * locally from a remote home. There's no point in using it if we 48 * locally from a remote home.
51 * don't have coherent local caching, though.
52 */ 49 */
53static int __write_once noallocl2; 50static int __write_once noallocl2;
54static int __init set_noallocl2(char *str) 51static int __init set_noallocl2(char *str)
@@ -58,12 +55,6 @@ static int __init set_noallocl2(char *str)
58} 55}
59early_param("noallocl2", set_noallocl2); 56early_param("noallocl2", set_noallocl2);
60 57
61#else
62
63#define noallocl2 0
64
65#endif
66
67 58
68/* 59/*
69 * Update the irq_stat for cpus that we are going to interrupt 60 * Update the irq_stat for cpus that we are going to interrupt
@@ -172,7 +163,8 @@ void flush_remote(unsigned long cache_pfn, unsigned long cache_control,
172 163
173static void homecache_finv_page_va(void* va, int home) 164static void homecache_finv_page_va(void* va, int home)
174{ 165{
175 if (home == smp_processor_id()) { 166 int cpu = get_cpu();
167 if (home == cpu) {
176 finv_buffer_local(va, PAGE_SIZE); 168 finv_buffer_local(va, PAGE_SIZE);
177 } else if (home == PAGE_HOME_HASH) { 169 } else if (home == PAGE_HOME_HASH) {
178 finv_buffer_remote(va, PAGE_SIZE, 1); 170 finv_buffer_remote(va, PAGE_SIZE, 1);
@@ -180,6 +172,7 @@ static void homecache_finv_page_va(void* va, int home)
180 BUG_ON(home < 0 || home >= NR_CPUS); 172 BUG_ON(home < 0 || home >= NR_CPUS);
181 finv_buffer_remote(va, PAGE_SIZE, 0); 173 finv_buffer_remote(va, PAGE_SIZE, 0);
182 } 174 }
175 put_cpu();
183} 176}
184 177
185void homecache_finv_map_page(struct page *page, int home) 178void homecache_finv_map_page(struct page *page, int home)
@@ -198,7 +191,7 @@ void homecache_finv_map_page(struct page *page, int home)
198#else 191#else
199 va = __fix_to_virt(FIX_HOMECACHE_BEGIN + smp_processor_id()); 192 va = __fix_to_virt(FIX_HOMECACHE_BEGIN + smp_processor_id());
200#endif 193#endif
201 ptep = virt_to_pte(NULL, (unsigned long)va); 194 ptep = virt_to_kpte(va);
202 pte = pfn_pte(page_to_pfn(page), PAGE_KERNEL); 195 pte = pfn_pte(page_to_pfn(page), PAGE_KERNEL);
203 __set_pte(ptep, pte_set_home(pte, home)); 196 __set_pte(ptep, pte_set_home(pte, home));
204 homecache_finv_page_va((void *)va, home); 197 homecache_finv_page_va((void *)va, home);
@@ -263,10 +256,8 @@ static int pte_to_home(pte_t pte)
263 return PAGE_HOME_INCOHERENT; 256 return PAGE_HOME_INCOHERENT;
264 case HV_PTE_MODE_UNCACHED: 257 case HV_PTE_MODE_UNCACHED:
265 return PAGE_HOME_UNCACHED; 258 return PAGE_HOME_UNCACHED;
266#if CHIP_HAS_CBOX_HOME_MAP()
267 case HV_PTE_MODE_CACHE_HASH_L3: 259 case HV_PTE_MODE_CACHE_HASH_L3:
268 return PAGE_HOME_HASH; 260 return PAGE_HOME_HASH;
269#endif
270 } 261 }
271 panic("Bad PTE %#llx\n", pte.val); 262 panic("Bad PTE %#llx\n", pte.val);
272} 263}
@@ -323,20 +314,16 @@ pte_t pte_set_home(pte_t pte, int home)
323 HV_PTE_MODE_CACHE_NO_L3); 314 HV_PTE_MODE_CACHE_NO_L3);
324 } 315 }
325 } else 316 } else
326#if CHIP_HAS_CBOX_HOME_MAP()
327 if (hash_default) 317 if (hash_default)
328 pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_HASH_L3); 318 pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_HASH_L3);
329 else 319 else
330#endif
331 pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3); 320 pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
332 pte = hv_pte_set_nc(pte); 321 pte = hv_pte_set_nc(pte);
333 break; 322 break;
334 323
335#if CHIP_HAS_CBOX_HOME_MAP()
336 case PAGE_HOME_HASH: 324 case PAGE_HOME_HASH:
337 pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_HASH_L3); 325 pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_HASH_L3);
338 break; 326 break;
339#endif
340 327
341 default: 328 default:
342 BUG_ON(home < 0 || home >= NR_CPUS || 329 BUG_ON(home < 0 || home >= NR_CPUS ||
@@ -346,7 +333,6 @@ pte_t pte_set_home(pte_t pte, int home)
346 break; 333 break;
347 } 334 }
348 335
349#if CHIP_HAS_NC_AND_NOALLOC_BITS()
350 if (noallocl2) 336 if (noallocl2)
351 pte = hv_pte_set_no_alloc_l2(pte); 337 pte = hv_pte_set_no_alloc_l2(pte);
352 338
@@ -355,7 +341,6 @@ pte_t pte_set_home(pte_t pte, int home)
355 hv_pte_get_mode(pte) == HV_PTE_MODE_CACHE_NO_L3) { 341 hv_pte_get_mode(pte) == HV_PTE_MODE_CACHE_NO_L3) {
356 pte = hv_pte_set_mode(pte, HV_PTE_MODE_UNCACHED); 342 pte = hv_pte_set_mode(pte, HV_PTE_MODE_UNCACHED);
357 } 343 }
358#endif
359 344
360 /* Checking this case here gives a better panic than from the hv. */ 345 /* Checking this case here gives a better panic than from the hv. */
361 BUG_ON(hv_pte_get_mode(pte) == 0); 346 BUG_ON(hv_pte_get_mode(pte) == 0);
@@ -371,19 +356,13 @@ EXPORT_SYMBOL(pte_set_home);
371 * so they're not suitable for anything but infrequent use. 356 * so they're not suitable for anything but infrequent use.
372 */ 357 */
373 358
374#if CHIP_HAS_CBOX_HOME_MAP()
375static inline int initial_page_home(void) { return PAGE_HOME_HASH; }
376#else
377static inline int initial_page_home(void) { return 0; }
378#endif
379
380int page_home(struct page *page) 359int page_home(struct page *page)
381{ 360{
382 if (PageHighMem(page)) { 361 if (PageHighMem(page)) {
383 return initial_page_home(); 362 return PAGE_HOME_HASH;
384 } else { 363 } else {
385 unsigned long kva = (unsigned long)page_address(page); 364 unsigned long kva = (unsigned long)page_address(page);
386 return pte_to_home(*virt_to_pte(NULL, kva)); 365 return pte_to_home(*virt_to_kpte(kva));
387 } 366 }
388} 367}
389EXPORT_SYMBOL(page_home); 368EXPORT_SYMBOL(page_home);
@@ -402,7 +381,7 @@ void homecache_change_page_home(struct page *page, int order, int home)
402 NULL, 0); 381 NULL, 0);
403 382
404 for (i = 0; i < pages; ++i, kva += PAGE_SIZE) { 383 for (i = 0; i < pages; ++i, kva += PAGE_SIZE) {
405 pte_t *ptep = virt_to_pte(NULL, kva); 384 pte_t *ptep = virt_to_kpte(kva);
406 pte_t pteval = *ptep; 385 pte_t pteval = *ptep;
407 BUG_ON(!pte_present(pteval) || pte_huge(pteval)); 386 BUG_ON(!pte_present(pteval) || pte_huge(pteval));
408 __set_pte(ptep, pte_set_home(pteval, home)); 387 __set_pte(ptep, pte_set_home(pteval, home));
@@ -436,7 +415,7 @@ struct page *homecache_alloc_pages_node(int nid, gfp_t gfp_mask,
436void __homecache_free_pages(struct page *page, unsigned int order) 415void __homecache_free_pages(struct page *page, unsigned int order)
437{ 416{
438 if (put_page_testzero(page)) { 417 if (put_page_testzero(page)) {
439 homecache_change_page_home(page, order, initial_page_home()); 418 homecache_change_page_home(page, order, PAGE_HOME_HASH);
440 if (order == 0) { 419 if (order == 0) {
441 free_hot_cold_page(page, 0); 420 free_hot_cold_page(page, 0);
442 } else { 421 } else {
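
The get_cpu()/put_cpu() change in homecache_finv_page_va() closes a preemption race: a bare smp_processor_id() can name a CPU the task has already migrated away from by the time the flush runs. A minimal sketch of the pattern, assuming the flush helpers above (hash-home branch omitted):

    static void finv_page_on_home(void *va, int home)
    {
            int cpu = get_cpu();    /* disables preemption, returns cpu id */

            if (home == cpu)
                    finv_buffer_local(va, PAGE_SIZE);
            else
                    finv_buffer_remote(va, PAGE_SIZE, 0);

            put_cpu();              /* re-enables preemption */
    }
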
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
index 650ccff8378c..e514899e1100 100644
--- a/arch/tile/mm/hugetlbpage.c
+++ b/arch/tile/mm/hugetlbpage.c
@@ -49,38 +49,6 @@ int huge_shift[HUGE_SHIFT_ENTRIES] = {
49#endif 49#endif
50}; 50};
51 51
52/*
53 * This routine is a hybrid of pte_alloc_map() and pte_alloc_kernel().
54 * It assumes that L2 PTEs are never in HIGHMEM (we don't support that).
55 * It locks the user pagetable, and bumps up the mm->nr_ptes field,
56 * but otherwise allocate the page table using the kernel versions.
57 */
58static pte_t *pte_alloc_hugetlb(struct mm_struct *mm, pmd_t *pmd,
59 unsigned long address)
60{
61 pte_t *new;
62
63 if (pmd_none(*pmd)) {
64 new = pte_alloc_one_kernel(mm, address);
65 if (!new)
66 return NULL;
67
68 smp_wmb(); /* See comment in __pte_alloc */
69
70 spin_lock(&mm->page_table_lock);
71 if (likely(pmd_none(*pmd))) { /* Has another populated it ? */
72 mm->nr_ptes++;
73 pmd_populate_kernel(mm, pmd, new);
74 new = NULL;
75 } else
76 VM_BUG_ON(pmd_trans_splitting(*pmd));
77 spin_unlock(&mm->page_table_lock);
78 if (new)
79 pte_free_kernel(mm, new);
80 }
81
82 return pte_offset_kernel(pmd, address);
83}
84#endif 52#endif
85 53
86pte_t *huge_pte_alloc(struct mm_struct *mm, 54pte_t *huge_pte_alloc(struct mm_struct *mm,
@@ -109,7 +77,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
109 else { 77 else {
110 if (sz != PAGE_SIZE << huge_shift[HUGE_SHIFT_PAGE]) 78 if (sz != PAGE_SIZE << huge_shift[HUGE_SHIFT_PAGE])
111 panic("Unexpected page size %#lx\n", sz); 79 panic("Unexpected page size %#lx\n", sz);
112 return pte_alloc_hugetlb(mm, pmd, addr); 80 return pte_alloc_map(mm, NULL, pmd, addr);
113 } 81 }
114 } 82 }
115#else 83#else
@@ -144,14 +112,14 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
144 112
145 /* Get the top-level page table entry. */ 113 /* Get the top-level page table entry. */
146 pgd = (pgd_t *)get_pte((pte_t *)mm->pgd, pgd_index(addr), 0); 114 pgd = (pgd_t *)get_pte((pte_t *)mm->pgd, pgd_index(addr), 0);
147 if (!pgd_present(*pgd))
148 return NULL;
149 115
150 /* We don't have four levels. */ 116 /* We don't have four levels. */
151 pud = pud_offset(pgd, addr); 117 pud = pud_offset(pgd, addr);
152#ifndef __PAGETABLE_PUD_FOLDED 118#ifndef __PAGETABLE_PUD_FOLDED
153# error support fourth page table level 119# error support fourth page table level
154#endif 120#endif
121 if (!pud_present(*pud))
122 return NULL;
155 123
156 /* Check for an L0 huge PTE, if we have three levels. */ 124 /* Check for an L0 huge PTE, if we have three levels. */
157#ifndef __PAGETABLE_PMD_FOLDED 125#ifndef __PAGETABLE_PMD_FOLDED
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index e182958c707d..4e316deb92fd 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -106,10 +106,8 @@ pte_t *get_prealloc_pte(unsigned long pfn)
106 */ 106 */
107static int initial_heap_home(void) 107static int initial_heap_home(void)
108{ 108{
109#if CHIP_HAS_CBOX_HOME_MAP()
110 if (hash_default) 109 if (hash_default)
111 return PAGE_HOME_HASH; 110 return PAGE_HOME_HASH;
112#endif
113 return smp_processor_id(); 111 return smp_processor_id();
114} 112}
115 113
@@ -190,14 +188,11 @@ static void __init page_table_range_init(unsigned long start,
190} 188}
191 189
192 190
193#if CHIP_HAS_CBOX_HOME_MAP()
194
195static int __initdata ktext_hash = 1; /* .text pages */ 191static int __initdata ktext_hash = 1; /* .text pages */
196static int __initdata kdata_hash = 1; /* .data and .bss pages */ 192static int __initdata kdata_hash = 1; /* .data and .bss pages */
197int __write_once hash_default = 1; /* kernel allocator pages */ 193int __write_once hash_default = 1; /* kernel allocator pages */
198EXPORT_SYMBOL(hash_default); 194EXPORT_SYMBOL(hash_default);
199int __write_once kstack_hash = 1; /* if no homecaching, use h4h */ 195int __write_once kstack_hash = 1; /* if no homecaching, use h4h */
200#endif /* CHIP_HAS_CBOX_HOME_MAP */
201 196
202/* 197/*
 203 * CPUs to use for striping the pages of kernel data. If hash-for-home 198 * CPUs to use for striping the pages of kernel data. If hash-for-home
@@ -215,14 +210,12 @@ int __write_once kdata_huge; /* if no homecaching, small pages */
215static pgprot_t __init construct_pgprot(pgprot_t prot, int home) 210static pgprot_t __init construct_pgprot(pgprot_t prot, int home)
216{ 211{
217 prot = pte_set_home(prot, home); 212 prot = pte_set_home(prot, home);
218#if CHIP_HAS_CBOX_HOME_MAP()
219 if (home == PAGE_HOME_IMMUTABLE) { 213 if (home == PAGE_HOME_IMMUTABLE) {
220 if (ktext_hash) 214 if (ktext_hash)
221 prot = hv_pte_set_mode(prot, HV_PTE_MODE_CACHE_HASH_L3); 215 prot = hv_pte_set_mode(prot, HV_PTE_MODE_CACHE_HASH_L3);
222 else 216 else
223 prot = hv_pte_set_mode(prot, HV_PTE_MODE_CACHE_NO_L3); 217 prot = hv_pte_set_mode(prot, HV_PTE_MODE_CACHE_NO_L3);
224 } 218 }
225#endif
226 return prot; 219 return prot;
227} 220}
228 221
@@ -234,22 +227,17 @@ static pgprot_t __init init_pgprot(ulong address)
234{ 227{
235 int cpu; 228 int cpu;
236 unsigned long page; 229 unsigned long page;
237 enum { CODE_DELTA = MEM_SV_INTRPT - PAGE_OFFSET }; 230 enum { CODE_DELTA = MEM_SV_START - PAGE_OFFSET };
238 231
239#if CHIP_HAS_CBOX_HOME_MAP()
240 /* For kdata=huge, everything is just hash-for-home. */ 232 /* For kdata=huge, everything is just hash-for-home. */
241 if (kdata_huge) 233 if (kdata_huge)
242 return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH); 234 return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH);
243#endif
244 235
245 /* We map the aliased pages of permanent text inaccessible. */ 236 /* We map the aliased pages of permanent text inaccessible. */
246 if (address < (ulong) _sinittext - CODE_DELTA) 237 if (address < (ulong) _sinittext - CODE_DELTA)
247 return PAGE_NONE; 238 return PAGE_NONE;
248 239
249 /* 240 /* We map read-only data non-coherent for performance. */
250 * We map read-only data non-coherent for performance. We could
251 * use neighborhood caching on TILE64, but it's not clear it's a win.
252 */
253 if ((address >= (ulong) __start_rodata && 241 if ((address >= (ulong) __start_rodata &&
254 address < (ulong) __end_rodata) || 242 address < (ulong) __end_rodata) ||
255 address == (ulong) empty_zero_page) { 243 address == (ulong) empty_zero_page) {
@@ -257,12 +245,10 @@ static pgprot_t __init init_pgprot(ulong address)
257 } 245 }
258 246
259#ifndef __tilegx__ 247#ifndef __tilegx__
260#if !ATOMIC_LOCKS_FOUND_VIA_TABLE()
261 /* Force the atomic_locks[] array page to be hash-for-home. */ 248 /* Force the atomic_locks[] array page to be hash-for-home. */
262 if (address == (ulong) atomic_locks) 249 if (address == (ulong) atomic_locks)
263 return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH); 250 return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH);
264#endif 251#endif
265#endif
266 252
267 /* 253 /*
268 * Everything else that isn't data or bss is heap, so mark it 254 * Everything else that isn't data or bss is heap, so mark it
@@ -280,19 +266,9 @@ static pgprot_t __init init_pgprot(ulong address)
280 if (address >= (ulong) _end || address < (ulong) _einitdata) 266 if (address >= (ulong) _end || address < (ulong) _einitdata)
281 return construct_pgprot(PAGE_KERNEL, initial_heap_home()); 267 return construct_pgprot(PAGE_KERNEL, initial_heap_home());
282 268
283#if CHIP_HAS_CBOX_HOME_MAP()
284 /* Use hash-for-home if requested for data/bss. */ 269 /* Use hash-for-home if requested for data/bss. */
285 if (kdata_hash) 270 if (kdata_hash)
286 return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH); 271 return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH);
287#endif
288
289 /*
290 * Make the w1data homed like heap to start with, to avoid
291 * making it part of the page-striped data area when we're just
292 * going to convert it to read-only soon anyway.
293 */
294 if (address >= (ulong)__w1data_begin && address < (ulong)__w1data_end)
295 return construct_pgprot(PAGE_KERNEL, initial_heap_home());
296 272
297 /* 273 /*
298 * Otherwise we just hand out consecutive cpus. To avoid 274 * Otherwise we just hand out consecutive cpus. To avoid
@@ -301,7 +277,7 @@ static pgprot_t __init init_pgprot(ulong address)
301 * the requested address, while walking cpu home around kdata_mask. 277 * the requested address, while walking cpu home around kdata_mask.
302 * This is typically no more than a dozen or so iterations. 278 * This is typically no more than a dozen or so iterations.
303 */ 279 */
304 page = (((ulong)__w1data_end) + PAGE_SIZE - 1) & PAGE_MASK; 280 page = (((ulong)__end_rodata) + PAGE_SIZE - 1) & PAGE_MASK;
305 BUG_ON(address < page || address >= (ulong)_end); 281 BUG_ON(address < page || address >= (ulong)_end);
306 cpu = cpumask_first(&kdata_mask); 282 cpu = cpumask_first(&kdata_mask);
307 for (; page < address; page += PAGE_SIZE) { 283 for (; page < address; page += PAGE_SIZE) {
@@ -311,11 +287,9 @@ static pgprot_t __init init_pgprot(ulong address)
311 if (page == (ulong)empty_zero_page) 287 if (page == (ulong)empty_zero_page)
312 continue; 288 continue;
313#ifndef __tilegx__ 289#ifndef __tilegx__
314#if !ATOMIC_LOCKS_FOUND_VIA_TABLE()
315 if (page == (ulong)atomic_locks) 290 if (page == (ulong)atomic_locks)
316 continue; 291 continue;
317#endif 292#endif
318#endif
319 cpu = cpumask_next(cpu, &kdata_mask); 293 cpu = cpumask_next(cpu, &kdata_mask);
320 if (cpu == NR_CPUS) 294 if (cpu == NR_CPUS)
321 cpu = cpumask_first(&kdata_mask); 295 cpu = cpumask_first(&kdata_mask);
@@ -358,7 +332,7 @@ static int __init setup_ktext(char *str)
358 332
359 ktext_arg_seen = 1; 333 ktext_arg_seen = 1;
360 334
361 /* Default setting on Tile64: use a huge page */ 335 /* Default setting: use a huge page */
362 if (strcmp(str, "huge") == 0) 336 if (strcmp(str, "huge") == 0)
363 pr_info("ktext: using one huge locally cached page\n"); 337 pr_info("ktext: using one huge locally cached page\n");
364 338
@@ -404,10 +378,8 @@ static inline pgprot_t ktext_set_nocache(pgprot_t prot)
404{ 378{
405 if (!ktext_nocache) 379 if (!ktext_nocache)
406 prot = hv_pte_set_nc(prot); 380 prot = hv_pte_set_nc(prot);
407#if CHIP_HAS_NC_AND_NOALLOC_BITS()
408 else 381 else
409 prot = hv_pte_set_no_alloc_l2(prot); 382 prot = hv_pte_set_no_alloc_l2(prot);
410#endif
411 return prot; 383 return prot;
412} 384}
413 385
@@ -440,7 +412,6 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
440 struct cpumask kstripe_mask; 412 struct cpumask kstripe_mask;
441 int rc, i; 413 int rc, i;
442 414
443#if CHIP_HAS_CBOX_HOME_MAP()
444 if (ktext_arg_seen && ktext_hash) { 415 if (ktext_arg_seen && ktext_hash) {
445 pr_warning("warning: \"ktext\" boot argument ignored" 416 pr_warning("warning: \"ktext\" boot argument ignored"
446 " if \"kcache_hash\" sets up text hash-for-home\n"); 417 " if \"kcache_hash\" sets up text hash-for-home\n");
@@ -457,7 +428,6 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
457 " kcache_hash=all or =allbutstack\n"); 428 " kcache_hash=all or =allbutstack\n");
458 kdata_huge = 0; 429 kdata_huge = 0;
459 } 430 }
460#endif
461 431
462 /* 432 /*
463 * Set up a mask for cpus to use for kernel striping. 433 * Set up a mask for cpus to use for kernel striping.
@@ -538,7 +508,7 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
538 } 508 }
539 } 509 }
540 510
541 address = MEM_SV_INTRPT; 511 address = MEM_SV_START;
542 pmd = get_pmd(pgtables, address); 512 pmd = get_pmd(pgtables, address);
543 pfn = 0; /* code starts at PA 0 */ 513 pfn = 0; /* code starts at PA 0 */
544 if (ktext_small) { 514 if (ktext_small) {
@@ -585,13 +555,11 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
585 } else { 555 } else {
586 pte_t pteval = pfn_pte(0, PAGE_KERNEL_EXEC); 556 pte_t pteval = pfn_pte(0, PAGE_KERNEL_EXEC);
587 pteval = pte_mkhuge(pteval); 557 pteval = pte_mkhuge(pteval);
588#if CHIP_HAS_CBOX_HOME_MAP()
589 if (ktext_hash) { 558 if (ktext_hash) {
590 pteval = hv_pte_set_mode(pteval, 559 pteval = hv_pte_set_mode(pteval,
591 HV_PTE_MODE_CACHE_HASH_L3); 560 HV_PTE_MODE_CACHE_HASH_L3);
592 pteval = ktext_set_nocache(pteval); 561 pteval = ktext_set_nocache(pteval);
593 } else 562 } else
594#endif /* CHIP_HAS_CBOX_HOME_MAP() */
595 if (cpumask_weight(&ktext_mask) == 1) { 563 if (cpumask_weight(&ktext_mask) == 1) {
596 pteval = set_remote_cache_cpu(pteval, 564 pteval = set_remote_cache_cpu(pteval,
597 cpumask_first(&ktext_mask)); 565 cpumask_first(&ktext_mask));
@@ -777,10 +745,7 @@ void __init paging_init(void)
777 745
778 kernel_physical_mapping_init(pgd_base); 746 kernel_physical_mapping_init(pgd_base);
779 747
780 /* 748 /* Fixed mappings, only the page table structure has to be created. */
781 * Fixed mappings, only the page table structure has to be
782 * created - mappings will be set by set_fixmap():
783 */
784 page_table_range_init(fix_to_virt(__end_of_fixed_addresses - 1), 749 page_table_range_init(fix_to_virt(__end_of_fixed_addresses - 1),
785 FIXADDR_TOP, pgd_base); 750 FIXADDR_TOP, pgd_base);
786 751
@@ -941,26 +906,6 @@ void __init pgtable_cache_init(void)
941 panic("pgtable_cache_init(): Cannot create pgd cache"); 906 panic("pgtable_cache_init(): Cannot create pgd cache");
942} 907}
943 908
944#if !CHIP_HAS_COHERENT_LOCAL_CACHE()
945/*
946 * The __w1data area holds data that is only written during initialization,
947 * and is read-only and thus freely cacheable thereafter. Fix the page
948 * table entries that cover that region accordingly.
949 */
950static void mark_w1data_ro(void)
951{
952 /* Loop over page table entries */
953 unsigned long addr = (unsigned long)__w1data_begin;
954 BUG_ON((addr & (PAGE_SIZE-1)) != 0);
955 for (; addr <= (unsigned long)__w1data_end - 1; addr += PAGE_SIZE) {
956 unsigned long pfn = kaddr_to_pfn((void *)addr);
957 pte_t *ptep = virt_to_pte(NULL, addr);
958 BUG_ON(pte_huge(*ptep)); /* not relevant for kdata_huge */
959 set_pte_at(&init_mm, addr, ptep, pfn_pte(pfn, PAGE_KERNEL_RO));
960 }
961}
962#endif
963
964#ifdef CONFIG_DEBUG_PAGEALLOC 909#ifdef CONFIG_DEBUG_PAGEALLOC
965static long __write_once initfree; 910static long __write_once initfree;
966#else 911#else
@@ -1000,7 +945,7 @@ static void free_init_pages(char *what, unsigned long begin, unsigned long end)
1000 */ 945 */
1001 int pfn = kaddr_to_pfn((void *)addr); 946 int pfn = kaddr_to_pfn((void *)addr);
1002 struct page *page = pfn_to_page(pfn); 947 struct page *page = pfn_to_page(pfn);
1003 pte_t *ptep = virt_to_pte(NULL, addr); 948 pte_t *ptep = virt_to_kpte(addr);
1004 if (!initfree) { 949 if (!initfree) {
1005 /* 950 /*
1006 * If debugging page accesses then do not free 951 * If debugging page accesses then do not free
@@ -1024,15 +969,11 @@ static void free_init_pages(char *what, unsigned long begin, unsigned long end)
1024 969
1025void free_initmem(void) 970void free_initmem(void)
1026{ 971{
1027 const unsigned long text_delta = MEM_SV_INTRPT - PAGE_OFFSET; 972 const unsigned long text_delta = MEM_SV_START - PAGE_OFFSET;
1028 973
1029 /* 974 /*
1030 * Evict the dirty initdata on the boot cpu, evict the w1data 975 * Evict the cache on all cores to avoid incoherence.
1031 * wherever it's homed, and evict all the init code everywhere. 976 * We are guaranteed that no one will touch the init pages any more.
1032 * We are guaranteed that no one will touch the init pages any
1033 * more, and although other cpus may be touching the w1data,
1034 * we only actually change the caching on tile64, which won't
1035 * be keeping local copies in the other tiles' caches anyway.
1036 */ 977 */
1037 homecache_evict(&cpu_cacheable_map); 978 homecache_evict(&cpu_cacheable_map);
1038 979
@@ -1043,26 +984,11 @@ void free_initmem(void)
1043 984
1044 /* 985 /*
1045 * Free the pages mapped from 0xc0000000 that correspond to code 986 * Free the pages mapped from 0xc0000000 that correspond to code
1046 * pages from MEM_SV_INTRPT that we won't use again after init. 987 * pages from MEM_SV_START that we won't use again after init.
1047 */ 988 */
1048 free_init_pages("unused kernel text", 989 free_init_pages("unused kernel text",
1049 (unsigned long)_sinittext - text_delta, 990 (unsigned long)_sinittext - text_delta,
1050 (unsigned long)_einittext - text_delta); 991 (unsigned long)_einittext - text_delta);
1051
1052#if !CHIP_HAS_COHERENT_LOCAL_CACHE()
1053 /*
1054 * Upgrade the .w1data section to globally cached.
1055 * We don't do this on tilepro, since the cache architecture
1056 * pretty much makes it irrelevant, and in any case we end
1057 * up having racing issues with other tiles that may touch
1058 * the data after we flush the cache but before we update
1059 * the PTEs and flush the TLBs, causing sharer shootdowns
1060 * later. Even though this is to clean data, it seems like
1061 * an unnecessary complication.
1062 */
1063 mark_w1data_ro();
1064#endif
1065
1066 /* Do a global TLB flush so everyone sees the changes. */ 992 /* Do a global TLB flush so everyone sees the changes. */
1067 flush_tlb_all(); 993 flush_tlb_all();
1068} 994}
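
With the w1data special case gone, init_pgprot() stripes everything after __end_rodata round-robin across kdata_mask by replaying the page walk on each call. A minimal sketch of that assignment, with the empty_zero_page and atomic_locks skips omitted:

    static int stripe_home_for(unsigned long address, unsigned long first_page,
                               const struct cpumask *mask)
    {
            unsigned long page;
            int cpu = cpumask_first(mask);

            /* One cpu per page, wrapping around the mask. */
            for (page = first_page; page < address; page += PAGE_SIZE) {
                    cpu = cpumask_next(cpu, mask);
                    if (cpu == NR_CPUS)
                            cpu = cpumask_first(mask);
            }
            return cpu;
    }
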
diff --git a/arch/tile/mm/migrate_32.S b/arch/tile/mm/migrate_32.S
index 5305814bf187..772085491bf9 100644
--- a/arch/tile/mm/migrate_32.S
+++ b/arch/tile/mm/migrate_32.S
@@ -136,7 +136,7 @@ STD_ENTRY(flush_and_install_context)
136 move r8, zero /* asids */ 136 move r8, zero /* asids */
137 move r9, zero /* asidcount */ 137 move r9, zero /* asidcount */
138 } 138 }
139 jal hv_flush_remote 139 jal _hv_flush_remote
140 bnz r0, .Ldone 140 bnz r0, .Ldone
141 141
142 /* Now install the new page table. */ 142 /* Now install the new page table. */
@@ -152,7 +152,7 @@ STD_ENTRY(flush_and_install_context)
152 move r4, r_asid 152 move r4, r_asid
153 moveli r5, HV_CTX_DIRECTIO | CTX_PAGE_FLAG 153 moveli r5, HV_CTX_DIRECTIO | CTX_PAGE_FLAG
154 } 154 }
155 jal hv_install_context 155 jal _hv_install_context
156 bnz r0, .Ldone 156 bnz r0, .Ldone
157 157
158 /* Finally, flush the TLB. */ 158 /* Finally, flush the TLB. */
diff --git a/arch/tile/mm/migrate_64.S b/arch/tile/mm/migrate_64.S
index 1d15b10833d1..a49eee38f872 100644
--- a/arch/tile/mm/migrate_64.S
+++ b/arch/tile/mm/migrate_64.S
@@ -123,7 +123,7 @@ STD_ENTRY(flush_and_install_context)
123 } 123 }
124 { 124 {
125 move r8, zero /* asidcount */ 125 move r8, zero /* asidcount */
126 jal hv_flush_remote 126 jal _hv_flush_remote
127 } 127 }
128 bnez r0, 1f 128 bnez r0, 1f
129 129
@@ -136,7 +136,7 @@ STD_ENTRY(flush_and_install_context)
136 move r2, r_asid 136 move r2, r_asid
137 moveli r3, HV_CTX_DIRECTIO | CTX_PAGE_FLAG 137 moveli r3, HV_CTX_DIRECTIO | CTX_PAGE_FLAG
138 } 138 }
139 jal hv_install_context 139 jal _hv_install_context
140 bnez r0, 1f 140 bnez r0, 1f
141 141
142 /* Finally, flush the TLB. */ 142 /* Finally, flush the TLB. */
diff --git a/arch/tile/mm/mmap.c b/arch/tile/mm/mmap.c
index d67d91ebf63e..851a94e6ae58 100644
--- a/arch/tile/mm/mmap.c
+++ b/arch/tile/mm/mmap.c
@@ -58,16 +58,36 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
58#else 58#else
59 int is_32bit = 0; 59 int is_32bit = 0;
60#endif 60#endif
61 unsigned long random_factor = 0UL;
62
63 /*
64 * 8 bits of randomness in 32bit mmaps, 24 address space bits
65 * 12 bits of randomness in 64bit mmaps, 28 address space bits
66 */
67 if (current->flags & PF_RANDOMIZE) {
68 if (is_32bit)
69 random_factor = get_random_int() % (1<<8);
70 else
71 random_factor = get_random_int() % (1<<12);
72
73 random_factor <<= PAGE_SHIFT;
74 }
61 75
62 /* 76 /*
63 * Use standard layout if the expected stack growth is unlimited 77 * Use standard layout if the expected stack growth is unlimited
64 * or we are running native 64 bits. 78 * or we are running native 64 bits.
65 */ 79 */
66 if (!is_32bit || rlimit(RLIMIT_STACK) == RLIM_INFINITY) { 80 if (rlimit(RLIMIT_STACK) == RLIM_INFINITY) {
67 mm->mmap_base = TASK_UNMAPPED_BASE; 81 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
68 mm->get_unmapped_area = arch_get_unmapped_area; 82 mm->get_unmapped_area = arch_get_unmapped_area;
69 } else { 83 } else {
70 mm->mmap_base = mmap_base(mm); 84 mm->mmap_base = mmap_base(mm);
71 mm->get_unmapped_area = arch_get_unmapped_area_topdown; 85 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
72 } 86 }
73} 87}
88
89unsigned long arch_randomize_brk(struct mm_struct *mm)
90{
91 unsigned long range_end = mm->brk + 0x02000000;
92 return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
93}
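
The randomness comment works out once tile's default 64 KB pages are assumed: 8 random page bits plus PAGE_SHIFT == 16 give 24 bits of address-space randomness for 32-bit tasks, and 12 bits give 28 for 64-bit tasks. A minimal sketch of the computation under that assumption:

    static unsigned long mmap_random_factor(int is_32bit, unsigned int rnd)
    {
            unsigned long factor = rnd % (1u << (is_32bit ? 8 : 12));

            return factor << 16;    /* PAGE_SHIFT, assumed 16 here */
    }
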
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index dfd63ce87327..2deaddf3e01f 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -83,55 +83,6 @@ void show_mem(unsigned int filter)
83 } 83 }
84} 84}
85 85
86/*
87 * Associate a virtual page frame with a given physical page frame
88 * and protection flags for that frame.
89 */
90static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
91{
92 pgd_t *pgd;
93 pud_t *pud;
94 pmd_t *pmd;
95 pte_t *pte;
96
97 pgd = swapper_pg_dir + pgd_index(vaddr);
98 if (pgd_none(*pgd)) {
99 BUG();
100 return;
101 }
102 pud = pud_offset(pgd, vaddr);
103 if (pud_none(*pud)) {
104 BUG();
105 return;
106 }
107 pmd = pmd_offset(pud, vaddr);
108 if (pmd_none(*pmd)) {
109 BUG();
110 return;
111 }
112 pte = pte_offset_kernel(pmd, vaddr);
113 /* <pfn,flags> stored as-is, to permit clearing entries */
114 set_pte(pte, pfn_pte(pfn, flags));
115
116 /*
117 * It's enough to flush this one mapping.
118 * This appears conservative since it is only called
119 * from __set_fixmap.
120 */
121 local_flush_tlb_page(NULL, vaddr, PAGE_SIZE);
122}
123
124void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
125{
126 unsigned long address = __fix_to_virt(idx);
127
128 if (idx >= __end_of_fixed_addresses) {
129 BUG();
130 return;
131 }
132 set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
133}
134
135/** 86/**
136 * shatter_huge_page() - ensure a given address is mapped by a small page. 87 * shatter_huge_page() - ensure a given address is mapped by a small page.
137 * 88 *
@@ -374,6 +325,17 @@ void ptep_set_wrprotect(struct mm_struct *mm,
374 325
375#endif 326#endif
376 327
328/*
329 * Return a pointer to the PTE that corresponds to the given
330 * address in the given page table. A NULL page table just uses
331 * the standard kernel page table; the preferred API in this case
332 * is virt_to_kpte().
333 *
334 * The returned pointer can point to a huge page in other levels
335 * of the page table than the bottom, if the huge page is present
336 * in the page table. For bottom-level PTEs, the returned pointer
337 * can point to a PTE that is either present or not.
338 */
377pte_t *virt_to_pte(struct mm_struct* mm, unsigned long addr) 339pte_t *virt_to_pte(struct mm_struct* mm, unsigned long addr)
378{ 340{
379 pgd_t *pgd; 341 pgd_t *pgd;
@@ -387,13 +349,23 @@ pte_t *virt_to_pte(struct mm_struct* mm, unsigned long addr)
387 pud = pud_offset(pgd, addr); 349 pud = pud_offset(pgd, addr);
388 if (!pud_present(*pud)) 350 if (!pud_present(*pud))
389 return NULL; 351 return NULL;
352 if (pud_huge_page(*pud))
353 return (pte_t *)pud;
390 pmd = pmd_offset(pud, addr); 354 pmd = pmd_offset(pud, addr);
391 if (pmd_huge_page(*pmd))
392 return (pte_t *)pmd;
393 if (!pmd_present(*pmd)) 355 if (!pmd_present(*pmd))
394 return NULL; 356 return NULL;
357 if (pmd_huge_page(*pmd))
358 return (pte_t *)pmd;
395 return pte_offset_kernel(pmd, addr); 359 return pte_offset_kernel(pmd, addr);
396} 360}
361EXPORT_SYMBOL(virt_to_pte);
362
363pte_t *virt_to_kpte(unsigned long kaddr)
364{
365 BUG_ON(kaddr < PAGE_OFFSET);
366 return virt_to_pte(NULL, kaddr);
367}
368EXPORT_SYMBOL(virt_to_kpte);
397 369
398pgprot_t set_remote_cache_cpu(pgprot_t prot, int cpu) 370pgprot_t set_remote_cache_cpu(pgprot_t prot, int cpu)
399{ 371{
@@ -568,7 +540,7 @@ void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
568 addr = area->addr; 540 addr = area->addr;
569 if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size, 541 if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
570 phys_addr, pgprot)) { 542 phys_addr, pgprot)) {
571 remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr)); 543 free_vm_area(area);
572 return NULL; 544 return NULL;
573 } 545 }
574 return (__force void __iomem *) (offset + (char *)addr); 546 return (__force void __iomem *) (offset + (char *)addr);
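
virt_to_kpte() is now the preferred interface for kernel addresses, and the reordered walk means the returned pointer may actually reference a pud- or pmd-level huge entry rather than a bottom-level PTE. A minimal usage sketch under that assumption:

    static int kaddr_is_huge_mapped(unsigned long kaddr)
    {
            pte_t *ptep = virt_to_kpte(kaddr);  /* BUGs below PAGE_OFFSET */

            if (ptep == NULL)
                    return 0;               /* not mapped at all */
            return pte_huge(*ptep);         /* may be a pmd/pud-level entry */
    }
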
diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
index 4febacd1a8a1..29b0301c18aa 100644
--- a/arch/um/include/asm/tlb.h
+++ b/arch/um/include/asm/tlb.h
@@ -45,10 +45,12 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
45} 45}
46 46
47static inline void 47static inline void
48tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush) 48tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
49{ 49{
50 tlb->mm = mm; 50 tlb->mm = mm;
51 tlb->fullmm = full_mm_flush; 51 tlb->start = start;
52 tlb->end = end;
53 tlb->fullmm = !(start | (end+1));
52 54
53 init_tlb_gather(tlb); 55 init_tlb_gather(tlb);
54} 56}
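
The new tlb_gather_mmu() signature recovers the full-mm flag from the range itself: by convention a full flush is requested as start == 0, end == ~0UL, and that is the only pair for which start | (end + 1) collapses to zero. A minimal sketch:

    static int is_full_mm_flush(unsigned long start, unsigned long end)
    {
            /* end + 1 wraps to 0 only for end == ~0UL. */
            return !(start | (end + 1));
    }

    /* is_full_mm_flush(0, ~0UL) == 1; is_full_mm_flush(0x1000, 0x2000) == 0 */
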
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index b32ebf92b0ce..5c0ed72c02a2 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -16,6 +16,7 @@ config X86_64
16 def_bool y 16 def_bool y
17 depends on 64BIT 17 depends on 64BIT
18 select X86_DEV_DMA_OPS 18 select X86_DEV_DMA_OPS
19 select ARCH_USE_CMPXCHG_LOCKREF
19 20
20### Arch settings 21### Arch settings
21config X86 22config X86
@@ -81,7 +82,6 @@ config X86
81 select HAVE_USER_RETURN_NOTIFIER 82 select HAVE_USER_RETURN_NOTIFIER
82 select ARCH_BINFMT_ELF_RANDOMIZE_PIE 83 select ARCH_BINFMT_ELF_RANDOMIZE_PIE
83 select HAVE_ARCH_JUMP_LABEL 84 select HAVE_ARCH_JUMP_LABEL
84 select HAVE_TEXT_POKE_SMP
85 select HAVE_GENERIC_HARDIRQS 85 select HAVE_GENERIC_HARDIRQS
86 select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE 86 select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
87 select SPARSE_IRQ 87 select SPARSE_IRQ
@@ -632,6 +632,7 @@ config PARAVIRT_DEBUG
632config PARAVIRT_SPINLOCKS 632config PARAVIRT_SPINLOCKS
633 bool "Paravirtualization layer for spinlocks" 633 bool "Paravirtualization layer for spinlocks"
634 depends on PARAVIRT && SMP 634 depends on PARAVIRT && SMP
635 select UNINLINE_SPIN_UNLOCK
635 ---help--- 636 ---help---
636 Paravirtualized spinlocks allow a pvops backend to replace the 637 Paravirtualized spinlocks allow a pvops backend to replace the
637 spinlock implementation with something virtualization-friendly 638 spinlock implementation with something virtualization-friendly
@@ -656,6 +657,15 @@ config KVM_GUEST
656 underlying device model, the host provides the guest with 657 underlying device model, the host provides the guest with
657 timing infrastructure such as time of day, and system time 658 timing infrastructure such as time of day, and system time
658 659
660config KVM_DEBUG_FS
661 bool "Enable debug information for KVM Guests in debugfs"
662 depends on KVM_GUEST && DEBUG_FS
663 default n
664 ---help---
 665 This option enables collection of various statistics for KVM guests.
 666 Statistics are displayed in the debugfs filesystem. Enabling this option
667 may incur significant overhead.
668
659source "arch/x86/lguest/Kconfig" 669source "arch/x86/lguest/Kconfig"
660 670
661config PARAVIRT_TIME_ACCOUNTING 671config PARAVIRT_TIME_ACCOUNTING
@@ -1344,8 +1354,12 @@ config ARCH_SELECT_MEMORY_MODEL
1344 depends on ARCH_SPARSEMEM_ENABLE 1354 depends on ARCH_SPARSEMEM_ENABLE
1345 1355
1346config ARCH_MEMORY_PROBE 1356config ARCH_MEMORY_PROBE
1347 def_bool y 1357 bool "Enable sysfs memory/probe interface"
1348 depends on X86_64 && MEMORY_HOTPLUG 1358 depends on X86_64 && MEMORY_HOTPLUG
1359 help
1360 This option enables a sysfs memory/probe interface for testing.
1361 See Documentation/memory-hotplug.txt for more information.
1362 If you are unsure how to answer this question, answer N.
1349 1363
1350config ARCH_PROC_KCORE_TEXT 1364config ARCH_PROC_KCORE_TEXT
1351 def_bool y 1365 def_bool y
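
Editorial aside: the probe file named in the new help text above is a write-only sysfs node; per Documentation/memory-hotplug.txt, writing the physical start address of a memory section hot-adds it. A hedged user-space sketch (requires root, this config option enabled, and a real section-aligned address; 0x40000000 here is purely hypothetical):

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/devices/system/memory/probe", "w");

            if (!f) {
                    perror("open probe");
                    return 1;
            }
            /* Hypothetical physical address of the memory to hot-add. */
            fprintf(f, "0x40000000\n");
            fclose(f);
            return 0;
    }
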
@@ -1627,9 +1641,9 @@ config KEXEC
1627 1641
1628 It is an ongoing process to be certain the hardware in a machine 1642 It is an ongoing process to be certain the hardware in a machine
1629 is properly shutdown, so do not be surprised if this code does not 1643 is properly shutdown, so do not be surprised if this code does not
1630 initially work for you. It may help to enable device hotplugging 1644 initially work for you. As of this writing the exact hardware
1631 support. As of this writing the exact hardware interface is 1645 interface is strongly in flux, so no good recommendation can be
1632 strongly in flux, so no good recommendation can be made. 1646 made.
1633 1647
1634config CRASH_DUMP 1648config CRASH_DUMP
1635 bool "kernel crash dumps" 1649 bool "kernel crash dumps"
@@ -1716,9 +1730,10 @@ config X86_NEED_RELOCS
1716 depends on X86_32 && RELOCATABLE 1730 depends on X86_32 && RELOCATABLE
1717 1731
1718config PHYSICAL_ALIGN 1732config PHYSICAL_ALIGN
1719 hex "Alignment value to which kernel should be aligned" if X86_32 1733 hex "Alignment value to which kernel should be aligned"
1720 default "0x1000000" 1734 default "0x1000000"
1721 range 0x2000 0x1000000 1735 range 0x2000 0x1000000 if X86_32
1736 range 0x200000 0x1000000 if X86_64
1722 ---help--- 1737 ---help---
1723 This value puts the alignment restrictions on physical address 1738 This value puts the alignment restrictions on physical address
1724 where kernel is loaded and run from. Kernel is compiled for an 1739 where kernel is loaded and run from. Kernel is compiled for an
@@ -1736,6 +1751,9 @@ config PHYSICAL_ALIGN
1736 end result is that kernel runs from a physical address meeting 1751 end result is that kernel runs from a physical address meeting
1737 above alignment restrictions. 1752 above alignment restrictions.
1738 1753
1754 On 32-bit this value must be a multiple of 0x2000. On 64-bit
1755 this value must be a multiple of 0x200000.
1756
1739 Don't change this unless you know what you are doing. 1757 Don't change this unless you know what you are doing.
1740 1758
1741config HOTPLUG_CPU 1759config HOTPLUG_CPU
@@ -2270,6 +2288,32 @@ config RAPIDIO
2270 2288
2271source "drivers/rapidio/Kconfig" 2289source "drivers/rapidio/Kconfig"
2272 2290
2291config X86_SYSFB
2292 bool "Mark VGA/VBE/EFI FB as generic system framebuffer"
2293 help
 2294 Firmware often provides initial graphics framebuffers so the BIOS,
2295 bootloader or kernel can show basic video-output during boot for
2296 user-guidance and debugging. Historically, x86 used the VESA BIOS
2297 Extensions and EFI-framebuffers for this, which are mostly limited
2298 to x86.
2299 This option, if enabled, marks VGA/VBE/EFI framebuffers as generic
2300 framebuffers so the new generic system-framebuffer drivers can be
2301 used on x86. If the framebuffer is not compatible with the generic
 2302 modes, it is advertised as a fallback platform framebuffer so legacy
2303 drivers like efifb, vesafb and uvesafb can pick it up.
2304 If this option is not selected, all system framebuffers are always
2305 marked as fallback platform framebuffers as usual.
2306
2307 Note: Legacy fbdev drivers, including vesafb, efifb, uvesafb, will
2308 not be able to pick up generic system framebuffers if this option
 2309 is selected. You are highly encouraged to enable simplefb as a
 2310 replacement if you select this option. simplefb can correctly deal
2311 with generic system framebuffers. But you should still keep vesafb
2312 and others enabled as fallback if a system framebuffer is
2313 incompatible with simplefb.
2314
2315 If unsure, say Y.
2316
2273endmenu 2317endmenu
2274 2318
2275 2319
@@ -2332,10 +2376,6 @@ config HAVE_ATOMIC_IOMAP
2332 def_bool y 2376 def_bool y
2333 depends on X86_32 2377 depends on X86_32
2334 2378
2335config HAVE_TEXT_POKE_SMP
2336 bool
2337 select STOP_MACHINE if SMP
2338
2339config X86_DEV_DMA_OPS 2379config X86_DEV_DMA_OPS
2340 bool 2380 bool
2341 depends on X86_64 || STA2X11 2381 depends on X86_64 || STA2X11
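
Editorial aside: the PHYSICAL_ALIGN hunk above now states the constraints directly in the help text: a multiple of 0x2000 on 32-bit and of 0x200000 on 64-bit, within the new per-arch ranges. A C11 compile-time sketch of those checks (the example value is just the Kconfig default, not a recommendation):

    #include <assert.h>

    #define PHYSICAL_ALIGN 0x1000000UL /* example: the Kconfig default */

    #ifdef __x86_64__
    static_assert(PHYSICAL_ALIGN % 0x200000 == 0, "64-bit: multiple of 0x200000");
    #else
    static_assert(PHYSICAL_ALIGN % 0x2000 == 0, "32-bit: multiple of 0x2000");
    #endif

    int main(void) { return 0; }
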
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 07639c656fcd..41250fb33985 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -16,6 +16,10 @@ endif
16# e.g.: obj-y += foo_$(BITS).o 16# e.g.: obj-y += foo_$(BITS).o
17export BITS 17export BITS
18 18
19ifdef CONFIG_X86_NEED_RELOCS
20 LDFLAGS_vmlinux := --emit-relocs
21endif
22
19ifeq ($(CONFIG_X86_32),y) 23ifeq ($(CONFIG_X86_32),y)
20 BITS := 32 24 BITS := 32
21 UTS_MACHINE := i386 25 UTS_MACHINE := i386
@@ -25,10 +29,6 @@ ifeq ($(CONFIG_X86_32),y)
25 KBUILD_AFLAGS += $(biarch) 29 KBUILD_AFLAGS += $(biarch)
26 KBUILD_CFLAGS += $(biarch) 30 KBUILD_CFLAGS += $(biarch)
27 31
28 ifdef CONFIG_RELOCATABLE
29 LDFLAGS_vmlinux := --emit-relocs
30 endif
31
32 KBUILD_CFLAGS += -msoft-float -mregparm=3 -freg-struct-return 32 KBUILD_CFLAGS += -msoft-float -mregparm=3 -freg-struct-return
33 33
34 # Never want PIC in a 32-bit kernel, prevent breakage with GCC built 34 # Never want PIC in a 32-bit kernel, prevent breakage with GCC built
diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
index 5b7531966b84..ef72baeff484 100644
--- a/arch/x86/boot/boot.h
+++ b/arch/x86/boot/boot.h
@@ -355,6 +355,7 @@ int strncmp(const char *cs, const char *ct, size_t count);
355size_t strnlen(const char *s, size_t maxlen); 355size_t strnlen(const char *s, size_t maxlen);
356unsigned int atou(const char *s); 356unsigned int atou(const char *s);
357unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base); 357unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base);
358size_t strlen(const char *s);
358 359
359/* tty.c */ 360/* tty.c */
360void puts(const char *); 361void puts(const char *);
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index d606463aa6d6..b7388a425f09 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -225,7 +225,7 @@ static void low_free(unsigned long size, unsigned long addr)
225 unsigned long nr_pages; 225 unsigned long nr_pages;
226 226
227 nr_pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE; 227 nr_pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
228 efi_call_phys2(sys_table->boottime->free_pages, addr, size); 228 efi_call_phys2(sys_table->boottime->free_pages, addr, nr_pages);
229} 229}
230 230
231static void find_bits(unsigned long mask, u8 *pos, u8 *size) 231static void find_bits(unsigned long mask, u8 *pos, u8 *size)
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index 1e3184f6072f..5d6f6891b188 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -181,8 +181,9 @@ relocated:
181/* 181/*
182 * Do the decompression, and jump to the new kernel.. 182 * Do the decompression, and jump to the new kernel..
183 */ 183 */
184 leal z_extract_offset_negative(%ebx), %ebp
185 /* push arguments for decompress_kernel: */ 184 /* push arguments for decompress_kernel: */
185 pushl $z_output_len /* decompressed length */
186 leal z_extract_offset_negative(%ebx), %ebp
186 pushl %ebp /* output address */ 187 pushl %ebp /* output address */
187 pushl $z_input_len /* input_len */ 188 pushl $z_input_len /* input_len */
188 leal input_data(%ebx), %eax 189 leal input_data(%ebx), %eax
@@ -191,33 +192,7 @@ relocated:
191 pushl %eax /* heap area */ 192 pushl %eax /* heap area */
192 pushl %esi /* real mode pointer */ 193 pushl %esi /* real mode pointer */
193 call decompress_kernel 194 call decompress_kernel
194 addl $20, %esp 195 addl $24, %esp
195
196#if CONFIG_RELOCATABLE
197/*
198 * Find the address of the relocations.
199 */
200 leal z_output_len(%ebp), %edi
201
202/*
203 * Calculate the delta between where vmlinux was compiled to run
204 * and where it was actually loaded.
205 */
206 movl %ebp, %ebx
207 subl $LOAD_PHYSICAL_ADDR, %ebx
208 jz 2f /* Nothing to be done if loaded at compiled addr. */
209/*
210 * Process relocations.
211 */
212
2131: subl $4, %edi
214 movl (%edi), %ecx
215 testl %ecx, %ecx
216 jz 2f
217 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
218 jmp 1b
2192:
220#endif
221 196
222/* 197/*
223 * Jump to the decompressed kernel. 198 * Jump to the decompressed kernel.
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 06e71c2c16bf..c337422b575d 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -338,6 +338,7 @@ relocated:
338 leaq input_data(%rip), %rdx /* input_data */ 338 leaq input_data(%rip), %rdx /* input_data */
339 movl $z_input_len, %ecx /* input_len */ 339 movl $z_input_len, %ecx /* input_len */
340 movq %rbp, %r8 /* output target address */ 340 movq %rbp, %r8 /* output target address */
341 movq $z_output_len, %r9 /* decompressed length */
341 call decompress_kernel 342 call decompress_kernel
342 popq %rsi 343 popq %rsi
343 344
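
Editorial aside: per the System V AMD64 ABI, the first six integer arguments travel in rdi, rsi, rdx, rcx, r8 and r9, which is why the added instruction loads the decompressed length into %r9: it becomes the new sixth parameter of decompress_kernel() as declared in the misc.c hunk below. A self-contained restatement of that mapping (the memptr typedef is only here to make the snippet stand alone):

    typedef unsigned long memptr;   /* decompressor's pointer-sized integer */

    void decompress_kernel(void *rmode,                /* rdi */
                           memptr heap,                /* rsi */
                           unsigned char *input_data,  /* rdx */
                           unsigned long input_len,    /* rcx */
                           unsigned char *output,      /* r8  */
                           unsigned long output_len);  /* r9  */
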
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 0319c88290a5..434f077d2c4d 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -271,6 +271,79 @@ static void error(char *x)
271 asm("hlt"); 271 asm("hlt");
272} 272}
273 273
274#if CONFIG_X86_NEED_RELOCS
275static void handle_relocations(void *output, unsigned long output_len)
276{
277 int *reloc;
278 unsigned long delta, map, ptr;
279 unsigned long min_addr = (unsigned long)output;
280 unsigned long max_addr = min_addr + output_len;
281
282 /*
283 * Calculate the delta between where vmlinux was linked to load
284 * and where it was actually loaded.
285 */
286 delta = min_addr - LOAD_PHYSICAL_ADDR;
287 if (!delta) {
288 debug_putstr("No relocation needed... ");
289 return;
290 }
291 debug_putstr("Performing relocations... ");
292
293 /*
294 * The kernel contains a table of relocation addresses. Those
295 * addresses have the final load address of the kernel in virtual
296 * memory. We are currently working in the self map. So we need to
297 * create an adjustment for kernel memory addresses to the self map.
298 * This will involve subtracting out the base address of the kernel.
299 */
300 map = delta - __START_KERNEL_map;
301
302 /*
303 * Process relocations: 32 bit relocations first then 64 bit after.
304 * Two sets of binary relocations are added to the end of the kernel
305 * before compression. Each relocation table entry is the kernel
 306 * address of the location that needs to be updated, stored as a
 307 * 32-bit value that is sign-extended to 64 bits.
308 *
309 * Format is:
310 *
311 * kernel bits...
312 * 0 - zero terminator for 64 bit relocations
313 * 64 bit relocation repeated
314 * 0 - zero terminator for 32 bit relocations
315 * 32 bit relocation repeated
316 *
317 * So we work backwards from the end of the decompressed image.
318 */
319 for (reloc = output + output_len - sizeof(*reloc); *reloc; reloc--) {
320 int extended = *reloc;
321 extended += map;
322
323 ptr = (unsigned long)extended;
324 if (ptr < min_addr || ptr > max_addr)
325 error("32-bit relocation outside of kernel!\n");
326
327 *(uint32_t *)ptr += delta;
328 }
329#ifdef CONFIG_X86_64
330 for (reloc--; *reloc; reloc--) {
331 long extended = *reloc;
332 extended += map;
333
334 ptr = (unsigned long)extended;
335 if (ptr < min_addr || ptr > max_addr)
336 error("64-bit relocation outside of kernel!\n");
337
338 *(uint64_t *)ptr += delta;
339 }
340#endif
341}
342#else
343static inline void handle_relocations(void *output, unsigned long output_len)
344{ }
345#endif
346
274static void parse_elf(void *output) 347static void parse_elf(void *output)
275{ 348{
276#ifdef CONFIG_X86_64 349#ifdef CONFIG_X86_64
@@ -325,7 +398,8 @@ static void parse_elf(void *output)
325asmlinkage void decompress_kernel(void *rmode, memptr heap, 398asmlinkage void decompress_kernel(void *rmode, memptr heap,
326 unsigned char *input_data, 399 unsigned char *input_data,
327 unsigned long input_len, 400 unsigned long input_len,
328 unsigned char *output) 401 unsigned char *output,
402 unsigned long output_len)
329{ 403{
330 real_mode = rmode; 404 real_mode = rmode;
331 405
@@ -365,6 +439,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
365 debug_putstr("\nDecompressing Linux... "); 439 debug_putstr("\nDecompressing Linux... ");
366 decompress(input_data, input_len, NULL, NULL, output, NULL, error); 440 decompress(input_data, input_len, NULL, NULL, output, NULL, error);
367 parse_elf(output); 441 parse_elf(output);
442 handle_relocations(output, output_len);
368 debug_putstr("done.\nBooting the kernel.\n"); 443 debug_putstr("done.\nBooting the kernel.\n");
369 return; 444 return;
370} 445}
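
Editorial aside: a worked sketch of the fixup arithmetic in handle_relocations(), using made-up addresses and ignoring the 32-bit sign extension of table entries for brevity. If the kernel was compiled to run at physical 0x1000000 but loaded at 0x2000000, each patch location is found by sliding the recorded kernel virtual address into the current mapping:

    #include <stdio.h>

    int main(void)
    {
            unsigned long load_phys        = 0x2000000UL; /* where we run */
            unsigned long compiled_phys    = 0x1000000UL; /* LOAD_PHYSICAL_ADDR */
            unsigned long start_kernel_map = 0xffffffff80000000UL;

            unsigned long delta = load_phys - compiled_phys;
            unsigned long map   = delta - start_kernel_map; /* wraps, on purpose */

            /* Hypothetical table entry: a kernel virtual address to patch. */
            unsigned long entry = 0xffffffff81000000UL;
            unsigned long ptr   = entry + map; /* its current physical location */

            printf("delta=%#lx patch at %#lx\n", delta, ptr); /* 0x1000000, 0x2000000 */
            /* handle_relocations() then does: *(uint32_t *)ptr += delta; */
            return 0;
    }
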
diff --git a/arch/x86/boot/printf.c b/arch/x86/boot/printf.c
index cdac91ca55d3..565083c16e5c 100644
--- a/arch/x86/boot/printf.c
+++ b/arch/x86/boot/printf.c
@@ -55,7 +55,7 @@ static char *number(char *str, long num, int base, int size, int precision,
55 locase = (type & SMALL); 55 locase = (type & SMALL);
56 if (type & LEFT) 56 if (type & LEFT)
57 type &= ~ZEROPAD; 57 type &= ~ZEROPAD;
58 if (base < 2 || base > 36) 58 if (base < 2 || base > 16)
59 return NULL; 59 return NULL;
60 c = (type & ZEROPAD) ? '0' : ' '; 60 c = (type & ZEROPAD) ? '0' : ' ';
61 sign = 0; 61 sign = 0;
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index bccfca68430e..665a730307f2 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -457,7 +457,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
457 else 457 else
458 put_user_ex(0, &frame->uc.uc_flags); 458 put_user_ex(0, &frame->uc.uc_flags);
459 put_user_ex(0, &frame->uc.uc_link); 459 put_user_ex(0, &frame->uc.uc_link);
460 err |= __compat_save_altstack(&frame->uc.uc_stack, regs->sp); 460 compat_save_altstack_ex(&frame->uc.uc_stack, regs->sp);
461 461
462 if (ksig->ka.sa.sa_flags & SA_RESTORER) 462 if (ksig->ka.sa.sa_flags & SA_RESTORER)
463 restorer = ksig->ka.sa.sa_restorer; 463 restorer = ksig->ka.sa.sa_restorer;
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 474dc1b59f72..4299eb05023c 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -452,7 +452,7 @@ ia32_badsys:
452 452
453 CFI_ENDPROC 453 CFI_ENDPROC
454 454
455 .macro PTREGSCALL label, func, arg 455 .macro PTREGSCALL label, func
456 ALIGN 456 ALIGN
457GLOBAL(\label) 457GLOBAL(\label)
458 leaq \func(%rip),%rax 458 leaq \func(%rip),%rax
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 2dfac58f3b11..b1977bad5435 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -86,6 +86,7 @@ extern int acpi_pci_disabled;
86extern int acpi_skip_timer_override; 86extern int acpi_skip_timer_override;
87extern int acpi_use_timer_override; 87extern int acpi_use_timer_override;
88extern int acpi_fix_pin2_polarity; 88extern int acpi_fix_pin2_polarity;
89extern int acpi_disable_cmcff;
89 90
90extern u8 acpi_sci_flags; 91extern u8 acpi_sci_flags;
91extern int acpi_sci_override_gsi; 92extern int acpi_sci_override_gsi;
@@ -168,6 +169,7 @@ static inline void arch_acpi_set_pdc_bits(u32 *buf)
168 169
169#define acpi_lapic 0 170#define acpi_lapic 0
170#define acpi_ioapic 0 171#define acpi_ioapic 0
172#define acpi_disable_cmcff 0
171static inline void acpi_noirq_set(void) { } 173static inline void acpi_noirq_set(void) { }
172static inline void acpi_disable_pci(void) { } 174static inline void acpi_disable_pci(void) { }
173static inline void disable_acpi(void) { } 175static inline void disable_acpi(void) { }
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 58ed6d96a6ac..0a3f9c9f98d5 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -5,6 +5,7 @@
5#include <linux/stddef.h> 5#include <linux/stddef.h>
6#include <linux/stringify.h> 6#include <linux/stringify.h>
7#include <asm/asm.h> 7#include <asm/asm.h>
8#include <asm/ptrace.h>
8 9
9/* 10/*
10 * Alternative inline assembly for SMP. 11 * Alternative inline assembly for SMP.
@@ -220,20 +221,11 @@ extern void *text_poke_early(void *addr, const void *opcode, size_t len);
220 * no thread can be preempted in the instructions being modified (no iret to an 221 * no thread can be preempted in the instructions being modified (no iret to an
221 * invalid instruction possible) or if the instructions are changed from a 222 * invalid instruction possible) or if the instructions are changed from a
222 * consistent state to another consistent state atomically. 223 * consistent state to another consistent state atomically.
223 * More care must be taken when modifying code in the SMP case because of
224 * Intel's errata. text_poke_smp() takes care that errata, but still
225 * doesn't support NMI/MCE handler code modifying.
226 * On the local CPU you need to be protected against NMI or MCE handlers seeing an 224 * On the local CPU you need to be protected against NMI or MCE handlers seeing an
227 * inconsistent instruction while you patch. 225 * inconsistent instruction while you patch.
228 */ 226 */
229struct text_poke_param {
230 void *addr;
231 const void *opcode;
232 size_t len;
233};
234
235extern void *text_poke(void *addr, const void *opcode, size_t len); 227extern void *text_poke(void *addr, const void *opcode, size_t len);
236extern void *text_poke_smp(void *addr, const void *opcode, size_t len); 228extern int poke_int3_handler(struct pt_regs *regs);
237extern void text_poke_smp_batch(struct text_poke_param *params, int n); 229extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler);
238 230
239#endif /* _ASM_X86_ALTERNATIVE_H */ 231#endif /* _ASM_X86_ALTERNATIVE_H */
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index f8119b582c3c..1d2091a226bc 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -715,4 +715,6 @@ static inline void exiting_ack_irq(void)
715 ack_APIC_irq(); 715 ack_APIC_irq();
716} 716}
717 717
718extern void ioapic_zap_locks(void);
719
718#endif /* _ASM_X86_APIC_H */ 720#endif /* _ASM_X86_APIC_H */
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index 1c2d247f65ce..4582e8e1cd1a 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -3,21 +3,25 @@
3 3
4#ifdef __ASSEMBLY__ 4#ifdef __ASSEMBLY__
5# define __ASM_FORM(x) x 5# define __ASM_FORM(x) x
6# define __ASM_FORM_RAW(x) x
6# define __ASM_FORM_COMMA(x) x, 7# define __ASM_FORM_COMMA(x) x,
7#else 8#else
8# define __ASM_FORM(x) " " #x " " 9# define __ASM_FORM(x) " " #x " "
10# define __ASM_FORM_RAW(x) #x
9# define __ASM_FORM_COMMA(x) " " #x "," 11# define __ASM_FORM_COMMA(x) " " #x ","
10#endif 12#endif
11 13
12#ifdef CONFIG_X86_32 14#ifdef CONFIG_X86_32
13# define __ASM_SEL(a,b) __ASM_FORM(a) 15# define __ASM_SEL(a,b) __ASM_FORM(a)
16# define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(a)
14#else 17#else
15# define __ASM_SEL(a,b) __ASM_FORM(b) 18# define __ASM_SEL(a,b) __ASM_FORM(b)
19# define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(b)
16#endif 20#endif
17 21
18#define __ASM_SIZE(inst, ...) __ASM_SEL(inst##l##__VA_ARGS__, \ 22#define __ASM_SIZE(inst, ...) __ASM_SEL(inst##l##__VA_ARGS__, \
19 inst##q##__VA_ARGS__) 23 inst##q##__VA_ARGS__)
20#define __ASM_REG(reg) __ASM_SEL(e##reg, r##reg) 24#define __ASM_REG(reg) __ASM_SEL_RAW(e##reg, r##reg)
21 25
22#define _ASM_PTR __ASM_SEL(.long, .quad) 26#define _ASM_PTR __ASM_SEL(.long, .quad)
23#define _ASM_ALIGN __ASM_SEL(.balign 4, .balign 8) 27#define _ASM_ALIGN __ASM_SEL(.balign 4, .balign 8)
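
Editorial aside: the new _RAW variants stringify without the padding spaces that __ASM_FORM wraps around its argument, which matters when the expansion must form a single token inside a larger string (e.g. __ASM_REG building a bare register name). A stand-alone illustration of the two C-side expansions, with the macros copied from the header and printf used only for display:

    #include <stdio.h>

    #define __ASM_FORM(x)     " " #x " "
    #define __ASM_FORM_RAW(x) #x

    int main(void)
    {
            printf("[%s]\n", __ASM_FORM(eax));     /* prints "[ eax ]" */
            printf("[%s]\n", __ASM_FORM_RAW(eax)); /* prints "[eax]" */
            return 0;
    }
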
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 6dfd0195bb55..41639ce8fd63 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -15,6 +15,14 @@
15#include <linux/compiler.h> 15#include <linux/compiler.h>
16#include <asm/alternative.h> 16#include <asm/alternative.h>
17 17
18#if BITS_PER_LONG == 32
19# define _BITOPS_LONG_SHIFT 5
20#elif BITS_PER_LONG == 64
21# define _BITOPS_LONG_SHIFT 6
22#else
23# error "Unexpected BITS_PER_LONG"
24#endif
25
18#define BIT_64(n) (U64_C(1) << (n)) 26#define BIT_64(n) (U64_C(1) << (n))
19 27
20/* 28/*
@@ -59,7 +67,7 @@
59 * restricted to acting on a single-word quantity. 67 * restricted to acting on a single-word quantity.
60 */ 68 */
61static __always_inline void 69static __always_inline void
62set_bit(unsigned int nr, volatile unsigned long *addr) 70set_bit(long nr, volatile unsigned long *addr)
63{ 71{
64 if (IS_IMMEDIATE(nr)) { 72 if (IS_IMMEDIATE(nr)) {
65 asm volatile(LOCK_PREFIX "orb %1,%0" 73 asm volatile(LOCK_PREFIX "orb %1,%0"
@@ -81,7 +89,7 @@ set_bit(unsigned int nr, volatile unsigned long *addr)
81 * If it's called on the same region of memory simultaneously, the effect 89 * If it's called on the same region of memory simultaneously, the effect
82 * may be that only one operation succeeds. 90 * may be that only one operation succeeds.
83 */ 91 */
84static inline void __set_bit(int nr, volatile unsigned long *addr) 92static inline void __set_bit(long nr, volatile unsigned long *addr)
85{ 93{
86 asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory"); 94 asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
87} 95}
@@ -97,7 +105,7 @@ static inline void __set_bit(int nr, volatile unsigned long *addr)
97 * in order to ensure changes are visible on other processors. 105 * in order to ensure changes are visible on other processors.
98 */ 106 */
99static __always_inline void 107static __always_inline void
100clear_bit(int nr, volatile unsigned long *addr) 108clear_bit(long nr, volatile unsigned long *addr)
101{ 109{
102 if (IS_IMMEDIATE(nr)) { 110 if (IS_IMMEDIATE(nr)) {
103 asm volatile(LOCK_PREFIX "andb %1,%0" 111 asm volatile(LOCK_PREFIX "andb %1,%0"
@@ -118,13 +126,13 @@ clear_bit(int nr, volatile unsigned long *addr)
118 * clear_bit() is atomic and implies release semantics before the memory 126 * clear_bit() is atomic and implies release semantics before the memory
119 * operation. It can be used for an unlock. 127 * operation. It can be used for an unlock.
120 */ 128 */
121static inline void clear_bit_unlock(unsigned nr, volatile unsigned long *addr) 129static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
122{ 130{
123 barrier(); 131 barrier();
124 clear_bit(nr, addr); 132 clear_bit(nr, addr);
125} 133}
126 134
127static inline void __clear_bit(int nr, volatile unsigned long *addr) 135static inline void __clear_bit(long nr, volatile unsigned long *addr)
128{ 136{
129 asm volatile("btr %1,%0" : ADDR : "Ir" (nr)); 137 asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
130} 138}
@@ -141,7 +149,7 @@ static inline void __clear_bit(int nr, volatile unsigned long *addr)
141 * No memory barrier is required here, because x86 cannot reorder stores past 149 * No memory barrier is required here, because x86 cannot reorder stores past
142 * older loads. Same principle as spin_unlock. 150 * older loads. Same principle as spin_unlock.
143 */ 151 */
144static inline void __clear_bit_unlock(unsigned nr, volatile unsigned long *addr) 152static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
145{ 153{
146 barrier(); 154 barrier();
147 __clear_bit(nr, addr); 155 __clear_bit(nr, addr);
@@ -159,7 +167,7 @@ static inline void __clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
159 * If it's called on the same region of memory simultaneously, the effect 167 * If it's called on the same region of memory simultaneously, the effect
160 * may be that only one operation succeeds. 168 * may be that only one operation succeeds.
161 */ 169 */
162static inline void __change_bit(int nr, volatile unsigned long *addr) 170static inline void __change_bit(long nr, volatile unsigned long *addr)
163{ 171{
164 asm volatile("btc %1,%0" : ADDR : "Ir" (nr)); 172 asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
165} 173}
@@ -173,7 +181,7 @@ static inline void __change_bit(int nr, volatile unsigned long *addr)
173 * Note that @nr may be almost arbitrarily large; this function is not 181 * Note that @nr may be almost arbitrarily large; this function is not
174 * restricted to acting on a single-word quantity. 182 * restricted to acting on a single-word quantity.
175 */ 183 */
176static inline void change_bit(int nr, volatile unsigned long *addr) 184static inline void change_bit(long nr, volatile unsigned long *addr)
177{ 185{
178 if (IS_IMMEDIATE(nr)) { 186 if (IS_IMMEDIATE(nr)) {
179 asm volatile(LOCK_PREFIX "xorb %1,%0" 187 asm volatile(LOCK_PREFIX "xorb %1,%0"
@@ -194,7 +202,7 @@ static inline void change_bit(int nr, volatile unsigned long *addr)
194 * This operation is atomic and cannot be reordered. 202 * This operation is atomic and cannot be reordered.
195 * It also implies a memory barrier. 203 * It also implies a memory barrier.
196 */ 204 */
197static inline int test_and_set_bit(int nr, volatile unsigned long *addr) 205static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
198{ 206{
199 int oldbit; 207 int oldbit;
200 208
@@ -212,7 +220,7 @@ static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
212 * This is the same as test_and_set_bit on x86. 220 * This is the same as test_and_set_bit on x86.
213 */ 221 */
214static __always_inline int 222static __always_inline int
215test_and_set_bit_lock(int nr, volatile unsigned long *addr) 223test_and_set_bit_lock(long nr, volatile unsigned long *addr)
216{ 224{
217 return test_and_set_bit(nr, addr); 225 return test_and_set_bit(nr, addr);
218} 226}
@@ -226,7 +234,7 @@ test_and_set_bit_lock(int nr, volatile unsigned long *addr)
226 * If two examples of this operation race, one can appear to succeed 234 * If two examples of this operation race, one can appear to succeed
227 * but actually fail. You must protect multiple accesses with a lock. 235 * but actually fail. You must protect multiple accesses with a lock.
228 */ 236 */
229static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) 237static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
230{ 238{
231 int oldbit; 239 int oldbit;
232 240
@@ -245,7 +253,7 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
245 * This operation is atomic and cannot be reordered. 253 * This operation is atomic and cannot be reordered.
246 * It also implies a memory barrier. 254 * It also implies a memory barrier.
247 */ 255 */
248static inline int test_and_clear_bit(int nr, volatile unsigned long *addr) 256static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
249{ 257{
250 int oldbit; 258 int oldbit;
251 259
@@ -272,7 +280,7 @@ static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
272 * accessed from a hypervisor on the same CPU if running in a VM: don't change 280 * accessed from a hypervisor on the same CPU if running in a VM: don't change
273 * this without also updating arch/x86/kernel/kvm.c 281 * this without also updating arch/x86/kernel/kvm.c
274 */ 282 */
275static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) 283static inline int __test_and_clear_bit(long nr, volatile unsigned long *addr)
276{ 284{
277 int oldbit; 285 int oldbit;
278 286
@@ -284,7 +292,7 @@ static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
284} 292}
285 293
286/* WARNING: non atomic and it can be reordered! */ 294/* WARNING: non atomic and it can be reordered! */
287static inline int __test_and_change_bit(int nr, volatile unsigned long *addr) 295static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
288{ 296{
289 int oldbit; 297 int oldbit;
290 298
@@ -304,7 +312,7 @@ static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
304 * This operation is atomic and cannot be reordered. 312 * This operation is atomic and cannot be reordered.
305 * It also implies a memory barrier. 313 * It also implies a memory barrier.
306 */ 314 */
307static inline int test_and_change_bit(int nr, volatile unsigned long *addr) 315static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
308{ 316{
309 int oldbit; 317 int oldbit;
310 318
@@ -315,13 +323,13 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
315 return oldbit; 323 return oldbit;
316} 324}
317 325
318static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr) 326static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
319{ 327{
320 return ((1UL << (nr % BITS_PER_LONG)) & 328 return ((1UL << (nr & (BITS_PER_LONG-1))) &
321 (addr[nr / BITS_PER_LONG])) != 0; 329 (addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
322} 330}
323 331
324static inline int variable_test_bit(int nr, volatile const unsigned long *addr) 332static inline int variable_test_bit(long nr, volatile const unsigned long *addr)
325{ 333{
326 int oldbit; 334 int oldbit;
327 335
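
Editorial aside: widening nr from int to long matters once a bitmap spans more than 2^31 bits (a 256 MiB bitmap): a 32-bit index would wrap before the word index nr >> _BITOPS_LONG_SHIFT is even computed. A small 64-bit-build sketch of the failure mode (the index is illustrative; the int wraparound shown is what typical x86-64 toolchains produce):

    #include <stdio.h>

    int main(void)
    {
            long nr   = 3000000000L; /* > INT_MAX, still a valid bit index */
            int  nr32 = (int)nr;     /* what the old int prototypes would see */

            printf("word index (long): %ld\n", nr >> 6);           /* 46875000 */
            printf("word index (int) : %ld\n", (long)(nr32 / 64)); /* negative */
            return 0;
    }
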
diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h
index 653668d140f9..4a8cb8d7cbd5 100644
--- a/arch/x86/include/asm/bootparam_utils.h
+++ b/arch/x86/include/asm/bootparam_utils.h
@@ -35,9 +35,9 @@ static void sanitize_boot_params(struct boot_params *boot_params)
35 */ 35 */
36 if (boot_params->sentinel) { 36 if (boot_params->sentinel) {
37 /* fields in boot_params are left uninitialized, clear them */ 37 /* fields in boot_params are left uninitialized, clear them */
38 memset(&boot_params->olpc_ofw_header, 0, 38 memset(&boot_params->ext_ramdisk_image, 0,
39 (char *)&boot_params->efi_info - 39 (char *)&boot_params->efi_info -
40 (char *)&boot_params->olpc_ofw_header); 40 (char *)&boot_params->ext_ramdisk_image);
41 memset(&boot_params->kbd_status, 0, 41 memset(&boot_params->kbd_status, 0,
42 (char *)&boot_params->hdr - 42 (char *)&boot_params->hdr -
43 (char *)&boot_params->kbd_status); 43 (char *)&boot_params->kbd_status);
diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
index 46fc474fd819..f50de6951738 100644
--- a/arch/x86/include/asm/checksum_32.h
+++ b/arch/x86/include/asm/checksum_32.h
@@ -49,9 +49,15 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
49 int len, __wsum sum, 49 int len, __wsum sum,
50 int *err_ptr) 50 int *err_ptr)
51{ 51{
52 __wsum ret;
53
52 might_sleep(); 54 might_sleep();
53 return csum_partial_copy_generic((__force void *)src, dst, 55 stac();
54 len, sum, err_ptr, NULL); 56 ret = csum_partial_copy_generic((__force void *)src, dst,
57 len, sum, err_ptr, NULL);
58 clac();
59
60 return ret;
55} 61}
56 62
57/* 63/*
@@ -176,10 +182,16 @@ static inline __wsum csum_and_copy_to_user(const void *src,
176 int len, __wsum sum, 182 int len, __wsum sum,
177 int *err_ptr) 183 int *err_ptr)
178{ 184{
185 __wsum ret;
186
179 might_sleep(); 187 might_sleep();
180 if (access_ok(VERIFY_WRITE, dst, len)) 188 if (access_ok(VERIFY_WRITE, dst, len)) {
181 return csum_partial_copy_generic(src, (__force void *)dst, 189 stac();
182 len, sum, NULL, err_ptr); 190 ret = csum_partial_copy_generic(src, (__force void *)dst,
191 len, sum, NULL, err_ptr);
192 clac();
193 return ret;
194 }
183 195
184 if (len) 196 if (len)
185 *err_ptr = -EFAULT; 197 *err_ptr = -EFAULT;
diff --git a/arch/x86/include/asm/checksum_64.h b/arch/x86/include/asm/checksum_64.h
index 9bfdc41629ec..e6fd8a026c7b 100644
--- a/arch/x86/include/asm/checksum_64.h
+++ b/arch/x86/include/asm/checksum_64.h
@@ -133,7 +133,7 @@ extern __wsum csum_partial(const void *buff, int len, __wsum sum);
133 133
134 134
135/* Do not call this directly. Use the wrappers below */ 135/* Do not call this directly. Use the wrappers below */
136extern __wsum csum_partial_copy_generic(const void *src, const void *dst, 136extern __visible __wsum csum_partial_copy_generic(const void *src, const void *dst,
137 int len, __wsum sum, 137 int len, __wsum sum,
138 int *src_err_ptr, int *dst_err_ptr); 138 int *src_err_ptr, int *dst_err_ptr);
139 139
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 47538a61c91b..d3f5c63078d8 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -366,9 +366,10 @@ extern bool __static_cpu_has_safe(u16 bit);
366 */ 366 */
367static __always_inline __pure bool __static_cpu_has(u16 bit) 367static __always_inline __pure bool __static_cpu_has(u16 bit)
368{ 368{
369#if __GNUC__ > 4 || __GNUC_MINOR__ >= 5 369#ifdef CC_HAVE_ASM_GOTO
370 370
371#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS 371#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
372
372 /* 373 /*
373 * Catch too early usage of this before alternatives 374 * Catch too early usage of this before alternatives
374 * have run. 375 * have run.
@@ -384,6 +385,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
384 ".previous\n" 385 ".previous\n"
385 /* skipping size check since replacement size = 0 */ 386 /* skipping size check since replacement size = 0 */
386 : : "i" (X86_FEATURE_ALWAYS) : : t_warn); 387 : : "i" (X86_FEATURE_ALWAYS) : : t_warn);
388
387#endif 389#endif
388 390
389 asm goto("1: jmp %l[t_no]\n" 391 asm goto("1: jmp %l[t_no]\n"
@@ -406,7 +408,9 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
406 warn_pre_alternatives(); 408 warn_pre_alternatives();
407 return false; 409 return false;
408#endif 410#endif
409#else /* GCC_VERSION >= 40500 */ 411
412#else /* CC_HAVE_ASM_GOTO */
413
410 u8 flag; 414 u8 flag;
411 /* Open-coded due to __stringify() in ALTERNATIVE() */ 415 /* Open-coded due to __stringify() in ALTERNATIVE() */
412 asm volatile("1: movb $0,%0\n" 416 asm volatile("1: movb $0,%0\n"
@@ -427,7 +431,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
427 ".previous\n" 431 ".previous\n"
428 : "=qm" (flag) : "i" (bit)); 432 : "=qm" (flag) : "i" (bit));
429 return flag; 433 return flag;
430#endif 434
435#endif /* CC_HAVE_ASM_GOTO */
431} 436}
432 437
433#define static_cpu_has(bit) \ 438#define static_cpu_has(bit) \
@@ -441,7 +446,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
441 446
442static __always_inline __pure bool _static_cpu_has_safe(u16 bit) 447static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
443{ 448{
444#if __GNUC__ > 4 || __GNUC_MINOR__ >= 5 449#ifdef CC_HAVE_ASM_GOTO
445/* 450/*
446 * We need to spell the jumps to the compiler because, depending on the offset, 451 * We need to spell the jumps to the compiler because, depending on the offset,
447 * the replacement jump can be bigger than the original jump, and this we cannot 452 * the replacement jump can be bigger than the original jump, and this we cannot
@@ -475,7 +480,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
475 return false; 480 return false;
476 t_dynamic: 481 t_dynamic:
477 return __static_cpu_has_safe(bit); 482 return __static_cpu_has_safe(bit);
478#else /* GCC_VERSION >= 40500 */ 483#else
479 u8 flag; 484 u8 flag;
480 /* Open-coded due to __stringify() in ALTERNATIVE() */ 485 /* Open-coded due to __stringify() in ALTERNATIVE() */
481 asm volatile("1: movb $2,%0\n" 486 asm volatile("1: movb $2,%0\n"
@@ -511,7 +516,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
511 : "=qm" (flag) 516 : "=qm" (flag)
512 : "i" (bit), "i" (X86_FEATURE_ALWAYS)); 517 : "i" (bit), "i" (X86_FEATURE_ALWAYS));
513 return (flag == 2 ? __static_cpu_has_safe(bit) : flag); 518 return (flag == 2 ? __static_cpu_has_safe(bit) : flag);
514#endif 519#endif /* CC_HAVE_ASM_GOTO */
515} 520}
516 521
517#define static_cpu_has_safe(bit) \ 522#define static_cpu_has_safe(bit) \
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index cccd07fa5e3a..779c2efe2e97 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -29,7 +29,7 @@ extern void e820_setup_gap(void);
29extern int e820_search_gap(unsigned long *gapstart, unsigned long *gapsize, 29extern int e820_search_gap(unsigned long *gapstart, unsigned long *gapsize,
30 unsigned long start_addr, unsigned long long end_addr); 30 unsigned long start_addr, unsigned long long end_addr);
31struct setup_data; 31struct setup_data;
32extern void parse_e820_ext(struct setup_data *data); 32extern void parse_e820_ext(u64 phys_addr, u32 data_len);
33 33
34#if defined(CONFIG_X86_64) || \ 34#if defined(CONFIG_X86_64) || \
35 (defined(CONFIG_X86_32) && defined(CONFIG_HIBERNATION)) 35 (defined(CONFIG_X86_32) && defined(CONFIG_HIBERNATION))
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index e4ac559c4a24..92b3bae08b74 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -26,56 +26,56 @@
26#include <asm/sections.h> 26#include <asm/sections.h>
27 27
28/* Interrupt handlers registered during init_IRQ */ 28/* Interrupt handlers registered during init_IRQ */
29extern void apic_timer_interrupt(void); 29extern asmlinkage void apic_timer_interrupt(void);
30extern void x86_platform_ipi(void); 30extern asmlinkage void x86_platform_ipi(void);
31extern void kvm_posted_intr_ipi(void); 31extern asmlinkage void kvm_posted_intr_ipi(void);
32extern void error_interrupt(void); 32extern asmlinkage void error_interrupt(void);
33extern void irq_work_interrupt(void); 33extern asmlinkage void irq_work_interrupt(void);
34 34
35extern void spurious_interrupt(void); 35extern asmlinkage void spurious_interrupt(void);
36extern void thermal_interrupt(void); 36extern asmlinkage void thermal_interrupt(void);
37extern void reschedule_interrupt(void); 37extern asmlinkage void reschedule_interrupt(void);
38 38
39extern void invalidate_interrupt(void); 39extern asmlinkage void invalidate_interrupt(void);
40extern void invalidate_interrupt0(void); 40extern asmlinkage void invalidate_interrupt0(void);
41extern void invalidate_interrupt1(void); 41extern asmlinkage void invalidate_interrupt1(void);
42extern void invalidate_interrupt2(void); 42extern asmlinkage void invalidate_interrupt2(void);
43extern void invalidate_interrupt3(void); 43extern asmlinkage void invalidate_interrupt3(void);
44extern void invalidate_interrupt4(void); 44extern asmlinkage void invalidate_interrupt4(void);
45extern void invalidate_interrupt5(void); 45extern asmlinkage void invalidate_interrupt5(void);
46extern void invalidate_interrupt6(void); 46extern asmlinkage void invalidate_interrupt6(void);
47extern void invalidate_interrupt7(void); 47extern asmlinkage void invalidate_interrupt7(void);
48extern void invalidate_interrupt8(void); 48extern asmlinkage void invalidate_interrupt8(void);
49extern void invalidate_interrupt9(void); 49extern asmlinkage void invalidate_interrupt9(void);
50extern void invalidate_interrupt10(void); 50extern asmlinkage void invalidate_interrupt10(void);
51extern void invalidate_interrupt11(void); 51extern asmlinkage void invalidate_interrupt11(void);
52extern void invalidate_interrupt12(void); 52extern asmlinkage void invalidate_interrupt12(void);
53extern void invalidate_interrupt13(void); 53extern asmlinkage void invalidate_interrupt13(void);
54extern void invalidate_interrupt14(void); 54extern asmlinkage void invalidate_interrupt14(void);
55extern void invalidate_interrupt15(void); 55extern asmlinkage void invalidate_interrupt15(void);
56extern void invalidate_interrupt16(void); 56extern asmlinkage void invalidate_interrupt16(void);
57extern void invalidate_interrupt17(void); 57extern asmlinkage void invalidate_interrupt17(void);
58extern void invalidate_interrupt18(void); 58extern asmlinkage void invalidate_interrupt18(void);
59extern void invalidate_interrupt19(void); 59extern asmlinkage void invalidate_interrupt19(void);
60extern void invalidate_interrupt20(void); 60extern asmlinkage void invalidate_interrupt20(void);
61extern void invalidate_interrupt21(void); 61extern asmlinkage void invalidate_interrupt21(void);
62extern void invalidate_interrupt22(void); 62extern asmlinkage void invalidate_interrupt22(void);
63extern void invalidate_interrupt23(void); 63extern asmlinkage void invalidate_interrupt23(void);
64extern void invalidate_interrupt24(void); 64extern asmlinkage void invalidate_interrupt24(void);
65extern void invalidate_interrupt25(void); 65extern asmlinkage void invalidate_interrupt25(void);
66extern void invalidate_interrupt26(void); 66extern asmlinkage void invalidate_interrupt26(void);
67extern void invalidate_interrupt27(void); 67extern asmlinkage void invalidate_interrupt27(void);
68extern void invalidate_interrupt28(void); 68extern asmlinkage void invalidate_interrupt28(void);
69extern void invalidate_interrupt29(void); 69extern asmlinkage void invalidate_interrupt29(void);
70extern void invalidate_interrupt30(void); 70extern asmlinkage void invalidate_interrupt30(void);
71extern void invalidate_interrupt31(void); 71extern asmlinkage void invalidate_interrupt31(void);
72 72
73extern void irq_move_cleanup_interrupt(void); 73extern asmlinkage void irq_move_cleanup_interrupt(void);
74extern void reboot_interrupt(void); 74extern asmlinkage void reboot_interrupt(void);
75extern void threshold_interrupt(void); 75extern asmlinkage void threshold_interrupt(void);
76 76
77extern void call_function_interrupt(void); 77extern asmlinkage void call_function_interrupt(void);
78extern void call_function_single_interrupt(void); 78extern asmlinkage void call_function_single_interrupt(void);
79 79
80#ifdef CONFIG_TRACING 80#ifdef CONFIG_TRACING
81/* Interrupt handlers registered during init_IRQ */ 81/* Interrupt handlers registered during init_IRQ */
@@ -172,22 +172,18 @@ extern atomic_t irq_mis_count;
172extern void eisa_set_level_irq(unsigned int irq); 172extern void eisa_set_level_irq(unsigned int irq);
173 173
174/* SMP */ 174/* SMP */
175extern void smp_apic_timer_interrupt(struct pt_regs *); 175extern __visible void smp_apic_timer_interrupt(struct pt_regs *);
176extern void smp_spurious_interrupt(struct pt_regs *); 176extern __visible void smp_spurious_interrupt(struct pt_regs *);
177extern void smp_x86_platform_ipi(struct pt_regs *); 177extern __visible void smp_x86_platform_ipi(struct pt_regs *);
178extern void smp_error_interrupt(struct pt_regs *); 178extern __visible void smp_error_interrupt(struct pt_regs *);
179#ifdef CONFIG_X86_IO_APIC 179#ifdef CONFIG_X86_IO_APIC
180extern asmlinkage void smp_irq_move_cleanup_interrupt(void); 180extern asmlinkage void smp_irq_move_cleanup_interrupt(void);
181#endif 181#endif
182#ifdef CONFIG_SMP 182#ifdef CONFIG_SMP
183extern void smp_reschedule_interrupt(struct pt_regs *); 183extern __visible void smp_reschedule_interrupt(struct pt_regs *);
184extern void smp_call_function_interrupt(struct pt_regs *); 184extern __visible void smp_call_function_interrupt(struct pt_regs *);
185extern void smp_call_function_single_interrupt(struct pt_regs *); 185extern __visible void smp_call_function_single_interrupt(struct pt_regs *);
186#ifdef CONFIG_X86_32 186extern __visible void smp_invalidate_interrupt(struct pt_regs *);
187extern void smp_invalidate_interrupt(struct pt_regs *);
188#else
189extern asmlinkage void smp_invalidate_interrupt(struct pt_regs *);
190#endif
191#endif 187#endif
192 188
193extern void (*__initconst interrupt[NR_VECTORS-FIRST_EXTERNAL_VECTOR])(void); 189extern void (*__initconst interrupt[NR_VECTORS-FIRST_EXTERNAL_VECTOR])(void);
diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h
index 2d4b5e6107cd..e42f758a0fbd 100644
--- a/arch/x86/include/asm/hypervisor.h
+++ b/arch/x86/include/asm/hypervisor.h
@@ -33,7 +33,7 @@ struct hypervisor_x86 {
33 const char *name; 33 const char *name;
34 34
35 /* Detection routine */ 35 /* Detection routine */
36 bool (*detect)(void); 36 uint32_t (*detect)(void);
37 37
38 /* Adjust CPU feature bits (run once per CPU) */ 38 /* Adjust CPU feature bits (run once per CPU) */
39 void (*set_cpu_features)(struct cpuinfo_x86 *); 39 void (*set_cpu_features)(struct cpuinfo_x86 *);
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index 57873beb3292..0ea10f27d613 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -33,7 +33,7 @@ extern void (*x86_platform_ipi_callback)(void);
33extern void native_init_IRQ(void); 33extern void native_init_IRQ(void);
34extern bool handle_irq(unsigned irq, struct pt_regs *regs); 34extern bool handle_irq(unsigned irq, struct pt_regs *regs);
35 35
36extern unsigned int do_IRQ(struct pt_regs *regs); 36extern __visible unsigned int do_IRQ(struct pt_regs *regs);
37 37
38/* Interrupt vector management */ 38/* Interrupt vector management */
39extern DECLARE_BITMAP(used_vectors, NR_VECTORS); 39extern DECLARE_BITMAP(used_vectors, NR_VECTORS);
diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
index 5a6d2873f80e..9454c167629f 100644
--- a/arch/x86/include/asm/kprobes.h
+++ b/arch/x86/include/asm/kprobes.h
@@ -49,10 +49,10 @@ typedef u8 kprobe_opcode_t;
49#define flush_insn_slot(p) do { } while (0) 49#define flush_insn_slot(p) do { } while (0)
50 50
51/* optinsn template addresses */ 51/* optinsn template addresses */
52extern kprobe_opcode_t optprobe_template_entry; 52extern __visible kprobe_opcode_t optprobe_template_entry;
53extern kprobe_opcode_t optprobe_template_val; 53extern __visible kprobe_opcode_t optprobe_template_val;
54extern kprobe_opcode_t optprobe_template_call; 54extern __visible kprobe_opcode_t optprobe_template_call;
55extern kprobe_opcode_t optprobe_template_end; 55extern __visible kprobe_opcode_t optprobe_template_end;
56#define MAX_OPTIMIZED_LENGTH (MAX_INSN_SIZE + RELATIVE_ADDR_SIZE) 56#define MAX_OPTIMIZED_LENGTH (MAX_INSN_SIZE + RELATIVE_ADDR_SIZE)
57#define MAX_OPTINSN_SIZE \ 57#define MAX_OPTINSN_SIZE \
58 (((unsigned long)&optprobe_template_end - \ 58 (((unsigned long)&optprobe_template_end - \
@@ -62,7 +62,7 @@ extern kprobe_opcode_t optprobe_template_end;
62extern const int kretprobe_blacklist_size; 62extern const int kretprobe_blacklist_size;
63 63
64void arch_remove_kprobe(struct kprobe *p); 64void arch_remove_kprobe(struct kprobe *p);
65void kretprobe_trampoline(void); 65asmlinkage void kretprobe_trampoline(void);
66 66
67/* Architecture specific copy of original instruction*/ 67/* Architecture specific copy of original instruction*/
68struct arch_specific_insn { 68struct arch_specific_insn {
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f87f7fcefa0a..c76ff74a98f2 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -286,6 +286,7 @@ struct kvm_mmu {
286 u64 *pae_root; 286 u64 *pae_root;
287 u64 *lm_root; 287 u64 *lm_root;
288 u64 rsvd_bits_mask[2][4]; 288 u64 rsvd_bits_mask[2][4];
289 u64 bad_mt_xwr;
289 290
290 /* 291 /*
291 * Bitmap: bit set = last pte in walk 292 * Bitmap: bit set = last pte in walk
@@ -323,6 +324,7 @@ struct kvm_pmu {
323 u64 global_ovf_ctrl; 324 u64 global_ovf_ctrl;
324 u64 counter_bitmask[2]; 325 u64 counter_bitmask[2];
325 u64 global_ctrl_mask; 326 u64 global_ctrl_mask;
327 u64 reserved_bits;
326 u8 version; 328 u8 version;
327 struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC]; 329 struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
328 struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED]; 330 struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED];
@@ -511,6 +513,14 @@ struct kvm_vcpu_arch {
511 * instruction. 513 * instruction.
512 */ 514 */
513 bool write_fault_to_shadow_pgtable; 515 bool write_fault_to_shadow_pgtable;
516
517 /* set at EPT violation at this point */
518 unsigned long exit_qualification;
519
520 /* pv related host specific info */
521 struct {
522 bool pv_unhalted;
523 } pv;
514}; 524};
515 525
516struct kvm_lpage_info { 526struct kvm_lpage_info {
@@ -802,8 +812,8 @@ extern u32 kvm_min_guest_tsc_khz;
802extern u32 kvm_max_guest_tsc_khz; 812extern u32 kvm_max_guest_tsc_khz;
803 813
804enum emulation_result { 814enum emulation_result {
805 EMULATE_DONE, /* no further processing */ 815 EMULATE_DONE, /* no further processing */
806 EMULATE_DO_MMIO, /* kvm_run filled with mmio request */ 816 EMULATE_USER_EXIT, /* kvm_run ready for userspace exit */
807 EMULATE_FAIL, /* can't emulate this instruction */ 817 EMULATE_FAIL, /* can't emulate this instruction */
808}; 818};
809 819
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index 695399f2d5eb..1df115909758 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -85,26 +85,20 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
85 return ret; 85 return ret;
86} 86}
87 87
88static inline bool kvm_para_available(void) 88static inline uint32_t kvm_cpuid_base(void)
89{ 89{
90 unsigned int eax, ebx, ecx, edx;
91 char signature[13];
92
93 if (boot_cpu_data.cpuid_level < 0) 90 if (boot_cpu_data.cpuid_level < 0)
94 return false; /* So we don't blow up on old processors */ 91 return 0; /* So we don't blow up on old processors */
95 92
96 if (cpu_has_hypervisor) { 93 if (cpu_has_hypervisor)
97 cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx); 94 return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);
98 memcpy(signature + 0, &ebx, 4);
99 memcpy(signature + 4, &ecx, 4);
100 memcpy(signature + 8, &edx, 4);
101 signature[12] = 0;
102 95
103 if (strcmp(signature, "KVMKVMKVM") == 0) 96 return 0;
104 return true; 97}
105 }
106 98
107 return false; 99static inline bool kvm_para_available(void)
100{
101 return kvm_cpuid_base() != 0;
108} 102}
109 103
110static inline unsigned int kvm_arch_para_features(void) 104static inline unsigned int kvm_arch_para_features(void)
@@ -118,10 +112,20 @@ void kvm_async_pf_task_wait(u32 token);
118void kvm_async_pf_task_wake(u32 token); 112void kvm_async_pf_task_wake(u32 token);
119u32 kvm_read_and_reset_pf_reason(void); 113u32 kvm_read_and_reset_pf_reason(void);
120extern void kvm_disable_steal_time(void); 114extern void kvm_disable_steal_time(void);
121#else 115
122#define kvm_guest_init() do { } while (0) 116#ifdef CONFIG_PARAVIRT_SPINLOCKS
117void __init kvm_spinlock_init(void);
118#else /* !CONFIG_PARAVIRT_SPINLOCKS */
119static inline void kvm_spinlock_init(void)
120{
121}
122#endif /* CONFIG_PARAVIRT_SPINLOCKS */
123
124#else /* CONFIG_KVM_GUEST */
125#define kvm_guest_init() do {} while (0)
123#define kvm_async_pf_task_wait(T) do {} while(0) 126#define kvm_async_pf_task_wait(T) do {} while(0)
124#define kvm_async_pf_task_wake(T) do {} while(0) 127#define kvm_async_pf_task_wake(T) do {} while(0)
128
125static inline u32 kvm_read_and_reset_pf_reason(void) 129static inline u32 kvm_read_and_reset_pf_reason(void)
126{ 130{
127 return 0; 131 return 0;
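
Editorial aside: kvm_cpuid_base() now keys off the hypervisor CPUID range at leaf 0x40000000, where a guest reads a 12-byte vendor signature from EBX/ECX/EDX ("KVMKVMKVM\0\0\0" for KVM). A minimal user-space probe using GCC's cpuid.h; it only reports something meaningful inside a guest, and unlike the kernel code it skips the cpu_has_hypervisor check:

    #include <stdio.h>
    #include <string.h>
    #include <cpuid.h>

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;
            char sig[13];

            __cpuid(0x40000000, eax, ebx, ecx, edx);
            memcpy(sig + 0, &ebx, 4);
            memcpy(sig + 4, &ecx, 4);
            memcpy(sig + 8, &edx, 4);
            sig[12] = '\0';

            printf("hypervisor signature: \"%s\"\n", sig);
            return 0;
    }
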
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 29e3093bbd21..cbe6b9e404ce 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -32,11 +32,20 @@
32#define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */ 32#define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */
33#define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */ 33#define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */
34#define MCI_STATUS_AR (1ULL<<55) /* Action required */ 34#define MCI_STATUS_AR (1ULL<<55) /* Action required */
35#define MCACOD 0xffff /* MCA Error Code */ 35
36/*
37 * Note that the full MCACOD field of IA32_MCi_STATUS MSR is
38 * bits 15:0. But bit 12 is the 'F' bit, defined for corrected
39 * errors to indicate that errors are being filtered by hardware.
40 * We should mask out bit 12 when looking for specific signatures
41 * of uncorrected errors - so the F bit is deliberately skipped
42 * in this #define.
43 */
44#define MCACOD 0xefff /* MCA Error Code */
36 45
37/* Architecturally defined codes from SDM Vol. 3B Chapter 15 */ 46/* Architecturally defined codes from SDM Vol. 3B Chapter 15 */
38#define MCACOD_SCRUB 0x00C0 /* 0xC0-0xCF Memory Scrubbing */ 47#define MCACOD_SCRUB 0x00C0 /* 0xC0-0xCF Memory Scrubbing */
39#define MCACOD_SCRUBMSK 0xfff0 48#define MCACOD_SCRUBMSK 0xeff0 /* Skip bit 12 ('F' bit) */
40#define MCACOD_L3WB 0x017A /* L3 Explicit Writeback */ 49#define MCACOD_L3WB 0x017A /* L3 Explicit Writeback */
41#define MCACOD_DATA 0x0134 /* Data Load */ 50#define MCACOD_DATA 0x0134 /* Data Load */
42#define MCACOD_INSTR 0x0150 /* Instruction Fetch */ 51#define MCACOD_INSTR 0x0150 /* Instruction Fetch */
@@ -188,6 +197,9 @@ extern void register_mce_write_callback(ssize_t (*)(struct file *filp,
188 const char __user *ubuf, 197 const char __user *ubuf,
189 size_t usize, loff_t *off)); 198 size_t usize, loff_t *off));
190 199
200/* Disable CMCI/polling for MCA bank claimed by firmware */
201extern void mce_disable_bank(int bank);
202
191/* 203/*
192 * Exception handler 204 * Exception handler
193 */ 205 */
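
Editorial aside: skipping bit 12 (the 'F' filtering bit) in MCACOD means signature checks keep matching even when the hardware flags an error as filtered. A small demonstration with a made-up status value:

    #include <stdio.h>

    #define MCACOD_OLD  0xffff
    #define MCACOD_NEW  0xefff /* bit 12 ('F') skipped */
    #define MCACOD_DATA 0x0134 /* Data Load */

    int main(void)
    {
            /* Hypothetical IA32_MCi_STATUS low bits: Data Load with F set. */
            unsigned long long status = 0x1134;

            printf("old mask matches: %d\n", (status & MCACOD_OLD) == MCACOD_DATA);
            printf("new mask matches: %d\n", (status & MCACOD_NEW) == MCACOD_DATA);
            return 0;
    }
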
diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h
index 50e5c58ced23..4c019179a57d 100644
--- a/arch/x86/include/asm/microcode_amd.h
+++ b/arch/x86/include/asm/microcode_amd.h
@@ -59,7 +59,7 @@ static inline u16 find_equiv_id(struct equiv_cpu_entry *equiv_cpu_table,
59 59
60extern int __apply_microcode_amd(struct microcode_amd *mc_amd); 60extern int __apply_microcode_amd(struct microcode_amd *mc_amd);
61extern int apply_microcode_amd(int cpu); 61extern int apply_microcode_amd(int cpu);
62extern enum ucode_state load_microcode_amd(int cpu, const u8 *data, size_t size); 62extern enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size);
63 63
64#ifdef CONFIG_MICROCODE_AMD_EARLY 64#ifdef CONFIG_MICROCODE_AMD_EARLY
65#ifdef CONFIG_X86_32 65#ifdef CONFIG_X86_32
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index cdbf36776106..be12c534fd59 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -45,22 +45,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
45 /* Re-load page tables */ 45 /* Re-load page tables */
46 load_cr3(next->pgd); 46 load_cr3(next->pgd);
47 47
48 /* stop flush ipis for the previous mm */ 48 /* Stop flush ipis for the previous mm */
49 cpumask_clear_cpu(cpu, mm_cpumask(prev)); 49 cpumask_clear_cpu(cpu, mm_cpumask(prev));
50 50
51 /* 51 /* Load the LDT, if the LDT is different: */
52 * load the LDT, if the LDT is different:
53 */
54 if (unlikely(prev->context.ldt != next->context.ldt)) 52 if (unlikely(prev->context.ldt != next->context.ldt))
55 load_LDT_nolock(&next->context); 53 load_LDT_nolock(&next->context);
56 } 54 }
57#ifdef CONFIG_SMP 55#ifdef CONFIG_SMP
58 else { 56 else {
59 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK); 57 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
60 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next); 58 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
61 59
62 if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next))) { 60 if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
63 /* We were in lazy tlb mode and leave_mm disabled 61 /*
62 * On established mms, the mm_cpumask is only changed
63 * from irq context, from ptep_clear_flush() while in
64 * lazy tlb mode, and here. Irqs are blocked during
65 * schedule, protecting us from simultaneous changes.
66 */
67 cpumask_set_cpu(cpu, mm_cpumask(next));
68 /*
69 * We were in lazy tlb mode and leave_mm disabled
64 * tlb flush IPI delivery. We must reload CR3 70 * tlb flush IPI delivery. We must reload CR3
65 * to make sure to use no freed page tables. 71 * to make sure to use no freed page tables.
66 */ 72 */
diff --git a/arch/x86/include/asm/mutex_64.h b/arch/x86/include/asm/mutex_64.h
index 2c543fff241b..e7e6751648ed 100644
--- a/arch/x86/include/asm/mutex_64.h
+++ b/arch/x86/include/asm/mutex_64.h
@@ -16,6 +16,20 @@
  *
  * Atomically decrements @v and calls <fail_fn> if the result is negative.
  */
+#ifdef CC_HAVE_ASM_GOTO
+static inline void __mutex_fastpath_lock(atomic_t *v,
+					 void (*fail_fn)(atomic_t *))
+{
+	asm volatile goto(LOCK_PREFIX " decl %0\n"
+			  " jns %l[exit]\n"
+			  : : "m" (v->counter)
+			  : "memory", "cc"
+			  : exit);
+	fail_fn(v);
+exit:
+	return;
+}
+#else
 #define __mutex_fastpath_lock(v, fail_fn)			\
 do {								\
 	unsigned long dummy;					\
@@ -32,6 +46,7 @@ do { \
 		     : "rax", "rsi", "rdx", "rcx",		\
 		       "r8", "r9", "r10", "r11", "memory");	\
 } while (0)
+#endif
 
 /**
  * __mutex_fastpath_lock_retval - try to take the lock by moving the count
@@ -56,6 +71,20 @@ static inline int __mutex_fastpath_lock_retval(atomic_t *count)
  *
  * Atomically increments @v and calls <fail_fn> if the result is nonpositive.
  */
+#ifdef CC_HAVE_ASM_GOTO
+static inline void __mutex_fastpath_unlock(atomic_t *v,
+					   void (*fail_fn)(atomic_t *))
+{
+	asm volatile goto(LOCK_PREFIX " incl %0\n"
+			  " jg %l[exit]\n"
+			  : : "m" (v->counter)
+			  : "memory", "cc"
+			  : exit);
+	fail_fn(v);
+exit:
+	return;
+}
+#else
 #define __mutex_fastpath_unlock(v, fail_fn)			\
 do {								\
 	unsigned long dummy;					\
@@ -72,6 +101,7 @@ do { \
 		     : "rax", "rsi", "rdx", "rcx",		\
 		       "r8", "r9", "r10", "r11", "memory");	\
 } while (0)
+#endif
 
 #define __mutex_slowpath_needs_to_unlock()	1
 
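The new fastpaths lean on the compiler's asm-goto extension: the locked decrement either falls through to a label or calls the slowpath, with no return value to test. A standalone userspace model of the same idiom, for readers who want to try it (my_atomic_t and my_slowpath are illustrative stand-ins; build on x86 with a gcc that supports asm goto):

#include <stdio.h>

typedef struct { int counter; } my_atomic_t;

static void my_slowpath(my_atomic_t *v)
{
        printf("contended: counter=%d\n", v->counter);
}

static inline void my_fastpath_lock(my_atomic_t *v)
{
        /* same pattern as __mutex_fastpath_lock() above: decrement and
         * branch directly to 'exit' while the result is non-negative */
        asm volatile goto("lock; decl %0\n\t"
                          "jns %l[exit]\n"
                          : : "m" (v->counter)
                          : "memory", "cc"
                          : exit);
        my_slowpath(v);         /* only reached when the count went negative */
exit:
        return;
}

int main(void)
{
        my_atomic_t m = { .counter = 1 };

        my_fastpath_lock(&m);   /* 1 -> 0: uncontended, slowpath skipped */
        my_fastpath_lock(&m);   /* 0 -> -1: slowpath runs */
        return 0;
}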
diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h
index ef17af013475..f48b17df4224 100644
--- a/arch/x86/include/asm/page_32_types.h
+++ b/arch/x86/include/asm/page_32_types.h
@@ -15,6 +15,8 @@
  */
 #define __PAGE_OFFSET		_AC(CONFIG_PAGE_OFFSET, UL)
 
+#define __START_KERNEL_map	__PAGE_OFFSET
+
 #define THREAD_SIZE_ORDER	1
 #define THREAD_SIZE		(PAGE_SIZE << THREAD_SIZE_ORDER)
 
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index 6c896fbe21db..43dcd804ebd5 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -32,11 +32,6 @@
  */
 #define __PAGE_OFFSET           _AC(0xffff880000000000, UL)
 
-#define __PHYSICAL_START	((CONFIG_PHYSICAL_START +		\
-				  (CONFIG_PHYSICAL_ALIGN - 1)) &	\
-				 ~(CONFIG_PHYSICAL_ALIGN - 1))
-
-#define __START_KERNEL		(__START_KERNEL_map + __PHYSICAL_START)
 #define __START_KERNEL_map	_AC(0xffffffff80000000, UL)
 
 /* See Documentation/x86/x86_64/mm.txt for a description of the memory map. */
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
index 54c97879195e..f97fbe3abb67 100644
--- a/arch/x86/include/asm/page_types.h
+++ b/arch/x86/include/asm/page_types.h
@@ -33,6 +33,11 @@
 	(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
 	 VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
 
+#define __PHYSICAL_START	ALIGN(CONFIG_PHYSICAL_START, \
+				      CONFIG_PHYSICAL_ALIGN)
+
+#define __START_KERNEL		(__START_KERNEL_map + __PHYSICAL_START)
+
 #ifdef CONFIG_X86_64
 #include <asm/page_64_types.h>
 #else
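The two hunks above move __PHYSICAL_START/__START_KERNEL to the common header and replace the open-coded round-up with ALIGN(). A quick standalone check of the arithmetic, using the common defaults CONFIG_PHYSICAL_START=0x1000000 and CONFIG_PHYSICAL_ALIGN=0x200000 (illustrative values, not taken from the diff; assumes a 64-bit host):

#include <stdio.h>

#define ALIGN(x, a)             (((x) + (a) - 1) & ~((a) - 1))
#define CONFIG_PHYSICAL_START   0x1000000UL
#define CONFIG_PHYSICAL_ALIGN   0x200000UL
#define __START_KERNEL_map      0xffffffff80000000UL

#define __PHYSICAL_START ALIGN(CONFIG_PHYSICAL_START, CONFIG_PHYSICAL_ALIGN)
#define __START_KERNEL   (__START_KERNEL_map + __PHYSICAL_START)

int main(void)
{
        /* prints ffffffff81000000: 16 MiB above the kernel mapping base,
         * already 2 MiB aligned so ALIGN() leaves it unchanged */
        printf("%lx\n", __START_KERNEL);
        return 0;
}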
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index cfdc9ee4c900..401f350ef71b 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -712,36 +712,16 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
 
 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
 
-static inline int arch_spin_is_locked(struct arch_spinlock *lock)
+static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
+							__ticket_t ticket)
 {
-	return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
+	PVOP_VCALLEE2(pv_lock_ops.lock_spinning, lock, ticket);
 }
 
-static inline int arch_spin_is_contended(struct arch_spinlock *lock)
+static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock,
+							__ticket_t ticket)
 {
-	return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
-}
-#define arch_spin_is_contended	arch_spin_is_contended
-
-static __always_inline void arch_spin_lock(struct arch_spinlock *lock)
-{
-	PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
-}
-
-static __always_inline void arch_spin_lock_flags(struct arch_spinlock *lock,
-						  unsigned long flags)
-{
-	PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
-}
-
-static __always_inline int arch_spin_trylock(struct arch_spinlock *lock)
-{
-	return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
-}
-
-static __always_inline void arch_spin_unlock(struct arch_spinlock *lock)
-{
-	PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
+	PVOP_VCALL2(pv_lock_ops.unlock_kick, lock, ticket);
 }
 
 #endif
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 0db1fcac668c..aab8f671b523 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -327,13 +327,15 @@ struct pv_mmu_ops {
 };
 
 struct arch_spinlock;
+#ifdef CONFIG_SMP
+#include <asm/spinlock_types.h>
+#else
+typedef u16 __ticket_t;
+#endif
+
 struct pv_lock_ops {
-	int (*spin_is_locked)(struct arch_spinlock *lock);
-	int (*spin_is_contended)(struct arch_spinlock *lock);
-	void (*spin_lock)(struct arch_spinlock *lock);
-	void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
-	int (*spin_trylock)(struct arch_spinlock *lock);
-	void (*spin_unlock)(struct arch_spinlock *lock);
+	struct paravirt_callee_save lock_spinning;
+	void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
 };
 
 /* This contains all the paravirt structures: we get a convenient
@@ -387,7 +389,8 @@ extern struct pv_lock_ops pv_lock_ops;
 
 /* Simple instruction patching code. */
 #define DEF_NATIVE(ops, name, code)					\
-	extern const char start_##ops##_##name[], end_##ops##_##name[];	\
+	extern const char start_##ops##_##name[] __visible,		\
+			  end_##ops##_##name[] __visible;		\
 	asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
 
 unsigned paravirt_patch_nop(void);
diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
index f2b489cf1602..3bf2dd0cf61f 100644
--- a/arch/x86/include/asm/pgtable-2level.h
+++ b/arch/x86/include/asm/pgtable-2level.h
@@ -55,9 +55,53 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
 #define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
 #endif
 
+#ifdef CONFIG_MEM_SOFT_DIRTY
+
+/*
+ * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE, _PAGE_BIT_SOFT_DIRTY and
+ * _PAGE_BIT_PROTNONE are taken, split up the 28 bits of offset
+ * into this range.
+ */
+#define PTE_FILE_MAX_BITS	28
+#define PTE_FILE_SHIFT1		(_PAGE_BIT_PRESENT + 1)
+#define PTE_FILE_SHIFT2		(_PAGE_BIT_FILE + 1)
+#define PTE_FILE_SHIFT3		(_PAGE_BIT_PROTNONE + 1)
+#define PTE_FILE_SHIFT4		(_PAGE_BIT_SOFT_DIRTY + 1)
+#define PTE_FILE_BITS1		(PTE_FILE_SHIFT2 - PTE_FILE_SHIFT1 - 1)
+#define PTE_FILE_BITS2		(PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1)
+#define PTE_FILE_BITS3		(PTE_FILE_SHIFT4 - PTE_FILE_SHIFT3 - 1)
+
+#define pte_to_pgoff(pte)						\
+	((((pte).pte_low >> (PTE_FILE_SHIFT1))				\
+	  & ((1U << PTE_FILE_BITS1) - 1)))				\
+	+ ((((pte).pte_low >> (PTE_FILE_SHIFT2))			\
+	    & ((1U << PTE_FILE_BITS2) - 1))				\
+	   << (PTE_FILE_BITS1))						\
+	+ ((((pte).pte_low >> (PTE_FILE_SHIFT3))			\
+	    & ((1U << PTE_FILE_BITS3) - 1))				\
+	   << (PTE_FILE_BITS1 + PTE_FILE_BITS2))			\
+	+ ((((pte).pte_low >> (PTE_FILE_SHIFT4)))			\
+	   << (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3))
+
+#define pgoff_to_pte(off)						\
+	((pte_t) { .pte_low =						\
+	 ((((off)) & ((1U << PTE_FILE_BITS1) - 1)) << PTE_FILE_SHIFT1)	\
+	 + ((((off) >> PTE_FILE_BITS1)					\
+	     & ((1U << PTE_FILE_BITS2) - 1))				\
+	    << PTE_FILE_SHIFT2)						\
+	 + ((((off) >> (PTE_FILE_BITS1 + PTE_FILE_BITS2))		\
+	     & ((1U << PTE_FILE_BITS3) - 1))				\
+	    << PTE_FILE_SHIFT3)						\
+	 + ((((off) >>							\
+	      (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3)))	\
+	    << PTE_FILE_SHIFT4)						\
+	 + _PAGE_FILE })
+
+#else /* CONFIG_MEM_SOFT_DIRTY */
+
 /*
  * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken,
- * split up the 29 bits of offset into this range:
+ * split up the 29 bits of offset into this range.
  */
 #define PTE_FILE_MAX_BITS	29
 #define PTE_FILE_SHIFT1		(_PAGE_BIT_PRESENT + 1)
@@ -88,6 +132,8 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
 	    << PTE_FILE_SHIFT3)						\
 	 + _PAGE_FILE })
 
+#endif /* CONFIG_MEM_SOFT_DIRTY */
+
 /* Encode and de-code a swap entry */
 #if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE
 #define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1)
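The soft-dirty variant of the packing above splits a 28-bit file offset around four reserved pte bits. A userspace round-trip check of the same scheme, with the x86 bit numbers hard-coded (_PAGE_BIT_PRESENT=0, _PAGE_BIT_FILE=6, _PAGE_BIT_PROTNONE=8, _PAGE_BIT_SOFT_DIRTY=11 are assumptions of this sketch, not part of the diff):

#include <assert.h>
#include <stdint.h>

#define SHIFT1 1   /* _PAGE_BIT_PRESENT + 1 */
#define SHIFT2 7   /* _PAGE_BIT_FILE + 1 */
#define SHIFT3 9   /* _PAGE_BIT_PROTNONE + 1 */
#define SHIFT4 12  /* _PAGE_BIT_SOFT_DIRTY + 1 */
#define BITS1 (SHIFT2 - SHIFT1 - 1)  /* 5 bits at pte[1..5]  */
#define BITS2 (SHIFT3 - SHIFT2 - 1)  /* 1 bit  at pte[7]     */
#define BITS3 (SHIFT4 - SHIFT3 - 1)  /* 2 bits at pte[9..10] */

static uint32_t pgoff_to_pte(uint32_t off)
{
        /* scatter the offset around bits 0, 6, 8 and 11 */
        return  ((off & ((1u << BITS1) - 1)) << SHIFT1) |
                (((off >> BITS1) & ((1u << BITS2) - 1)) << SHIFT2) |
                (((off >> (BITS1 + BITS2)) & ((1u << BITS3) - 1)) << SHIFT3) |
                ((off >> (BITS1 + BITS2 + BITS3)) << SHIFT4);
}

static uint32_t pte_to_pgoff(uint32_t pte)
{
        /* gather the pieces back into a contiguous offset */
        return  ((pte >> SHIFT1) & ((1u << BITS1) - 1)) |
                (((pte >> SHIFT2) & ((1u << BITS2) - 1)) << BITS1) |
                (((pte >> SHIFT3) & ((1u << BITS3) - 1)) << (BITS1 + BITS2)) |
                ((pte >> SHIFT4) << (BITS1 + BITS2 + BITS3));
}

int main(void)
{
        for (uint32_t off = 0; off < (1u << 20); off += 4093)
                assert(pte_to_pgoff(pgoff_to_pte(off)) == off);
        return 0;
}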
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index 4cc9f2b7cdc3..81bb91b49a88 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -179,6 +179,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
 /*
  * Bits 0, 6 and 7 are taken in the low part of the pte,
  * put the 32 bits of offset into the high part.
+ *
+ * For soft-dirty tracking 11 bit is taken from
+ * the low part of pte as well.
  */
 #define pte_to_pgoff(pte) ((pte).pte_high)
 #define pgoff_to_pte(off)						\
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 7dc305a46058..8d16befdec88 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -22,7 +22,8 @@
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
  */
-extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
+	__visible;
 #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
 
 extern spinlock_t pgd_lock;
@@ -314,6 +315,36 @@ static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
 	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
 }
 
+static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
+{
+	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
+}
+
+static inline int pte_swp_soft_dirty(pte_t pte)
+{
+	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
+}
+
+static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
+{
+	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
+}
+
+static inline pte_t pte_file_clear_soft_dirty(pte_t pte)
+{
+	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
+}
+
+static inline pte_t pte_file_mksoft_dirty(pte_t pte)
+{
+	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
+}
+
+static inline int pte_file_soft_dirty(pte_t pte)
+{
+	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
+}
+
 /*
  * Mask out unsupported bits in a present pgprot. Non-present pgprots
  * can use those bits for other purposes, so leave them be.
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index c98ac63aae48..f4843e031131 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -61,12 +61,27 @@
  * they do not conflict with each other.
  */
 
+#define _PAGE_BIT_SOFT_DIRTY	_PAGE_BIT_HIDDEN
+
 #ifdef CONFIG_MEM_SOFT_DIRTY
-#define _PAGE_SOFT_DIRTY	(_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
+#define _PAGE_SOFT_DIRTY	(_AT(pteval_t, 1) << _PAGE_BIT_SOFT_DIRTY)
 #else
 #define _PAGE_SOFT_DIRTY	(_AT(pteval_t, 0))
 #endif
 
+/*
+ * Tracking soft dirty bit when a page goes to a swap is tricky.
+ * We need a bit which can be stored in pte _and_ not conflict
+ * with swap entry format. On x86 bits 6 and 7 are *not* involved
+ * into swap entry computation, but bit 6 is used for nonlinear
+ * file mapping, so we borrow bit 7 for soft dirty tracking.
+ */
+#ifdef CONFIG_MEM_SOFT_DIRTY
+#define _PAGE_SWP_SOFT_DIRTY	_PAGE_PSE
+#else
+#define _PAGE_SWP_SOFT_DIRTY	(_AT(pteval_t, 0))
+#endif
+
 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
 #define _PAGE_NX	(_AT(pteval_t, 1) << _PAGE_BIT_NX)
 #else
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 24cf5aefb704..987c75ecc334 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -412,7 +412,7 @@ union irq_stack_union {
 	};
 };
 
-DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union);
+DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union) __visible;
 DECLARE_INIT_PER_CPU(irq_stack_union);
 
 DECLARE_PER_CPU(char *, irq_stack_ptr);
@@ -942,33 +942,19 @@ extern int set_tsc_mode(unsigned int val);
 
 extern u16 amd_get_nb_id(int cpu);
 
-struct aperfmperf {
-	u64 aperf, mperf;
-};
-
-static inline void get_aperfmperf(struct aperfmperf *am)
-{
-	WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_APERFMPERF));
-
-	rdmsrl(MSR_IA32_APERF, am->aperf);
-	rdmsrl(MSR_IA32_MPERF, am->mperf);
-}
-
-#define APERFMPERF_SHIFT 10
-
-static inline
-unsigned long calc_aperfmperf_ratio(struct aperfmperf *old,
-				    struct aperfmperf *new)
-{
-	u64 aperf = new->aperf - old->aperf;
-	u64 mperf = new->mperf - old->mperf;
-	unsigned long ratio = aperf;
-
-	mperf >>= APERFMPERF_SHIFT;
-	if (mperf)
-		ratio = div64_u64(aperf, mperf);
-
-	return ratio;
-}
+static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
+{
+	uint32_t base, eax, signature[3];
+
+	for (base = 0x40000000; base < 0x40010000; base += 0x100) {
+		cpuid(base, &eax, &signature[0], &signature[1], &signature[2]);
+
+		if (!memcmp(sig, signature, 12) &&
+		    (leaves == 0 || ((eax - base) >= leaves)))
+			return base;
+	}
+
+	return 0;
+}
 
 extern unsigned long arch_align_stack(unsigned long sp);
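The new hypervisor_cpuid_base() helper scans the 0x40000000-0x4000ffff CPUID range for a 12-byte vendor signature. A sketch of how a guest would use it; the Xen signature and leaf count come from the xen_cpuid_base() hunk further down in this diff, while the KVM signature "KVMKVMKVM\0\0\0" is quoted from memory and should be checked against kvm_para.h:

#include <linux/types.h>
#include <asm/processor.h>

static inline bool running_on_xen(void)
{
        /* require at least 2 leaves past the base, as xen_cpuid_base() does */
        return hypervisor_cpuid_base("XenVMMXenVMM", 2) != 0;
}

static inline bool running_on_kvm(void)
{
        /* leaves == 0: any number of extra leaves is acceptable */
        return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0) != 0;
}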
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index 109a9dd5d454..be8269b00e2a 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -93,7 +93,6 @@ unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src,
 
 struct pvclock_vsyscall_time_info {
 	struct pvclock_vcpu_time_info pvti;
-	u32 migrate_count;
 } __attribute__((__aligned__(SMP_CACHE_BYTES)));
 
 #define PVTI_SIZE sizeof(struct pvclock_vsyscall_time_info)
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index b7bf3505e1ec..347555492dad 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -6,6 +6,8 @@
 
 #define COMMAND_LINE_SIZE 2048
 
+#include <linux/linkage.h>
+
 #ifdef __i386__
 
 #include <linux/pfn.h>
@@ -108,11 +110,11 @@ void *extend_brk(size_t size, size_t align);
 extern void probe_roms(void);
 #ifdef __i386__
 
-void __init i386_start_kernel(void);
+asmlinkage void __init i386_start_kernel(void);
 
 #else
-void __init x86_64_start_kernel(char *real_mode);
-void __init x86_64_start_reservations(char *real_mode_data);
+asmlinkage void __init x86_64_start_kernel(char *real_mode);
+asmlinkage void __init x86_64_start_reservations(char *real_mode_data);
 
 #endif /* __i386__ */
 #endif /* _SETUP */
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index 2f4d924fe6c9..645cad2c95ff 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -101,7 +101,7 @@ static inline void native_wbinvd(void)
 	asm volatile("wbinvd": : :"memory");
 }
 
-extern void native_load_gs_index(unsigned);
+extern asmlinkage void native_load_gs_index(unsigned);
 
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 33692eaabab5..bf156ded74b5 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -1,11 +1,14 @@
 #ifndef _ASM_X86_SPINLOCK_H
 #define _ASM_X86_SPINLOCK_H
 
+#include <linux/jump_label.h>
 #include <linux/atomic.h>
 #include <asm/page.h>
 #include <asm/processor.h>
 #include <linux/compiler.h>
 #include <asm/paravirt.h>
+#include <asm/bitops.h>
+
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
  *
@@ -34,6 +37,36 @@
 # define UNLOCK_LOCK_PREFIX
 #endif
 
+/* How long a lock should spin before we consider blocking */
+#define SPIN_THRESHOLD	(1 << 15)
+
+extern struct static_key paravirt_ticketlocks_enabled;
+static __always_inline bool static_key_false(struct static_key *key);
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+
+static inline void __ticket_enter_slowpath(arch_spinlock_t *lock)
+{
+	set_bit(0, (volatile unsigned long *)&lock->tickets.tail);
+}
+
+#else	/* !CONFIG_PARAVIRT_SPINLOCKS */
+static __always_inline void __ticket_lock_spinning(arch_spinlock_t *lock,
+							__ticket_t ticket)
+{
+}
+static inline void __ticket_unlock_kick(arch_spinlock_t *lock,
+							__ticket_t ticket)
+{
+}
+
+#endif /* CONFIG_PARAVIRT_SPINLOCKS */
+
+static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+	return lock.tickets.head == lock.tickets.tail;
+}
+
 /*
  * Ticket locks are conceptually two parts, one indicating the current head of
  * the queue, and the other indicating the current tail. The lock is acquired
@@ -47,81 +80,101 @@
  * in the high part, because a wide xadd increment of the low part would carry
  * up and contaminate the high part.
  */
-static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
+static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
 {
-	register struct __raw_tickets inc = { .tail = 1 };
+	register struct __raw_tickets inc = { .tail = TICKET_LOCK_INC };
 
 	inc = xadd(&lock->tickets, inc);
+	if (likely(inc.head == inc.tail))
+		goto out;
 
+	inc.tail &= ~TICKET_SLOWPATH_FLAG;
 	for (;;) {
-		if (inc.head == inc.tail)
-			break;
-		cpu_relax();
-		inc.head = ACCESS_ONCE(lock->tickets.head);
+		unsigned count = SPIN_THRESHOLD;
+
+		do {
+			if (ACCESS_ONCE(lock->tickets.head) == inc.tail)
+				goto out;
+			cpu_relax();
+		} while (--count);
+		__ticket_lock_spinning(lock, inc.tail);
 	}
-	barrier();		/* make sure nothing creeps before the lock is taken */
+out:	barrier();	/* make sure nothing creeps before the lock is taken */
 }
 
-static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
+static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	arch_spinlock_t old, new;
 
 	old.tickets = ACCESS_ONCE(lock->tickets);
-	if (old.tickets.head != old.tickets.tail)
+	if (old.tickets.head != (old.tickets.tail & ~TICKET_SLOWPATH_FLAG))
 		return 0;
 
-	new.head_tail = old.head_tail + (1 << TICKET_SHIFT);
+	new.head_tail = old.head_tail + (TICKET_LOCK_INC << TICKET_SHIFT);
 
 	/* cmpxchg is a full barrier, so nothing can move before it */
 	return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
 }
 
-static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
+static inline void __ticket_unlock_slowpath(arch_spinlock_t *lock,
+					    arch_spinlock_t old)
 {
-	__add(&lock->tickets.head, 1, UNLOCK_LOCK_PREFIX);
+	arch_spinlock_t new;
+
+	BUILD_BUG_ON(((__ticket_t)NR_CPUS) != NR_CPUS);
+
+	/* Perform the unlock on the "before" copy */
+	old.tickets.head += TICKET_LOCK_INC;
+
+	/* Clear the slowpath flag */
+	new.head_tail = old.head_tail & ~(TICKET_SLOWPATH_FLAG << TICKET_SHIFT);
+
+	/*
+	 * If the lock is uncontended, clear the flag - use cmpxchg in
+	 * case it changes behind our back though.
+	 */
+	if (new.tickets.head != new.tickets.tail ||
+	    cmpxchg(&lock->head_tail, old.head_tail,
+					new.head_tail) != old.head_tail) {
+		/*
+		 * Lock still has someone queued for it, so wake up an
+		 * appropriate waiter.
+		 */
+		__ticket_unlock_kick(lock, old.tickets.head);
+	}
 }
 
-static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
+static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
-	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
+	if (TICKET_SLOWPATH_FLAG &&
+	    static_key_false(&paravirt_ticketlocks_enabled)) {
+		arch_spinlock_t prev;
 
-	return tmp.tail != tmp.head;
-}
+		prev = *lock;
+		add_smp(&lock->tickets.head, TICKET_LOCK_INC);
 
-static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
-{
-	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
+		/* add_smp() is a full mb() */
 
-	return (__ticket_t)(tmp.tail - tmp.head) > 1;
+		if (unlikely(lock->tickets.tail & TICKET_SLOWPATH_FLAG))
+			__ticket_unlock_slowpath(lock, prev);
+	} else
+		__add(&lock->tickets.head, TICKET_LOCK_INC, UNLOCK_LOCK_PREFIX);
 }
 
-#ifndef CONFIG_PARAVIRT_SPINLOCKS
-
 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
-	return __ticket_spin_is_locked(lock);
-}
-
-static inline int arch_spin_is_contended(arch_spinlock_t *lock)
-{
-	return __ticket_spin_is_contended(lock);
-}
-#define arch_spin_is_contended	arch_spin_is_contended
+	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
 
-static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
-{
-	__ticket_spin_lock(lock);
+	return tmp.tail != tmp.head;
 }
 
-static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 {
-	return __ticket_spin_trylock(lock);
-}
+	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
 
-static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
-	__ticket_spin_unlock(lock);
-}
+	return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC;
+}
+#define arch_spin_is_contended	arch_spin_is_contended
 
 static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
 						  unsigned long flags)
@@ -129,8 +182,6 @@ static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
 	arch_spin_lock(lock);
 }
 
-#endif	/* CONFIG_PARAVIRT_SPINLOCKS */
-
 static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
 	while (arch_spin_is_locked(lock))
@@ -233,8 +284,4 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
 #define arch_read_relax(lock)	cpu_relax()
 #define arch_write_relax(lock)	cpu_relax()
 
-/* The {read|write|spin}_lock() on x86 are full memory barriers. */
-static inline void smp_mb__after_lock(void) { }
-#define ARCH_HAS_SMP_MB_AFTER_LOCK
-
 #endif /* _ASM_X86_SPINLOCK_H */
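The trick that makes the pv ticketlocks work is visible in the spinlock_types.h hunk below: when paravirt spinlocks are configured, tickets advance in steps of two, so bit 0 of the tail is always free to serve as TICKET_SLOWPATH_FLAG. A minimal single-threaded model of that arithmetic (types and constants mirror the kernel's; the test harness is ours):

#include <assert.h>
#include <stdint.h>

typedef uint8_t __ticket_t;
#define TICKET_LOCK_INC         ((__ticket_t)2)
#define TICKET_SLOWPATH_FLAG    ((__ticket_t)1)

struct __raw_tickets { __ticket_t head, tail; };

int main(void)
{
        struct __raw_tickets t = { 0, 0 };

        /* three CPUs take tickets: 0, 2, 4 */
        __ticket_t a = t.tail; t.tail += TICKET_LOCK_INC;
        __ticket_t b = t.tail; t.tail += TICKET_LOCK_INC;
        __ticket_t c = t.tail; t.tail += TICKET_LOCK_INC;

        /* a waiter gives up spinning and marks the lock contended */
        t.tail |= TICKET_SLOWPATH_FLAG;

        /* the flag never collides with a real ticket number */
        assert((a & 1) == 0 && (b & 1) == 0 && (c & 1) == 0);

        /* unlock: head advances; waiters compare head against their ticket
         * with the flag masked off, exactly as arch_spin_lock() does */
        t.head += TICKET_LOCK_INC;
        assert(t.head == b);
        assert((__ticket_t)(t.tail & ~TICKET_SLOWPATH_FLAG) ==
               (__ticket_t)(c + TICKET_LOCK_INC));
        return 0;
}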
diff --git a/arch/x86/include/asm/spinlock_types.h b/arch/x86/include/asm/spinlock_types.h
index ad0ad07fc006..4f1bea19945b 100644
--- a/arch/x86/include/asm/spinlock_types.h
+++ b/arch/x86/include/asm/spinlock_types.h
@@ -1,13 +1,17 @@
 #ifndef _ASM_X86_SPINLOCK_TYPES_H
 #define _ASM_X86_SPINLOCK_TYPES_H
 
-#ifndef __LINUX_SPINLOCK_TYPES_H
-# error "please don't include this file directly"
-#endif
-
 #include <linux/types.h>
 
-#if (CONFIG_NR_CPUS < 256)
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+#define __TICKET_LOCK_INC	2
+#define TICKET_SLOWPATH_FLAG	((__ticket_t)1)
+#else
+#define __TICKET_LOCK_INC	1
+#define TICKET_SLOWPATH_FLAG	((__ticket_t)0)
+#endif
+
+#if (CONFIG_NR_CPUS < (256 / __TICKET_LOCK_INC))
 typedef u8  __ticket_t;
 typedef u16 __ticketpair_t;
 #else
@@ -15,6 +19,8 @@ typedef u16 __ticket_t;
 typedef u32 __ticketpair_t;
 #endif
 
+#define TICKET_LOCK_INC	((__ticket_t)__TICKET_LOCK_INC)
+
 #define TICKET_SHIFT	(sizeof(__ticket_t) * 8)
 
 typedef struct arch_spinlock {
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index 4ec45b3abba1..d7f3b3b78ac3 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -2,8 +2,8 @@
 #define _ASM_X86_SWITCH_TO_H
 
 struct task_struct; /* one of the stranger aspects of C forward declarations */
-struct task_struct *__switch_to(struct task_struct *prev,
-				struct task_struct *next);
+__visible struct task_struct *__switch_to(struct task_struct *prev,
+					   struct task_struct *next);
 struct tss_struct;
 void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 		      struct tss_struct *tss);
diff --git a/arch/x86/include/asm/sync_bitops.h b/arch/x86/include/asm/sync_bitops.h
index 9d09b4073b60..05af3b31d522 100644
--- a/arch/x86/include/asm/sync_bitops.h
+++ b/arch/x86/include/asm/sync_bitops.h
@@ -26,9 +26,9 @@
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void sync_set_bit(int nr, volatile unsigned long *addr)
+static inline void sync_set_bit(long nr, volatile unsigned long *addr)
 {
-	asm volatile("lock; btsl %1,%0"
+	asm volatile("lock; bts %1,%0"
 		     : "+m" (ADDR)
 		     : "Ir" (nr)
 		     : "memory");
@@ -44,9 +44,9 @@ static inline void sync_set_bit(int nr, volatile unsigned long *addr)
  * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
  * in order to ensure changes are visible on other processors.
  */
-static inline void sync_clear_bit(int nr, volatile unsigned long *addr)
+static inline void sync_clear_bit(long nr, volatile unsigned long *addr)
 {
-	asm volatile("lock; btrl %1,%0"
+	asm volatile("lock; btr %1,%0"
 		     : "+m" (ADDR)
 		     : "Ir" (nr)
 		     : "memory");
@@ -61,9 +61,9 @@ static inline void sync_clear_bit(int nr, volatile unsigned long *addr)
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void sync_change_bit(int nr, volatile unsigned long *addr)
+static inline void sync_change_bit(long nr, volatile unsigned long *addr)
 {
-	asm volatile("lock; btcl %1,%0"
+	asm volatile("lock; btc %1,%0"
 		     : "+m" (ADDR)
 		     : "Ir" (nr)
 		     : "memory");
@@ -77,11 +77,11 @@ static inline void sync_change_bit(int nr, volatile unsigned long *addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int sync_test_and_set_bit(int nr, volatile unsigned long *addr)
+static inline int sync_test_and_set_bit(long nr, volatile unsigned long *addr)
 {
 	int oldbit;
 
-	asm volatile("lock; btsl %2,%1\n\tsbbl %0,%0"
+	asm volatile("lock; bts %2,%1\n\tsbbl %0,%0"
 		     : "=r" (oldbit), "+m" (ADDR)
 		     : "Ir" (nr) : "memory");
 	return oldbit;
@@ -95,11 +95,11 @@ static inline int sync_test_and_set_bit(int nr, volatile unsigned long *addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int sync_test_and_clear_bit(int nr, volatile unsigned long *addr)
+static inline int sync_test_and_clear_bit(long nr, volatile unsigned long *addr)
 {
 	int oldbit;
 
-	asm volatile("lock; btrl %2,%1\n\tsbbl %0,%0"
+	asm volatile("lock; btr %2,%1\n\tsbbl %0,%0"
 		     : "=r" (oldbit), "+m" (ADDR)
 		     : "Ir" (nr) : "memory");
 	return oldbit;
@@ -113,11 +113,11 @@ static inline int sync_test_and_clear_bit(int nr, volatile unsigned long *addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int sync_test_and_change_bit(int nr, volatile unsigned long *addr)
+static inline int sync_test_and_change_bit(long nr, volatile unsigned long *addr)
 {
 	int oldbit;
 
-	asm volatile("lock; btcl %2,%1\n\tsbbl %0,%0"
+	asm volatile("lock; btc %2,%1\n\tsbbl %0,%0"
 		     : "=r" (oldbit), "+m" (ADDR)
 		     : "Ir" (nr) : "memory");
 	return oldbit;
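The point of widening nr to long and dropping the explicit "l" suffix: the bit-test instructions take the offset's operand size from the register, so on 64-bit a long nr lets bts/btr/btc address bits past 2^31 of a large bitmap. A standalone illustration, assuming an x86-64 host with enough memory (my_sync_set_bit is a userspace copy of the pattern above, not kernel code):

#include <assert.h>
#include <stdlib.h>

static inline void my_sync_set_bit(long nr, volatile unsigned long *addr)
{
        /* no "l" suffix: the assembler picks btsq when nr is 64-bit */
        asm volatile("lock; bts %1,%0"
                     : "+m" (*(volatile long *)addr)
                     : "Ir" (nr)
                     : "memory");
}

int main(void)
{
        /* a 1 GiB bitmap holds 2^33 bits; bit 2^32 needs a 64-bit offset */
        unsigned long *map = calloc(1UL << 30, 1);

        if (!map)
                return 1;
        my_sync_set_bit(1L << 32, map);
        assert(map[(1UL << 32) / (8 * sizeof(long))] == 1);
        free(map);
        return 0;
}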
diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h
index 2e188d68397c..aea284b41312 100644
--- a/arch/x86/include/asm/syscall.h
+++ b/arch/x86/include/asm/syscall.h
@@ -20,7 +20,8 @@
 #include <asm/thread_info.h>	/* for TS_COMPAT */
 #include <asm/unistd.h>
 
-extern const unsigned long sys_call_table[];
+typedef void (*sys_call_ptr_t)(void);
+extern const sys_call_ptr_t sys_call_table[];
 
 /*
  * Only the low 32 bits of orig_ax are meaningful, so we return int.
diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
index 2917a6452c49..592a6a672e07 100644
--- a/arch/x86/include/asm/syscalls.h
+++ b/arch/x86/include/asm/syscalls.h
@@ -24,7 +24,7 @@ asmlinkage long sys_iopl(unsigned int);
 asmlinkage int sys_modify_ldt(int, void __user *, unsigned long);
 
 /* kernel/signal.c */
-long sys_rt_sigreturn(void);
+asmlinkage long sys_rt_sigreturn(void);
 
 /* kernel/tls.c */
 asmlinkage long sys_set_thread_area(struct user_desc __user *);
@@ -34,7 +34,7 @@ asmlinkage long sys_get_thread_area(struct user_desc __user *);
 #ifdef CONFIG_X86_32
 
 /* kernel/signal.c */
-unsigned long sys_sigreturn(void);
+asmlinkage unsigned long sys_sigreturn(void);
 
 /* kernel/vm86_32.c */
 asmlinkage long sys_vm86old(struct vm86_struct __user *);
@@ -44,7 +44,7 @@ asmlinkage long sys_vm86(unsigned long, unsigned long);
 
 /* X86_64 only */
 /* kernel/process_64.c */
-long sys_arch_prctl(int, unsigned long);
+asmlinkage long sys_arch_prctl(int, unsigned long);
 
 /* kernel/sys_x86_64.c */
 asmlinkage long sys_mmap(unsigned long, unsigned long, unsigned long,
diff --git a/arch/x86/include/asm/sysfb.h b/arch/x86/include/asm/sysfb.h
new file mode 100644
index 000000000000..2aeb3e25579c
--- /dev/null
+++ b/arch/x86/include/asm/sysfb.h
@@ -0,0 +1,98 @@
+#ifndef _ARCH_X86_KERNEL_SYSFB_H
+#define _ARCH_X86_KERNEL_SYSFB_H
+
+/*
+ * Generic System Framebuffers on x86
+ * Copyright (c) 2012-2013 David Herrmann <dh.herrmann@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/platform_data/simplefb.h>
+#include <linux/screen_info.h>
+
+enum {
+	M_I17,		/* 17-Inch iMac */
+	M_I20,		/* 20-Inch iMac */
+	M_I20_SR,	/* 20-Inch iMac (Santa Rosa) */
+	M_I24,		/* 24-Inch iMac */
+	M_I24_8_1,	/* 24-Inch iMac, 8,1th gen */
+	M_I24_10_1,	/* 24-Inch iMac, 10,1th gen */
+	M_I27_11_1,	/* 27-Inch iMac, 11,1th gen */
+	M_MINI,		/* Mac Mini */
+	M_MINI_3_1,	/* Mac Mini, 3,1th gen */
+	M_MINI_4_1,	/* Mac Mini, 4,1th gen */
+	M_MB,		/* MacBook */
+	M_MB_2,		/* MacBook, 2nd rev. */
+	M_MB_3,		/* MacBook, 3rd rev. */
+	M_MB_5_1,	/* MacBook, 5th rev. */
+	M_MB_6_1,	/* MacBook, 6th rev. */
+	M_MB_7_1,	/* MacBook, 7th rev. */
+	M_MB_SR,	/* MacBook, 2nd gen, (Santa Rosa) */
+	M_MBA,		/* MacBook Air */
+	M_MBA_3,	/* Macbook Air, 3rd rev */
+	M_MBP,		/* MacBook Pro */
+	M_MBP_2,	/* MacBook Pro 2nd gen */
+	M_MBP_2_2,	/* MacBook Pro 2,2nd gen */
+	M_MBP_SR,	/* MacBook Pro (Santa Rosa) */
+	M_MBP_4,	/* MacBook Pro, 4th gen */
+	M_MBP_5_1,	/* MacBook Pro, 5,1th gen */
+	M_MBP_5_2,	/* MacBook Pro, 5,2th gen */
+	M_MBP_5_3,	/* MacBook Pro, 5,3rd gen */
+	M_MBP_6_1,	/* MacBook Pro, 6,1th gen */
+	M_MBP_6_2,	/* MacBook Pro, 6,2th gen */
+	M_MBP_7_1,	/* MacBook Pro, 7,1th gen */
+	M_MBP_8_2,	/* MacBook Pro, 8,2nd gen */
+	M_UNKNOWN	/* placeholder */
+};
+
+struct efifb_dmi_info {
+	char *optname;
+	unsigned long base;
+	int stride;
+	int width;
+	int height;
+	int flags;
+};
+
+#ifdef CONFIG_EFI
+
+extern struct efifb_dmi_info efifb_dmi_list[];
+void sysfb_apply_efi_quirks(void);
+
+#else /* CONFIG_EFI */
+
+static inline void sysfb_apply_efi_quirks(void)
+{
+}
+
+#endif /* CONFIG_EFI */
+
+#ifdef CONFIG_X86_SYSFB
+
+bool parse_mode(const struct screen_info *si,
+		struct simplefb_platform_data *mode);
+int create_simplefb(const struct screen_info *si,
+		    const struct simplefb_platform_data *mode);
+
+#else /* CONFIG_X86_SYSFB */
+
+static inline bool parse_mode(const struct screen_info *si,
+			      struct simplefb_platform_data *mode)
+{
+	return false;
+}
+
+static inline int create_simplefb(const struct screen_info *si,
+				  const struct simplefb_platform_data *mode)
+{
+	return -EINVAL;
+}
+
+#endif /* CONFIG_X86_SYSFB */
+
+#endif /* _ARCH_X86_KERNEL_SYSFB_H */
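A sketch of how the new sysfb code added to the Makefile below is expected to drive this header; the initcall name and control flow here are illustrative guesses, the real call site is arch/x86/kernel/sysfb.c:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/screen_info.h>
#include <asm/sysfb.h>

static int __init my_sysfb_probe(void)
{
	struct simplefb_platform_data mode;
	struct screen_info *si = &screen_info;

	sysfb_apply_efi_quirks();

	/* prefer a generic simple-framebuffer if the mode is parseable */
	if (parse_mode(si, &mode))
		return create_simplefb(si, &mode);

	return -ENODEV;	/* the real code falls back to efifb/vesafb devices */
}
device_initcall(my_sysfb_probe);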
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 095b21507b6a..d35f24e231cd 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -124,9 +124,6 @@ extern const struct cpumask *cpu_coregroup_mask(int cpu);
 #define topology_core_id(cpu)			(cpu_data(cpu).cpu_core_id)
 #define topology_core_cpumask(cpu)		(per_cpu(cpu_core_map, cpu))
 #define topology_thread_cpumask(cpu)		(per_cpu(cpu_sibling_map, cpu))
-
-/* indicates that pointers to the topology cpumask_t maps are valid */
-#define arch_provides_topology_pointers		yes
 #endif
 
 static inline void arch_fix_phys_package_id(int num, u32 slot)
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index 88eae2aec619..7036cb60cd87 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -6,11 +6,7 @@
 #include <asm/debugreg.h>
 #include <asm/siginfo.h>			/* TRAP_TRACE, ... */
 
-#ifdef CONFIG_X86_32
-#define dotraplinkage
-#else
-#define dotraplinkage asmlinkage
-#endif
+#define dotraplinkage __visible
 
 asmlinkage void divide_error(void);
 asmlinkage void debug(void);
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index c91e8b9d588b..235be70d5bb4 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -49,6 +49,7 @@ extern void tsc_init(void);
 extern void mark_tsc_unstable(char *reason);
 extern int unsynchronized_tsc(void);
 extern int check_tsc_unstable(void);
+extern int check_tsc_disabled(void);
 extern unsigned long native_calibrate_tsc(void);
 
 extern int tsc_clocksource_reliable;
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 5ee26875baea..5838fa911aa0 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -153,16 +153,19 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
  * Careful: we have to cast the result to the type of the pointer
  * for sign reasons.
  *
- * The use of %edx as the register specifier is a bit of a
+ * The use of _ASM_DX as the register specifier is a bit of a
  * simplification, as gcc only cares about it as the starting point
  * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
  * (%ecx being the next register in gcc's x86 register sequence), and
  * %rdx on 64 bits.
+ *
+ * Clang/LLVM cares about the size of the register, but still wants
+ * the base register for something that ends up being a pair.
  */
 #define get_user(x, ptr)						\
 ({									\
 	int __ret_gu;							\
-	register __inttype(*(ptr)) __val_gu asm("%edx");		\
+	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);		\
 	__chk_user_ptr(ptr);						\
 	might_fault();							\
 	asm volatile("call __get_user_%P3"				\
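The register-pinning idiom get_user() relies on, reduced to a standalone demo (the _ASM_DX macro here is our own simplified stand-in for the kernel's asm.h plumbing):

#include <stdio.h>

#ifdef __x86_64__
# define _ASM_DX "rdx"
#else
# define _ASM_DX "edx"
#endif

int main(void)
{
        /* pin the local to %rdx/%edx by name, so hand-written call
         * sequences can rely on where the value lands */
        register unsigned long val asm(_ASM_DX);

        /* the compiler must satisfy "=r" with the pinned register,
         * so %0 below is guaranteed to be %rdx (or %edx on 32-bit) */
        asm("mov $42, %0" : "=r" (val));
        printf("%lu\n", val);
        return 0;
}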
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index f3e01a2cbaa1..966502d4682e 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -387,6 +387,7 @@ enum vmcs_field {
 #define VMX_EPT_EXTENT_INDIVIDUAL_ADDR		0
 #define VMX_EPT_EXTENT_CONTEXT			1
 #define VMX_EPT_EXTENT_GLOBAL			2
+#define VMX_EPT_EXTENT_SHIFT			24
 
 #define VMX_EPT_EXECUTE_ONLY_BIT		(1ull)
 #define VMX_EPT_PAGE_WALK_4_BIT			(1ull << 6)
@@ -394,6 +395,7 @@ enum vmcs_field {
 #define VMX_EPTP_WB_BIT				(1ull << 14)
 #define VMX_EPT_2MB_PAGE_BIT			(1ull << 16)
 #define VMX_EPT_1GB_PAGE_BIT			(1ull << 17)
+#define VMX_EPT_INVEPT_BIT			(1ull << 20)
 #define VMX_EPT_AD_BIT				(1ull << 21)
 #define VMX_EPT_EXTENT_CONTEXT_BIT		(1ull << 25)
 #define VMX_EPT_EXTENT_GLOBAL_BIT		(1ull << 26)
diff --git a/arch/x86/include/asm/vvar.h b/arch/x86/include/asm/vvar.h
index de656ac2af41..d76ac40da206 100644
--- a/arch/x86/include/asm/vvar.h
+++ b/arch/x86/include/asm/vvar.h
@@ -35,7 +35,7 @@
 
 #define DEFINE_VVAR(type, name)						\
 	type name							\
-	__attribute__((section(".vvar_" #name), aligned(16)))
+	__attribute__((section(".vvar_" #name), aligned(16))) __visible
 
 #define VVAR(name) (*vvaraddr_ ## name)
 
diff --git a/arch/x86/include/asm/xen/events.h b/arch/x86/include/asm/xen/events.h
index ca842f2769ef..608a79d5a466 100644
--- a/arch/x86/include/asm/xen/events.h
+++ b/arch/x86/include/asm/xen/events.h
@@ -7,6 +7,7 @@ enum ipi_vector {
 	XEN_CALL_FUNCTION_SINGLE_VECTOR,
 	XEN_SPIN_UNLOCK_VECTOR,
 	XEN_IRQ_WORK_VECTOR,
+	XEN_NMI_VECTOR,
 
 	XEN_NR_IPIS,
 };
diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h
index 125f344f06a9..d866959e5685 100644
--- a/arch/x86/include/asm/xen/hypervisor.h
+++ b/arch/x86/include/asm/xen/hypervisor.h
@@ -40,21 +40,7 @@ extern struct start_info *xen_start_info;
 
 static inline uint32_t xen_cpuid_base(void)
 {
-	uint32_t base, eax, ebx, ecx, edx;
-	char signature[13];
-
-	for (base = 0x40000000; base < 0x40010000; base += 0x100) {
-		cpuid(base, &eax, &ebx, &ecx, &edx);
-		*(uint32_t *)(signature + 0) = ebx;
-		*(uint32_t *)(signature + 4) = ecx;
-		*(uint32_t *)(signature + 8) = edx;
-		signature[12] = 0;
-
-		if (!strcmp("XenVMMXenVMM", signature) && ((eax - base) >= 2))
-			return base;
-	}
-
-	return 0;
+	return hypervisor_cpuid_base("XenVMMXenVMM", 2);
 }
 
 #ifdef CONFIG_XEN
diff --git a/arch/x86/include/uapi/asm/kvm_para.h b/arch/x86/include/uapi/asm/kvm_para.h
index 06fdbd987e97..94dc8ca434e0 100644
--- a/arch/x86/include/uapi/asm/kvm_para.h
+++ b/arch/x86/include/uapi/asm/kvm_para.h
@@ -23,6 +23,7 @@
 #define KVM_FEATURE_ASYNC_PF		4
 #define KVM_FEATURE_STEAL_TIME		5
 #define KVM_FEATURE_PV_EOI		6
+#define KVM_FEATURE_PV_UNHALT		7
 
 /* The last 8 bits are used to indicate how to interpret the flags field
  * in pvclock structure. If no bits are set, all flags are ignored.
diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h
index d651082c7cf7..0e79420376eb 100644
--- a/arch/x86/include/uapi/asm/vmx.h
+++ b/arch/x86/include/uapi/asm/vmx.h
@@ -65,6 +65,7 @@
 #define EXIT_REASON_EOI_INDUCED         45
 #define EXIT_REASON_EPT_VIOLATION       48
 #define EXIT_REASON_EPT_MISCONFIG       49
+#define EXIT_REASON_INVEPT              50
 #define EXIT_REASON_PREEMPTION_TIMER    52
 #define EXIT_REASON_WBINVD              54
 #define EXIT_REASON_XSETBV              55
@@ -106,12 +107,13 @@
 	{ EXIT_REASON_APIC_ACCESS,           "APIC_ACCESS" }, \
 	{ EXIT_REASON_EPT_VIOLATION,         "EPT_VIOLATION" }, \
 	{ EXIT_REASON_EPT_MISCONFIG,         "EPT_MISCONFIG" }, \
+	{ EXIT_REASON_INVEPT,                "INVEPT" }, \
+	{ EXIT_REASON_PREEMPTION_TIMER,      "PREEMPTION_TIMER" }, \
 	{ EXIT_REASON_WBINVD,                "WBINVD" }, \
 	{ EXIT_REASON_APIC_WRITE,            "APIC_WRITE" }, \
 	{ EXIT_REASON_EOI_INDUCED,           "EOI_INDUCED" }, \
 	{ EXIT_REASON_INVALID_STATE,         "INVALID_STATE" }, \
 	{ EXIT_REASON_INVD,                  "INVD" }, \
-	{ EXIT_REASON_INVPCID,               "INVPCID" }, \
-	{ EXIT_REASON_PREEMPTION_TIMER,      "PREEMPTION_TIMER" }
+	{ EXIT_REASON_INVPCID,               "INVPCID" }
 
 #endif /* _UAPIVMX_H */
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 88d99ea77723..a5408b965c9d 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -103,6 +103,9 @@ obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) += check.o
 obj-$(CONFIG_SWIOTLB)			+= pci-swiotlb.o
 obj-$(CONFIG_OF)			+= devicetree.o
 obj-$(CONFIG_UPROBES)			+= uprobes.o
+obj-y					+= sysfb.o
+obj-$(CONFIG_X86_SYSFB)			+= sysfb_simplefb.o
+obj-$(CONFIG_EFI)			+= sysfb_efi.o
 
 obj-$(CONFIG_PERF_EVENTS)		+= perf_regs.o
 obj-$(CONFIG_TRACING)			+= tracepoint.o
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 2627a81253ee..40c76604199f 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -67,6 +67,7 @@ EXPORT_SYMBOL(acpi_pci_disabled);
 int acpi_lapic;
 int acpi_ioapic;
 int acpi_strict;
+int acpi_disable_cmcff;
 
 u8 acpi_sci_flags __initdata;
 int acpi_sci_override_gsi __initdata;
@@ -141,16 +142,8 @@ static u32 irq_to_gsi(int irq)
 }
 
 /*
- * Temporarily use the virtual area starting from FIX_IO_APIC_BASE_END,
- * to map the target physical address. The problem is that set_fixmap()
- * provides a single page, and it is possible that the page is not
- * sufficient.
- * By using this area, we can map up to MAX_IO_APICS pages temporarily,
- * i.e. until the next __va_range() call.
- *
- * Important Safety Note: The fixed I/O APIC page numbers are *subtracted*
- * from the fixed base. That's why we start at FIX_IO_APIC_BASE_END and
- * count idx down while incrementing the phys address.
+ * This is just a simple wrapper around early_ioremap(),
+ * with sanity checks for phys == 0 and size == 0.
  */
 char *__init __acpi_map_table(unsigned long phys, unsigned long size)
 {
@@ -160,6 +153,7 @@ char *__init __acpi_map_table(unsigned long phys, unsigned long size)
 
 	return early_ioremap(phys, size);
 }
+
 void __init __acpi_unmap_table(char *map, unsigned long size)
 {
 	if (!map || !size)
@@ -199,7 +193,7 @@ static void acpi_register_lapic(int id, u8 enabled)
 {
 	unsigned int ver = 0;
 
-	if (id >= (MAX_LOCAL_APIC-1)) {
+	if (id >= MAX_LOCAL_APIC) {
 		printk(KERN_INFO PREFIX "skipped apicid that is too big\n");
 		return;
 	}
@@ -1120,6 +1114,7 @@ int mp_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
 	int ioapic;
 	int ioapic_pin;
 	struct io_apic_irq_attr irq_attr;
+	int ret;
 
 	if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC)
 		return gsi;
@@ -1149,7 +1144,9 @@ int mp_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
 	set_io_apic_irq_attr(&irq_attr, ioapic, ioapic_pin,
 			     trigger == ACPI_EDGE_SENSITIVE ? 0 : 1,
 			     polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
-	io_apic_set_pci_routing(dev, gsi_to_irq(gsi), &irq_attr);
+	ret = io_apic_set_pci_routing(dev, gsi_to_irq(gsi), &irq_attr);
+	if (ret < 0)
+		gsi = INT_MIN;
 
 	return gsi;
 }
@@ -1626,6 +1623,10 @@ static int __init parse_acpi(char *arg)
 	/* "acpi=copy_dsdt" copys DSDT */
 	else if (strcmp(arg, "copy_dsdt") == 0) {
 		acpi_gbl_copy_dsdt_locally = 1;
+	}
+	/* "acpi=nocmcff" disables FF mode for corrected errors */
+	else if (strcmp(arg, "nocmcff") == 0) {
+		acpi_disable_cmcff = 1;
 	} else {
 		/* Core will printk when we return error. */
 		return -EINVAL;
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index c15cf9a25e27..15e8563e5c24 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -11,6 +11,7 @@
11#include <linux/memory.h> 11#include <linux/memory.h>
12#include <linux/stop_machine.h> 12#include <linux/stop_machine.h>
13#include <linux/slab.h> 13#include <linux/slab.h>
14#include <linux/kdebug.h>
14#include <asm/alternative.h> 15#include <asm/alternative.h>
15#include <asm/sections.h> 16#include <asm/sections.h>
16#include <asm/pgtable.h> 17#include <asm/pgtable.h>
@@ -596,97 +597,93 @@ void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
596 return addr; 597 return addr;
597} 598}
598 599
599/* 600static void do_sync_core(void *info)
600 * Cross-modifying kernel text with stop_machine(). 601{
601 * This code originally comes from immediate value. 602 sync_core();
602 */ 603}
603static atomic_t stop_machine_first;
604static int wrote_text;
605 604
606struct text_poke_params { 605static bool bp_patching_in_progress;
607 struct text_poke_param *params; 606static void *bp_int3_handler, *bp_int3_addr;
608 int nparams;
609};
610 607
611static int __kprobes stop_machine_text_poke(void *data) 608int poke_int3_handler(struct pt_regs *regs)
612{ 609{
613 struct text_poke_params *tpp = data; 610 /* bp_patching_in_progress */
614 struct text_poke_param *p; 611 smp_rmb();
615 int i;
616 612
617 if (atomic_xchg(&stop_machine_first, 0)) { 613 if (likely(!bp_patching_in_progress))
618 for (i = 0; i < tpp->nparams; i++) { 614 return 0;
619 p = &tpp->params[i];
620 text_poke(p->addr, p->opcode, p->len);
621 }
622 smp_wmb(); /* Make sure other cpus see that this has run */
623 wrote_text = 1;
624 } else {
625 while (!wrote_text)
626 cpu_relax();
627 smp_mb(); /* Load wrote_text before following execution */
628 }
629 615
630 for (i = 0; i < tpp->nparams; i++) { 616 if (user_mode_vm(regs) || regs->ip != (unsigned long)bp_int3_addr)
631 p = &tpp->params[i]; 617 return 0;
632 flush_icache_range((unsigned long)p->addr, 618
633 (unsigned long)p->addr + p->len); 619 /* set up the specified breakpoint handler */
634 } 620 regs->ip = (unsigned long) bp_int3_handler;
635 /* 621
636 * Intel Archiecture Software Developer's Manual section 7.1.3 specifies 622 return 1;
637 * that a core serializing instruction such as "cpuid" should be
638 * executed on _each_ core before the new instruction is made visible.
639 */
640 sync_core();
641 return 0;
642}
643 623
644/**
645 * text_poke_smp - Update instructions on a live kernel on SMP
646 * @addr: address to modify
647 * @opcode: source of the copy
648 * @len: length to copy
649 *
650 * Modify multi-byte instruction by using stop_machine() on SMP. This allows
651 * user to poke/set multi-byte text on SMP. Only non-NMI/MCE code modifying
652 * should be allowed, since stop_machine() does _not_ protect code against
653 * NMI and MCE.
654 *
655 * Note: Must be called under get_online_cpus() and text_mutex.
656 */
657void *__kprobes text_poke_smp(void *addr, const void *opcode, size_t len)
658{
659 struct text_poke_params tpp;
660 struct text_poke_param p;
661
662 p.addr = addr;
663 p.opcode = opcode;
664 p.len = len;
665 tpp.params = &p;
666 tpp.nparams = 1;
667 atomic_set(&stop_machine_first, 1);
668 wrote_text = 0;
669 /* Use __stop_machine() because the caller already got online_cpus. */
670 __stop_machine(stop_machine_text_poke, (void *)&tpp, cpu_online_mask);
671 return addr;
672} 624}
673 625
674/** 626/**
675 * text_poke_smp_batch - Update instructions on a live kernel on SMP 627 * text_poke_bp() -- update instructions on live kernel on SMP
676 * @params: an array of text_poke parameters 628 * @addr: address to patch
677 * @n: the number of elements in params. 629 * @opcode: opcode of new instruction
630 * @len: length to copy
631 * @handler: address to jump to when the temporary breakpoint is hit
678 * 632 *
679 * Modify multi-byte instruction by using stop_machine() on SMP. Since the 633 * Modify multi-byte instruction by using int3 breakpoint on SMP.
680 * stop_machine() is heavy task, it is better to aggregate text_poke requests 634 * We completely avoid stop_machine() here, and achieve the
681 * and do it once if possible. 635 * synchronization using int3 breakpoint.
682 * 636 *
683 * Note: Must be called under get_online_cpus() and text_mutex. 637 * The way it is done:
638 * - add an int3 trap to the address that will be patched
639 * - sync cores
640 * - update all but the first byte of the patched range
641 * - sync cores
642 * - replace the first byte (int3) by the first byte of
643 * replacing opcode
644 * - sync cores
645 *
646 * Note: must be called under text_mutex.
684 */ 647 */
685void __kprobes text_poke_smp_batch(struct text_poke_param *params, int n) 648void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
686{ 649{
687 struct text_poke_params tpp = {.params = params, .nparams = n}; 650 unsigned char int3 = 0xcc;
651
652 bp_int3_handler = handler;
653 bp_int3_addr = (u8 *)addr + sizeof(int3);
654 bp_patching_in_progress = true;
655 /*
656 * Corresponding read barrier in int3 notifier for
657 * making sure the in_progress flag is correctly ordered wrt.
658 * patching
659 */
660 smp_wmb();
661
662 text_poke(addr, &int3, sizeof(int3));
688 663
689 atomic_set(&stop_machine_first, 1); 664 on_each_cpu(do_sync_core, NULL, 1);
690 wrote_text = 0; 665
691 __stop_machine(stop_machine_text_poke, (void *)&tpp, cpu_online_mask); 666 if (len - sizeof(int3) > 0) {
667 /* patch all but the first byte */
668 text_poke((char *)addr + sizeof(int3),
669 (const char *) opcode + sizeof(int3),
670 len - sizeof(int3));
671 /*
672 * According to Intel, this core syncing is very likely
673 * not necessary and we'd be safe even without it. But
674 * better safe than sorry (plus there's not only Intel).
675 */
676 on_each_cpu(do_sync_core, NULL, 1);
677 }
678
679 /* patch the first byte */
680 text_poke(addr, opcode, sizeof(int3));
681
682 on_each_cpu(do_sync_core, NULL, 1);
683
684 bp_patching_in_progress = false;
685 smp_wmb();
686
687 return addr;
692} 688}
689
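
The three rounds of core syncing in text_poke_bp() above are load-bearing: the int3 must be globally visible before the tail bytes change, and the tail must be visible before the first byte is restored. A minimal user-space sketch of the sequence, where write_text() and sync_all_cores() are hypothetical stand-ins for text_poke() and on_each_cpu(do_sync_core, NULL, 1):

    #include <stddef.h>

    /* Hypothetical stand-ins for text_poke() and the cross-CPU sync_core() IPI. */
    extern void write_text(unsigned char *addr, const unsigned char *src, size_t len);
    extern void sync_all_cores(void);

    static void patch_insn_bp(unsigned char *addr, const unsigned char *opcode, size_t len)
    {
        unsigned char int3 = 0xcc;

        write_text(addr, &int3, 1);         /* 1: any racing fetch now traps */
        sync_all_cores();

        if (len > 1) {
            write_text(addr + 1, opcode + 1, len - 1);  /* 2: patch the tail */
            sync_all_cores();
        }

        write_text(addr, opcode, 1);        /* 3: replace int3 with the real first byte */
        sync_all_cores();
    }

Any CPU that hits the transient int3 lands in poke_int3_handler(), which redirects it to the caller-supplied handler instead of letting it execute a half-patched instruction.
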
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 3048ded1b598..59554dca96ec 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -20,6 +20,7 @@ const struct pci_device_id amd_nb_misc_ids[] = {
20 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) }, 20 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
21 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) }, 21 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
22 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) }, 22 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
23 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
23 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) }, 24 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
24 {} 25 {}
25}; 26};
@@ -27,6 +28,7 @@ EXPORT_SYMBOL(amd_nb_misc_ids);
27 28
28static const struct pci_device_id amd_nb_link_ids[] = { 29static const struct pci_device_id amd_nb_link_ids[] = {
29 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) }, 30 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
31 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
30 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) }, 32 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
31 {} 33 {}
32}; 34};
@@ -81,13 +83,20 @@ int amd_cache_northbridges(void)
81 next_northbridge(misc, amd_nb_misc_ids); 83 next_northbridge(misc, amd_nb_misc_ids);
82 node_to_amd_nb(i)->link = link = 84 node_to_amd_nb(i)->link = link =
83 next_northbridge(link, amd_nb_link_ids); 85 next_northbridge(link, amd_nb_link_ids);
84 } 86 }
85 87
88 /* GART present only on Fam15h up to model 0fh */
86 if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 || 89 if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
87 boot_cpu_data.x86 == 0x15) 90 (boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model < 0x10))
88 amd_northbridges.flags |= AMD_NB_GART; 91 amd_northbridges.flags |= AMD_NB_GART;
89 92
90 /* 93 /*
94 * Check for L3 cache presence.
95 */
96 if (!cpuid_edx(0x80000006))
97 return 0;
98
99 /*
91 * Some CPU families support L3 Cache Index Disable. There are some 100 * Some CPU families support L3 Cache Index Disable. There are some
92 * limitations because of E382 and E388 on family 0x10. 101 * limitations because of E382 and E388 on family 0x10.
93 */ 102 */
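
Both new tests in amd_cache_northbridges() are simple predicates. The L3 check uses CPUID leaf 0x80000006, whose EDX describes the L3 cache (all zeroes means none). A user-space sketch using GCC's <cpuid.h> (compiler-provided, not kernel code):

    #include <cpuid.h>

    /* Mirrors the cpuid_edx(0x80000006) test above. */
    static int has_l3_cache(void)
    {
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(0x80000006, &eax, &ebx, &ecx, &edx))
            return 0;          /* extended leaf not supported */
        return edx != 0;       /* EDX == 0 means no L3 present */
    }

    /* The tightened GART test above, as a pure predicate. */
    static int has_gart(unsigned int family, unsigned int model)
    {
        return family == 0xf || family == 0x10 ||
               (family == 0x15 && model < 0x10);
    }
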
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index eca89c53a7f5..a7eb82d9b012 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -913,7 +913,7 @@ static void local_apic_timer_interrupt(void)
913 * [ if a single-CPU system runs an SMP kernel then we call the local 913 * [ if a single-CPU system runs an SMP kernel then we call the local
914 * interrupt as well. Thus we cannot inline the local irq ... ] 914 * interrupt as well. Thus we cannot inline the local irq ... ]
915 */ 915 */
916void __irq_entry smp_apic_timer_interrupt(struct pt_regs *regs) 916__visible void __irq_entry smp_apic_timer_interrupt(struct pt_regs *regs)
917{ 917{
918 struct pt_regs *old_regs = set_irq_regs(regs); 918 struct pt_regs *old_regs = set_irq_regs(regs);
919 919
@@ -932,7 +932,7 @@ void __irq_entry smp_apic_timer_interrupt(struct pt_regs *regs)
932 set_irq_regs(old_regs); 932 set_irq_regs(old_regs);
933} 933}
934 934
935void __irq_entry smp_trace_apic_timer_interrupt(struct pt_regs *regs) 935__visible void __irq_entry smp_trace_apic_timer_interrupt(struct pt_regs *regs)
936{ 936{
937 struct pt_regs *old_regs = set_irq_regs(regs); 937 struct pt_regs *old_regs = set_irq_regs(regs);
938 938
@@ -1946,14 +1946,14 @@ static inline void __smp_spurious_interrupt(void)
1946 "should never happen.\n", smp_processor_id()); 1946 "should never happen.\n", smp_processor_id());
1947} 1947}
1948 1948
1949void smp_spurious_interrupt(struct pt_regs *regs) 1949__visible void smp_spurious_interrupt(struct pt_regs *regs)
1950{ 1950{
1951 entering_irq(); 1951 entering_irq();
1952 __smp_spurious_interrupt(); 1952 __smp_spurious_interrupt();
1953 exiting_irq(); 1953 exiting_irq();
1954} 1954}
1955 1955
1956void smp_trace_spurious_interrupt(struct pt_regs *regs) 1956__visible void smp_trace_spurious_interrupt(struct pt_regs *regs)
1957{ 1957{
1958 entering_irq(); 1958 entering_irq();
1959 trace_spurious_apic_entry(SPURIOUS_APIC_VECTOR); 1959 trace_spurious_apic_entry(SPURIOUS_APIC_VECTOR);
@@ -2002,14 +2002,14 @@ static inline void __smp_error_interrupt(struct pt_regs *regs)
2002 2002
2003} 2003}
2004 2004
2005void smp_error_interrupt(struct pt_regs *regs) 2005__visible void smp_error_interrupt(struct pt_regs *regs)
2006{ 2006{
2007 entering_irq(); 2007 entering_irq();
2008 __smp_error_interrupt(regs); 2008 __smp_error_interrupt(regs);
2009 exiting_irq(); 2009 exiting_irq();
2010} 2010}
2011 2011
2012void smp_trace_error_interrupt(struct pt_regs *regs) 2012__visible void smp_trace_error_interrupt(struct pt_regs *regs)
2013{ 2013{
2014 entering_irq(); 2014 entering_irq();
2015 trace_error_apic_entry(ERROR_APIC_VECTOR); 2015 trace_error_apic_entry(ERROR_APIC_VECTOR);
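
The __visible annotations added to these handlers matter under link-time optimization: the functions are reached only from assembly entry stubs, so they have no C-level callers and LTO would otherwise be free to localize or discard them. On recent gcc the macro expands roughly as below (see include/linux/compiler-gcc4.h); the entry-point name here is only illustrative:

    /* Simplified sketch: keep the symbol externally visible so LTO cannot
     * drop a function whose only callers are in assembly. */
    #define __visible __attribute__((externally_visible))

    __visible void example_asm_called_handler(void);  /* hypothetical entry point */
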
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 9ed796ccc32c..e63a5bd2a78f 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1534,6 +1534,11 @@ void intel_ir_io_apic_print_entries(unsigned int apic,
1534 } 1534 }
1535} 1535}
1536 1536
1537void ioapic_zap_locks(void)
1538{
1539 raw_spin_lock_init(&ioapic_lock);
1540}
1541
1537__apicdebuginit(void) print_IO_APIC(int ioapic_idx) 1542__apicdebuginit(void) print_IO_APIC(int ioapic_idx)
1538{ 1543{
1539 union IO_APIC_reg_00 reg_00; 1544 union IO_APIC_reg_00 reg_00;
@@ -3375,12 +3380,15 @@ int io_apic_setup_irq_pin_once(unsigned int irq, int node,
3375{ 3380{
3376 unsigned int ioapic_idx = attr->ioapic, pin = attr->ioapic_pin; 3381 unsigned int ioapic_idx = attr->ioapic, pin = attr->ioapic_pin;
3377 int ret; 3382 int ret;
3383 struct IO_APIC_route_entry orig_entry;
3378 3384
3379 /* Avoid redundant programming */ 3385 /* Avoid redundant programming */
3380 if (test_bit(pin, ioapics[ioapic_idx].pin_programmed)) { 3386 if (test_bit(pin, ioapics[ioapic_idx].pin_programmed)) {
3381 pr_debug("Pin %d-%d already programmed\n", 3387 pr_debug("Pin %d-%d already programmed\n", mpc_ioapic_id(ioapic_idx), pin);
3382 mpc_ioapic_id(ioapic_idx), pin); 3388 orig_entry = ioapic_read_entry(attr->ioapic, pin);
3383 return 0; 3389 if (attr->trigger == orig_entry.trigger && attr->polarity == orig_entry.polarity)
3390 return 0;
3391 return -EBUSY;
3384 } 3392 }
3385 ret = io_apic_setup_irq_pin(irq, node, attr); 3393 ret = io_apic_setup_irq_pin(irq, node, attr);
3386 if (!ret) 3394 if (!ret)
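
The new -EBUSY path changes the contract of io_apic_setup_irq_pin_once(): a repeated request for an already-programmed pin only succeeds when it agrees with the existing programming. A self-contained sketch of the decision (struct pin_state and its fields are made up for illustration):

    #include <errno.h>

    struct pin_state { int programmed, trigger, polarity; };

    static int setup_pin_once(struct pin_state *st, int trigger, int polarity)
    {
        if (st->programmed) {
            if (st->trigger == trigger && st->polarity == polarity)
                return 0;      /* identical programming: harmless duplicate */
            return -EBUSY;     /* conflicting request: refuse it */
        }
        st->programmed = 1;
        st->trigger = trigger;
        st->polarity = polarity;
        return 0;
    }
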
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 53a4e2744846..3ab03430211d 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -392,7 +392,7 @@ static struct cpuidle_device apm_cpuidle_device;
392/* 392/*
393 * Local variables 393 * Local variables
394 */ 394 */
395static struct { 395__visible struct {
396 unsigned long offset; 396 unsigned long offset;
397 unsigned short segment; 397 unsigned short segment;
398} apm_bios_entry; 398} apm_bios_entry;
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index f654ecefea5b..903a264af981 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -66,8 +66,8 @@ static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
66 * performance at the same time.. 66 * performance at the same time..
67 */ 67 */
68 68
69extern void vide(void); 69extern __visible void vide(void);
70__asm__(".align 4\nvide: ret"); 70__asm__(".globl vide\n\t.align 4\nvide: ret");
71 71
72static void init_amd_k5(struct cpuinfo_x86 *c) 72static void init_amd_k5(struct cpuinfo_x86 *c)
73{ 73{
@@ -512,7 +512,7 @@ static void early_init_amd(struct cpuinfo_x86 *c)
512 512
513static const int amd_erratum_383[]; 513static const int amd_erratum_383[];
514static const int amd_erratum_400[]; 514static const int amd_erratum_400[];
515static bool cpu_has_amd_erratum(const int *erratum); 515static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
516 516
517static void init_amd(struct cpuinfo_x86 *c) 517static void init_amd(struct cpuinfo_x86 *c)
518{ 518{
@@ -729,11 +729,11 @@ static void init_amd(struct cpuinfo_x86 *c)
729 value &= ~(1ULL << 24); 729 value &= ~(1ULL << 24);
730 wrmsrl_safe(MSR_AMD64_BU_CFG2, value); 730 wrmsrl_safe(MSR_AMD64_BU_CFG2, value);
731 731
732 if (cpu_has_amd_erratum(amd_erratum_383)) 732 if (cpu_has_amd_erratum(c, amd_erratum_383))
733 set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH); 733 set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
734 } 734 }
735 735
736 if (cpu_has_amd_erratum(amd_erratum_400)) 736 if (cpu_has_amd_erratum(c, amd_erratum_400))
737 set_cpu_bug(c, X86_BUG_AMD_APIC_C1E); 737 set_cpu_bug(c, X86_BUG_AMD_APIC_C1E);
738 738
739 rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy); 739 rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
@@ -878,23 +878,13 @@ static const int amd_erratum_400[] =
878static const int amd_erratum_383[] = 878static const int amd_erratum_383[] =
879 AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf)); 879 AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
880 880
881static bool cpu_has_amd_erratum(const int *erratum) 881
882static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
882{ 883{
883 struct cpuinfo_x86 *cpu = __this_cpu_ptr(&cpu_info);
884 int osvw_id = *erratum++; 884 int osvw_id = *erratum++;
885 u32 range; 885 u32 range;
886 u32 ms; 886 u32 ms;
887 887
888 /*
889 * If called early enough that current_cpu_data hasn't been initialized
890 * yet, fall back to boot_cpu_data.
891 */
892 if (cpu->x86 == 0)
893 cpu = &boot_cpu_data;
894
895 if (cpu->x86_vendor != X86_VENDOR_AMD)
896 return false;
897
898 if (osvw_id >= 0 && osvw_id < 65536 && 888 if (osvw_id >= 0 && osvw_id < 65536 &&
899 cpu_has(cpu, X86_FEATURE_OSVW)) { 889 cpu_has(cpu, X86_FEATURE_OSVW)) {
900 u64 osvw_len; 890 u64 osvw_len;
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 25eb2747b063..2793d1f095a2 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1076,7 +1076,7 @@ struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
1076 (unsigned long) debug_idt_table }; 1076 (unsigned long) debug_idt_table };
1077 1077
1078DEFINE_PER_CPU_FIRST(union irq_stack_union, 1078DEFINE_PER_CPU_FIRST(union irq_stack_union,
1079 irq_stack_union) __aligned(PAGE_SIZE); 1079 irq_stack_union) __aligned(PAGE_SIZE) __visible;
1080 1080
1081/* 1081/*
1082 * The following four percpu variables are hot. Align current_task to 1082 * The following four percpu variables are hot. Align current_task to
@@ -1093,7 +1093,7 @@ EXPORT_PER_CPU_SYMBOL(kernel_stack);
1093DEFINE_PER_CPU(char *, irq_stack_ptr) = 1093DEFINE_PER_CPU(char *, irq_stack_ptr) =
1094 init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64; 1094 init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;
1095 1095
1096DEFINE_PER_CPU(unsigned int, irq_count) = -1; 1096DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;
1097 1097
1098DEFINE_PER_CPU(struct task_struct *, fpu_owner_task); 1098DEFINE_PER_CPU(struct task_struct *, fpu_owner_task);
1099 1099
diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c
index 87279212d318..36ce402a3fa5 100644
--- a/arch/x86/kernel/cpu/hypervisor.c
+++ b/arch/x86/kernel/cpu/hypervisor.c
@@ -25,11 +25,6 @@
25#include <asm/processor.h> 25#include <asm/processor.h>
26#include <asm/hypervisor.h> 26#include <asm/hypervisor.h>
27 27
28/*
29 * Hypervisor detect order. This is specified explicitly here because
30 * some hypervisors might implement compatibility modes for other
31 * hypervisors and therefore need to be detected in specific sequence.
32 */
33static const __initconst struct hypervisor_x86 * const hypervisors[] = 28static const __initconst struct hypervisor_x86 * const hypervisors[] =
34{ 29{
35#ifdef CONFIG_XEN_PVHVM 30#ifdef CONFIG_XEN_PVHVM
@@ -49,15 +44,19 @@ static inline void __init
49detect_hypervisor_vendor(void) 44detect_hypervisor_vendor(void)
50{ 45{
51 const struct hypervisor_x86 *h, * const *p; 46 const struct hypervisor_x86 *h, * const *p;
47 uint32_t pri, max_pri = 0;
52 48
53 for (p = hypervisors; p < hypervisors + ARRAY_SIZE(hypervisors); p++) { 49 for (p = hypervisors; p < hypervisors + ARRAY_SIZE(hypervisors); p++) {
54 h = *p; 50 h = *p;
55 if (h->detect()) { 51 pri = h->detect();
52 if (pri != 0 && pri > max_pri) {
53 max_pri = pri;
56 x86_hyper = h; 54 x86_hyper = h;
57 printk(KERN_INFO "Hypervisor detected: %s\n", h->name);
58 break;
59 } 55 }
60 } 56 }
57
58 if (max_pri)
59 printk(KERN_INFO "Hypervisor detected: %s\n", x86_hyper->name);
61} 60}
62 61
63void init_hypervisor(struct cpuinfo_x86 *c) 62void init_hypervisor(struct cpuinfo_x86 *c)
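
With ->detect() now returning a priority rather than a bool, the hard-coded probe order (and the explanatory comment deleted above) is no longer needed: every entry is probed and the highest nonzero priority wins, which lets a hypervisor that emulates another's interface outrank it. A user-space sketch of the selection loop:

    #include <stddef.h>

    struct hv { const char *name; unsigned int (*detect)(void); };

    static const struct hv *pick_hypervisor(const struct hv *tbl, size_t n)
    {
        const struct hv *best = NULL;
        unsigned int pri, max_pri = 0;
        size_t i;

        for (i = 0; i < n; i++) {
            pri = tbl[i].detect();   /* 0 means "not this hypervisor" */
            if (pri > max_pri) {     /* strictly greater, so 0 never wins */
                max_pri = pri;
                best = &tbl[i];
            }
        }
        return best;                 /* NULL if nothing was detected */
    }
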
diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mcheck/mce-internal.h
index 5b7d4fa5d3b7..09edd0b65fef 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-internal.h
+++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h
@@ -25,15 +25,18 @@ int mce_severity(struct mce *a, int tolerant, char **msg);
25struct dentry *mce_get_debugfs_dir(void); 25struct dentry *mce_get_debugfs_dir(void);
26 26
27extern struct mce_bank *mce_banks; 27extern struct mce_bank *mce_banks;
28extern mce_banks_t mce_banks_ce_disabled;
28 29
29#ifdef CONFIG_X86_MCE_INTEL 30#ifdef CONFIG_X86_MCE_INTEL
30unsigned long mce_intel_adjust_timer(unsigned long interval); 31unsigned long mce_intel_adjust_timer(unsigned long interval);
31void mce_intel_cmci_poll(void); 32void mce_intel_cmci_poll(void);
32void mce_intel_hcpu_update(unsigned long cpu); 33void mce_intel_hcpu_update(unsigned long cpu);
34void cmci_disable_bank(int bank);
33#else 35#else
34# define mce_intel_adjust_timer mce_adjust_timer_default 36# define mce_intel_adjust_timer mce_adjust_timer_default
35static inline void mce_intel_cmci_poll(void) { } 37static inline void mce_intel_cmci_poll(void) { }
36static inline void mce_intel_hcpu_update(unsigned long cpu) { } 38static inline void mce_intel_hcpu_update(unsigned long cpu) { }
39static inline void cmci_disable_bank(int bank) { }
37#endif 40#endif
38 41
39void mce_timer_kick(unsigned long interval); 42void mce_timer_kick(unsigned long interval);
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 87a65c939bcd..b3218cdee95f 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -97,6 +97,15 @@ DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
97 [0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL 97 [0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
98}; 98};
99 99
100/*
101 * MCA banks controlled through firmware first for corrected errors.
102 * This is a global list of banks for which we won't enable CMCI and we
103 * won't poll. Firmware controls these banks and is responsible for
104 * reporting corrected errors through GHES. Uncorrected/recoverable
105 * errors are still notified through a machine check.
106 */
107mce_banks_t mce_banks_ce_disabled;
108
100static DEFINE_PER_CPU(struct work_struct, mce_work); 109static DEFINE_PER_CPU(struct work_struct, mce_work);
101 110
102static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs); 111static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
@@ -1935,6 +1944,25 @@ static struct miscdevice mce_chrdev_device = {
1935 &mce_chrdev_ops, 1944 &mce_chrdev_ops,
1936}; 1945};
1937 1946
1947static void __mce_disable_bank(void *arg)
1948{
1949 int bank = *((int *)arg);
1950 __clear_bit(bank, __get_cpu_var(mce_poll_banks));
1951 cmci_disable_bank(bank);
1952}
1953
1954void mce_disable_bank(int bank)
1955{
1956 if (bank >= mca_cfg.banks) {
1957 pr_warn(FW_BUG
1958 "Ignoring request to disable invalid MCA bank %d.\n",
1959 bank);
1960 return;
1961 }
1962 set_bit(bank, mce_banks_ce_disabled);
1963 on_each_cpu(__mce_disable_bank, &bank, 1);
1964}
1965
1938/* 1966/*
1939 * mce=off Disables machine check 1967 * mce=off Disables machine check
1940 * mce=no_cmci Disables CMCI 1968 * mce=no_cmci Disables CMCI
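
The intended caller of mce_disable_bank() is firmware-first code such as the ACPI/GHES driver: once firmware claims a bank for corrected errors, the kernel must neither poll it nor enable CMCI on it, on any CPU. A loose user-space sketch of the bookkeeping (plain bitmask standing in for mce_banks_ce_disabled; the cross-CPU fan-out is elided):

    #include <stdio.h>

    static unsigned long ce_disabled_mask;   /* stand-in for mce_banks_ce_disabled */

    static void disable_bank(int bank, int nbanks)
    {
        if (bank < 0 || bank >= nbanks) {
            fprintf(stderr, "ignoring request to disable invalid bank %d\n", bank);
            return;
        }
        ce_disabled_mask |= 1UL << bank;     /* set_bit() equivalent */
        /* kernel: on_each_cpu(__mce_disable_bank, &bank, 1) then clears the
         * poll bit and disables CMCI for this bank on every CPU */
    }
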
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
index d56405309dc1..4cfe0458ca66 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -203,6 +203,10 @@ static void cmci_discover(int banks)
203 if (test_bit(i, owned)) 203 if (test_bit(i, owned))
204 continue; 204 continue;
205 205
206 /* Skip banks in firmware first mode */
207 if (test_bit(i, mce_banks_ce_disabled))
208 continue;
209
206 rdmsrl(MSR_IA32_MCx_CTL2(i), val); 210 rdmsrl(MSR_IA32_MCx_CTL2(i), val);
207 211
208 /* Already owned by someone else? */ 212 /* Already owned by someone else? */
@@ -271,6 +275,19 @@ void cmci_recheck(void)
271 local_irq_restore(flags); 275 local_irq_restore(flags);
272} 276}
273 277
278/* Caller must hold the lock on cmci_discover_lock */
279static void __cmci_disable_bank(int bank)
280{
281 u64 val;
282
283 if (!test_bit(bank, __get_cpu_var(mce_banks_owned)))
284 return;
285 rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
286 val &= ~MCI_CTL2_CMCI_EN;
287 wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
288 __clear_bit(bank, __get_cpu_var(mce_banks_owned));
289}
290
274/* 291/*
275 * Disable CMCI on this CPU for all banks it owns when it goes down. 292 * Disable CMCI on this CPU for all banks it owns when it goes down.
276 * This allows other CPUs to claim the banks on rediscovery. 293 * This allows other CPUs to claim the banks on rediscovery.
@@ -280,20 +297,12 @@ void cmci_clear(void)
280 unsigned long flags; 297 unsigned long flags;
281 int i; 298 int i;
282 int banks; 299 int banks;
283 u64 val;
284 300
285 if (!cmci_supported(&banks)) 301 if (!cmci_supported(&banks))
286 return; 302 return;
287 raw_spin_lock_irqsave(&cmci_discover_lock, flags); 303 raw_spin_lock_irqsave(&cmci_discover_lock, flags);
288 for (i = 0; i < banks; i++) { 304 for (i = 0; i < banks; i++)
289 if (!test_bit(i, __get_cpu_var(mce_banks_owned))) 305 __cmci_disable_bank(i);
290 continue;
291 /* Disable CMCI */
292 rdmsrl(MSR_IA32_MCx_CTL2(i), val);
293 val &= ~MCI_CTL2_CMCI_EN;
294 wrmsrl(MSR_IA32_MCx_CTL2(i), val);
295 __clear_bit(i, __get_cpu_var(mce_banks_owned));
296 }
297 raw_spin_unlock_irqrestore(&cmci_discover_lock, flags); 306 raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
298} 307}
299 308
@@ -327,6 +336,19 @@ void cmci_reenable(void)
327 cmci_discover(banks); 336 cmci_discover(banks);
328} 337}
329 338
339void cmci_disable_bank(int bank)
340{
341 int banks;
342 unsigned long flags;
343
344 if (!cmci_supported(&banks))
345 return;
346
347 raw_spin_lock_irqsave(&cmci_discover_lock, flags);
348 __cmci_disable_bank(bank);
349 raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
350}
351
330static void intel_init_cmci(void) 352static void intel_init_cmci(void)
331{ 353{
332 int banks; 354 int banks;
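
The factored-out __cmci_disable_bank() boils down to clearing one enable bit in the bank's IA32_MCi_CTL2 MSR (MCI_CTL2_CMCI_EN, bit 30 per the SDM). A sketch with hypothetical rdmsr64()/wrmsr64() helpers in place of the kernel's rdmsrl()/wrmsrl():

    #define MCI_CTL2_CMCI_EN (1ULL << 30)

    /* Hypothetical MSR accessors; illustration only. */
    extern unsigned long long rdmsr64(unsigned int msr);
    extern void wrmsr64(unsigned int msr, unsigned long long val);

    static void cmci_disable_one(unsigned int msr_ctl2)
    {
        unsigned long long val = rdmsr64(msr_ctl2);

        val &= ~MCI_CTL2_CMCI_EN;   /* stop this bank from signalling CMCI */
        wrmsr64(msr_ctl2, val);
    }
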
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 8f4be53ea04b..71a39f3621ba 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -27,20 +27,23 @@
27struct ms_hyperv_info ms_hyperv; 27struct ms_hyperv_info ms_hyperv;
28EXPORT_SYMBOL_GPL(ms_hyperv); 28EXPORT_SYMBOL_GPL(ms_hyperv);
29 29
30static bool __init ms_hyperv_platform(void) 30static uint32_t __init ms_hyperv_platform(void)
31{ 31{
32 u32 eax; 32 u32 eax;
33 u32 hyp_signature[3]; 33 u32 hyp_signature[3];
34 34
35 if (!boot_cpu_has(X86_FEATURE_HYPERVISOR)) 35 if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
36 return false; 36 return 0;
37 37
38 cpuid(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS, 38 cpuid(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS,
39 &eax, &hyp_signature[0], &hyp_signature[1], &hyp_signature[2]); 39 &eax, &hyp_signature[0], &hyp_signature[1], &hyp_signature[2]);
40 40
41 return eax >= HYPERV_CPUID_MIN && 41 if (eax >= HYPERV_CPUID_MIN &&
42 eax <= HYPERV_CPUID_MAX && 42 eax <= HYPERV_CPUID_MAX &&
43 !memcmp("Microsoft Hv", hyp_signature, 12); 43 !memcmp("Microsoft Hv", hyp_signature, 12))
44 return HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS;
45
46 return 0;
44} 47}
45 48
46static cycle_t read_hv_clock(struct clocksource *arg) 49static cycle_t read_hv_clock(struct clocksource *arg)
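
The Hyper-V probe shows the new convention in practice: on a match it returns its vendor CPUID leaf (0x40000000) as the priority. A user-space sketch using GCC's <cpuid.h>; __cpuid is used directly because the leaf lies outside the basic range that __get_cpuid() validates against, and the boot_cpu_has(X86_FEATURE_HYPERVISOR) guard is elided:

    #include <cpuid.h>
    #include <string.h>

    static unsigned int hyperv_platform(void)
    {
        unsigned int eax, sig[3];

        __cpuid(0x40000000, eax, sig[0], sig[1], sig[2]);
        if (eax >= 0x40000005 && eax <= 0x4000ffff &&
            !memcmp("Microsoft Hv", sig, 12))
            return 0x40000000;   /* nonzero: detected, value is the priority */
        return 0;
    }
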
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index a7c7305030cc..8355c84b9729 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1884,6 +1884,7 @@ static struct pmu pmu = {
1884void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now) 1884void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
1885{ 1885{
1886 userpg->cap_usr_time = 0; 1886 userpg->cap_usr_time = 0;
1887 userpg->cap_usr_time_zero = 0;
1887 userpg->cap_usr_rdpmc = x86_pmu.attr_rdpmc; 1888 userpg->cap_usr_rdpmc = x86_pmu.attr_rdpmc;
1888 userpg->pmc_width = x86_pmu.cntval_bits; 1889 userpg->pmc_width = x86_pmu.cntval_bits;
1889 1890
@@ -1897,6 +1898,11 @@ void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
1897 userpg->time_mult = this_cpu_read(cyc2ns); 1898 userpg->time_mult = this_cpu_read(cyc2ns);
1898 userpg->time_shift = CYC2NS_SCALE_FACTOR; 1899 userpg->time_shift = CYC2NS_SCALE_FACTOR;
1899 userpg->time_offset = this_cpu_read(cyc2ns_offset) - now; 1900 userpg->time_offset = this_cpu_read(cyc2ns_offset) - now;
1901
1902 if (sched_clock_stable && !check_tsc_disabled()) {
1903 userpg->cap_usr_time_zero = 1;
1904 userpg->time_zero = this_cpu_read(cyc2ns_offset);
1905 }
1900} 1906}
1901 1907
1902/* 1908/*
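
With cap_usr_time_zero set, user space can convert a raw TSC value into a perf timestamp purely from the mmap control page. A sketch of the conversion (field names as exported in struct perf_event_mmap_page; the quot/rem split avoids overflowing the 64-bit multiply):

    #include <stdint.h>

    static uint64_t tsc_to_perf_time(uint64_t cyc, uint64_t time_zero,
                                     uint32_t time_mult, uint16_t time_shift)
    {
        uint64_t quot = cyc >> time_shift;
        uint64_t rem  = cyc & (((uint64_t)1 << time_shift) - 1);

        /* time_zero + (cyc * time_mult) >> time_shift, computed safely */
        return time_zero + quot * time_mult +
               ((rem * time_mult) >> time_shift);
    }
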
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 97e557bc4c91..cc16faae0538 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -641,6 +641,8 @@ extern struct event_constraint intel_core2_pebs_event_constraints[];
641 641
642extern struct event_constraint intel_atom_pebs_event_constraints[]; 642extern struct event_constraint intel_atom_pebs_event_constraints[];
643 643
644extern struct event_constraint intel_slm_pebs_event_constraints[];
645
644extern struct event_constraint intel_nehalem_pebs_event_constraints[]; 646extern struct event_constraint intel_nehalem_pebs_event_constraints[];
645 647
646extern struct event_constraint intel_westmere_pebs_event_constraints[]; 648extern struct event_constraint intel_westmere_pebs_event_constraints[];
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index 4cbe03287b08..beeb7cc07044 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -347,8 +347,7 @@ static struct amd_nb *amd_alloc_nb(int cpu)
347 struct amd_nb *nb; 347 struct amd_nb *nb;
348 int i; 348 int i;
349 349
350 nb = kmalloc_node(sizeof(struct amd_nb), GFP_KERNEL | __GFP_ZERO, 350 nb = kzalloc_node(sizeof(struct amd_nb), GFP_KERNEL, cpu_to_node(cpu));
351 cpu_to_node(cpu));
352 if (!nb) 351 if (!nb)
353 return NULL; 352 return NULL;
354 353
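
This conversion, and the matching ones in the perf_event_intel_ds.c and uncore hunks below, are behavior-preserving cleanups: kzalloc_node() is simply the zeroing wrapper, essentially as defined in <linux/slab.h>:

    static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
    {
        return kmalloc_node(size, flags | __GFP_ZERO, node);
    }
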
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index fbc9210b45bc..0abf6742a8b0 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -81,7 +81,8 @@ static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
81 81
82static struct extra_reg intel_nehalem_extra_regs[] __read_mostly = 82static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
83{ 83{
84 INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0), 84 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
85 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
85 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b), 86 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
86 EVENT_EXTRA_END 87 EVENT_EXTRA_END
87}; 88};
@@ -143,8 +144,9 @@ static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
143 144
144static struct extra_reg intel_westmere_extra_regs[] __read_mostly = 145static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
145{ 146{
146 INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0), 147 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
147 INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1), 148 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
149 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
148 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b), 150 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
149 EVENT_EXTRA_END 151 EVENT_EXTRA_END
150}; 152};
@@ -162,16 +164,27 @@ static struct event_constraint intel_gen_event_constraints[] __read_mostly =
162 EVENT_CONSTRAINT_END 164 EVENT_CONSTRAINT_END
163}; 165};
164 166
167static struct event_constraint intel_slm_event_constraints[] __read_mostly =
168{
169 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
170 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
171 FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF */
172 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
173 EVENT_CONSTRAINT_END
174};
175
165static struct extra_reg intel_snb_extra_regs[] __read_mostly = { 176static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
166 INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0), 177 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
167 INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1), 178 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
179 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
168 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), 180 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
169 EVENT_EXTRA_END 181 EVENT_EXTRA_END
170}; 182};
171 183
172static struct extra_reg intel_snbep_extra_regs[] __read_mostly = { 184static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
173 INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0), 185 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
174 INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1), 186 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
187 INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
175 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), 188 INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
176 EVENT_EXTRA_END 189 EVENT_EXTRA_END
177}; 190};
@@ -882,6 +895,140 @@ static __initconst const u64 atom_hw_cache_event_ids
882 }, 895 },
883}; 896};
884 897
898static struct extra_reg intel_slm_extra_regs[] __read_mostly =
899{
900 /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
901 INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffff, RSP_0),
902 INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x768005ffff, RSP_1),
903 EVENT_EXTRA_END
904};
905
906#define SLM_DMND_READ SNB_DMND_DATA_RD
907#define SLM_DMND_WRITE SNB_DMND_RFO
908#define SLM_DMND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO)
909
910#define SLM_SNP_ANY (SNB_SNP_NONE|SNB_SNP_MISS|SNB_NO_FWD|SNB_HITM)
911#define SLM_LLC_ACCESS SNB_RESP_ANY
912#define SLM_LLC_MISS (SLM_SNP_ANY|SNB_NON_DRAM)
913
914static __initconst const u64 slm_hw_cache_extra_regs
915 [PERF_COUNT_HW_CACHE_MAX]
916 [PERF_COUNT_HW_CACHE_OP_MAX]
917 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
918{
919 [ C(LL ) ] = {
920 [ C(OP_READ) ] = {
921 [ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS,
922 [ C(RESULT_MISS) ] = SLM_DMND_READ|SLM_LLC_MISS,
923 },
924 [ C(OP_WRITE) ] = {
925 [ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS,
926 [ C(RESULT_MISS) ] = SLM_DMND_WRITE|SLM_LLC_MISS,
927 },
928 [ C(OP_PREFETCH) ] = {
929 [ C(RESULT_ACCESS) ] = SLM_DMND_PREFETCH|SLM_LLC_ACCESS,
930 [ C(RESULT_MISS) ] = SLM_DMND_PREFETCH|SLM_LLC_MISS,
931 },
932 },
933};
934
935static __initconst const u64 slm_hw_cache_event_ids
936 [PERF_COUNT_HW_CACHE_MAX]
937 [PERF_COUNT_HW_CACHE_OP_MAX]
938 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
939{
940 [ C(L1D) ] = {
941 [ C(OP_READ) ] = {
942 [ C(RESULT_ACCESS) ] = 0,
943 [ C(RESULT_MISS) ] = 0x0104, /* LD_DCU_MISS */
944 },
945 [ C(OP_WRITE) ] = {
946 [ C(RESULT_ACCESS) ] = 0,
947 [ C(RESULT_MISS) ] = 0,
948 },
949 [ C(OP_PREFETCH) ] = {
950 [ C(RESULT_ACCESS) ] = 0,
951 [ C(RESULT_MISS) ] = 0,
952 },
953 },
954 [ C(L1I ) ] = {
955 [ C(OP_READ) ] = {
956 [ C(RESULT_ACCESS) ] = 0x0380, /* ICACHE.ACCESSES */
957 [ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */
958 },
959 [ C(OP_WRITE) ] = {
960 [ C(RESULT_ACCESS) ] = -1,
961 [ C(RESULT_MISS) ] = -1,
962 },
963 [ C(OP_PREFETCH) ] = {
964 [ C(RESULT_ACCESS) ] = 0,
965 [ C(RESULT_MISS) ] = 0,
966 },
967 },
968 [ C(LL ) ] = {
969 [ C(OP_READ) ] = {
970 /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
971 [ C(RESULT_ACCESS) ] = 0x01b7,
972 /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
973 [ C(RESULT_MISS) ] = 0x01b7,
974 },
975 [ C(OP_WRITE) ] = {
976 /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
977 [ C(RESULT_ACCESS) ] = 0x01b7,
978 /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
979 [ C(RESULT_MISS) ] = 0x01b7,
980 },
981 [ C(OP_PREFETCH) ] = {
982 /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
983 [ C(RESULT_ACCESS) ] = 0x01b7,
984 /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
985 [ C(RESULT_MISS) ] = 0x01b7,
986 },
987 },
988 [ C(DTLB) ] = {
989 [ C(OP_READ) ] = {
990 [ C(RESULT_ACCESS) ] = 0,
991 [ C(RESULT_MISS) ] = 0x0804, /* LD_DTLB_MISS */
992 },
993 [ C(OP_WRITE) ] = {
994 [ C(RESULT_ACCESS) ] = 0,
995 [ C(RESULT_MISS) ] = 0,
996 },
997 [ C(OP_PREFETCH) ] = {
998 [ C(RESULT_ACCESS) ] = 0,
999 [ C(RESULT_MISS) ] = 0,
1000 },
1001 },
1002 [ C(ITLB) ] = {
1003 [ C(OP_READ) ] = {
1004 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
1005 [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
1006 },
1007 [ C(OP_WRITE) ] = {
1008 [ C(RESULT_ACCESS) ] = -1,
1009 [ C(RESULT_MISS) ] = -1,
1010 },
1011 [ C(OP_PREFETCH) ] = {
1012 [ C(RESULT_ACCESS) ] = -1,
1013 [ C(RESULT_MISS) ] = -1,
1014 },
1015 },
1016 [ C(BPU ) ] = {
1017 [ C(OP_READ) ] = {
1018 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
1019 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
1020 },
1021 [ C(OP_WRITE) ] = {
1022 [ C(RESULT_ACCESS) ] = -1,
1023 [ C(RESULT_MISS) ] = -1,
1024 },
1025 [ C(OP_PREFETCH) ] = {
1026 [ C(RESULT_ACCESS) ] = -1,
1027 [ C(RESULT_MISS) ] = -1,
1028 },
1029 },
1030};
1031
885static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event) 1032static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event)
886{ 1033{
887 /* user explicitly requested branch sampling */ 1034 /* user explicitly requested branch sampling */
@@ -1301,11 +1448,11 @@ static void intel_fixup_er(struct perf_event *event, int idx)
1301 1448
1302 if (idx == EXTRA_REG_RSP_0) { 1449 if (idx == EXTRA_REG_RSP_0) {
1303 event->hw.config &= ~INTEL_ARCH_EVENT_MASK; 1450 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
1304 event->hw.config |= 0x01b7; 1451 event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_0].event;
1305 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0; 1452 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
1306 } else if (idx == EXTRA_REG_RSP_1) { 1453 } else if (idx == EXTRA_REG_RSP_1) {
1307 event->hw.config &= ~INTEL_ARCH_EVENT_MASK; 1454 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
1308 event->hw.config |= 0x01bb; 1455 event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_1].event;
1309 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1; 1456 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
1310 } 1457 }
1311} 1458}
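
The switch from INTEL_EVENT_EXTRA_REG(0xb7, ...) to INTEL_UEVENT_EXTRA_REG(0x01b7, ...) moves the umask into the match: x86 perf event codes pack the unit mask in bits 15:8 and the event select in bits 7:0, so 0x01b7 is event 0xb7 with umask 0x01. intel_fixup_er() then reads the canonical value back out of x86_pmu.extra_regs instead of hard-coding it, which is what lets Silvermont reuse the same path with its 0x02b7 variant. A sketch of the packing (macro name is illustrative):

    /* Event select in bits 7:0, umask in bits 15:8 (Intel SDM perfmon format). */
    #define PACK_EVENT(event, umask)  (((umask) << 8) | (event))

    /* PACK_EVENT(0xb7, 0x01) == 0x01b7 -> routed to MSR_OFFCORE_RSP_0
     * PACK_EVENT(0xbb, 0x01) == 0x01bb -> routed to MSR_OFFCORE_RSP_1 */
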
@@ -2176,6 +2323,21 @@ __init int intel_pmu_init(void)
2176 pr_cont("Atom events, "); 2323 pr_cont("Atom events, ");
2177 break; 2324 break;
2178 2325
2326 case 55: /* Atom 22nm "Silvermont" */
2327 memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
2328 sizeof(hw_cache_event_ids));
2329 memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
2330 sizeof(hw_cache_extra_regs));
2331
2332 intel_pmu_lbr_init_atom();
2333
2334 x86_pmu.event_constraints = intel_slm_event_constraints;
2335 x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
2336 x86_pmu.extra_regs = intel_slm_extra_regs;
2337 x86_pmu.er_flags |= ERF_HAS_RSP_1;
2338 pr_cont("Silvermont events, ");
2339 break;
2340
2179 case 37: /* 32 nm nehalem, "Clarkdale" */ 2341 case 37: /* 32 nm nehalem, "Clarkdale" */
2180 case 44: /* 32 nm nehalem, "Gulftown" */ 2342 case 44: /* 32 nm nehalem, "Gulftown" */
2181 case 47: /* 32 nm Xeon E7 */ 2343 case 47: /* 32 nm Xeon E7 */
@@ -2270,6 +2432,7 @@ __init int intel_pmu_init(void)
2270 case 70: 2432 case 70:
2271 case 71: 2433 case 71:
2272 case 63: 2434 case 63:
2435 case 69:
2273 x86_pmu.late_ack = true; 2436 x86_pmu.late_ack = true;
2274 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids)); 2437 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids));
2275 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); 2438 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 3065c57a63c1..63438aad177f 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -224,7 +224,7 @@ static int alloc_pebs_buffer(int cpu)
224 if (!x86_pmu.pebs) 224 if (!x86_pmu.pebs)
225 return 0; 225 return 0;
226 226
227 buffer = kmalloc_node(PEBS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node); 227 buffer = kzalloc_node(PEBS_BUFFER_SIZE, GFP_KERNEL, node);
228 if (unlikely(!buffer)) 228 if (unlikely(!buffer))
229 return -ENOMEM; 229 return -ENOMEM;
230 230
@@ -262,7 +262,7 @@ static int alloc_bts_buffer(int cpu)
262 if (!x86_pmu.bts) 262 if (!x86_pmu.bts)
263 return 0; 263 return 0;
264 264
265 buffer = kmalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node); 265 buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL, node);
266 if (unlikely(!buffer)) 266 if (unlikely(!buffer))
267 return -ENOMEM; 267 return -ENOMEM;
268 268
@@ -295,7 +295,7 @@ static int alloc_ds_buffer(int cpu)
295 int node = cpu_to_node(cpu); 295 int node = cpu_to_node(cpu);
296 struct debug_store *ds; 296 struct debug_store *ds;
297 297
298 ds = kmalloc_node(sizeof(*ds), GFP_KERNEL | __GFP_ZERO, node); 298 ds = kzalloc_node(sizeof(*ds), GFP_KERNEL, node);
299 if (unlikely(!ds)) 299 if (unlikely(!ds))
300 return -ENOMEM; 300 return -ENOMEM;
301 301
@@ -517,6 +517,32 @@ struct event_constraint intel_atom_pebs_event_constraints[] = {
517 EVENT_CONSTRAINT_END 517 EVENT_CONSTRAINT_END
518}; 518};
519 519
520struct event_constraint intel_slm_pebs_event_constraints[] = {
521 INTEL_UEVENT_CONSTRAINT(0x0103, 0x1), /* REHABQ.LD_BLOCK_ST_FORWARD_PS */
522 INTEL_UEVENT_CONSTRAINT(0x0803, 0x1), /* REHABQ.LD_SPLITS_PS */
523 INTEL_UEVENT_CONSTRAINT(0x0204, 0x1), /* MEM_UOPS_RETIRED.L2_HIT_LOADS_PS */
524 INTEL_UEVENT_CONSTRAINT(0x0404, 0x1), /* MEM_UOPS_RETIRED.L2_MISS_LOADS_PS */
525 INTEL_UEVENT_CONSTRAINT(0x0804, 0x1), /* MEM_UOPS_RETIRED.DTLB_MISS_LOADS_PS */
526 INTEL_UEVENT_CONSTRAINT(0x2004, 0x1), /* MEM_UOPS_RETIRED.HITM_PS */
527 INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY_PS */
528 INTEL_UEVENT_CONSTRAINT(0x00c4, 0x1), /* BR_INST_RETIRED.ALL_BRANCHES_PS */
529 INTEL_UEVENT_CONSTRAINT(0x7ec4, 0x1), /* BR_INST_RETIRED.JCC_PS */
530 INTEL_UEVENT_CONSTRAINT(0xbfc4, 0x1), /* BR_INST_RETIRED.FAR_BRANCH_PS */
531 INTEL_UEVENT_CONSTRAINT(0xebc4, 0x1), /* BR_INST_RETIRED.NON_RETURN_IND_PS */
532 INTEL_UEVENT_CONSTRAINT(0xf7c4, 0x1), /* BR_INST_RETIRED.RETURN_PS */
533 INTEL_UEVENT_CONSTRAINT(0xf9c4, 0x1), /* BR_INST_RETIRED.CALL_PS */
534 INTEL_UEVENT_CONSTRAINT(0xfbc4, 0x1), /* BR_INST_RETIRED.IND_CALL_PS */
535 INTEL_UEVENT_CONSTRAINT(0xfdc4, 0x1), /* BR_INST_RETIRED.REL_CALL_PS */
536 INTEL_UEVENT_CONSTRAINT(0xfec4, 0x1), /* BR_INST_RETIRED.TAKEN_JCC_PS */
537 INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_MISP_RETIRED.ALL_BRANCHES_PS */
538 INTEL_UEVENT_CONSTRAINT(0x7ec5, 0x1), /* BR_INST_MISP_RETIRED.JCC_PS */
539 INTEL_UEVENT_CONSTRAINT(0xebc5, 0x1), /* BR_INST_MISP_RETIRED.NON_RETURN_IND_PS */
540 INTEL_UEVENT_CONSTRAINT(0xf7c5, 0x1), /* BR_INST_MISP_RETIRED.RETURN_PS */
541 INTEL_UEVENT_CONSTRAINT(0xfbc5, 0x1), /* BR_INST_MISP_RETIRED.IND_CALL_PS */
542 INTEL_UEVENT_CONSTRAINT(0xfec5, 0x1), /* BR_INST_MISP_RETIRED.TAKEN_JCC_PS */
543 EVENT_CONSTRAINT_END
544};
545
520struct event_constraint intel_nehalem_pebs_event_constraints[] = { 546struct event_constraint intel_nehalem_pebs_event_constraints[] = {
521 INTEL_PLD_CONSTRAINT(0x100b, 0xf), /* MEM_INST_RETIRED.* */ 547 INTEL_PLD_CONSTRAINT(0x100b, 0xf), /* MEM_INST_RETIRED.* */
522 INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */ 548 INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index cad791dbde95..fd8011ed4dcd 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -6,6 +6,8 @@ static struct intel_uncore_type **pci_uncores = empty_uncore;
6/* pci bus to socket mapping */ 6/* pci bus to socket mapping */
7static int pcibus_to_physid[256] = { [0 ... 255] = -1, }; 7static int pcibus_to_physid[256] = { [0 ... 255] = -1, };
8 8
9static struct pci_dev *extra_pci_dev[UNCORE_SOCKET_MAX][UNCORE_EXTRA_PCI_DEV_MAX];
10
9static DEFINE_RAW_SPINLOCK(uncore_box_lock); 11static DEFINE_RAW_SPINLOCK(uncore_box_lock);
10 12
11/* mask of cpus that collect uncore events */ 13/* mask of cpus that collect uncore events */
@@ -45,6 +47,24 @@ DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
45DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15"); 47DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
46DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23"); 48DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
47DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31"); 49DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
50DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
51DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
52DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
53DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
54DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
55DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
56DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
57DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
58DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
59DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
60DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
61DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
62DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
63DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
64DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
65DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
66DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
67DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
48 68
49static u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event) 69static u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
50{ 70{
@@ -281,7 +301,7 @@ static struct attribute *snbep_uncore_cbox_formats_attr[] = {
281}; 301};
282 302
283static struct attribute *snbep_uncore_pcu_formats_attr[] = { 303static struct attribute *snbep_uncore_pcu_formats_attr[] = {
284 &format_attr_event.attr, 304 &format_attr_event_ext.attr,
285 &format_attr_occ_sel.attr, 305 &format_attr_occ_sel.attr,
286 &format_attr_edge.attr, 306 &format_attr_edge.attr,
287 &format_attr_inv.attr, 307 &format_attr_inv.attr,
@@ -301,6 +321,24 @@ static struct attribute *snbep_uncore_qpi_formats_attr[] = {
301 &format_attr_edge.attr, 321 &format_attr_edge.attr,
302 &format_attr_inv.attr, 322 &format_attr_inv.attr,
303 &format_attr_thresh8.attr, 323 &format_attr_thresh8.attr,
324 &format_attr_match_rds.attr,
325 &format_attr_match_rnid30.attr,
326 &format_attr_match_rnid4.attr,
327 &format_attr_match_dnid.attr,
328 &format_attr_match_mc.attr,
329 &format_attr_match_opc.attr,
330 &format_attr_match_vnw.attr,
331 &format_attr_match0.attr,
332 &format_attr_match1.attr,
333 &format_attr_mask_rds.attr,
334 &format_attr_mask_rnid30.attr,
335 &format_attr_mask_rnid4.attr,
336 &format_attr_mask_dnid.attr,
337 &format_attr_mask_mc.attr,
338 &format_attr_mask_opc.attr,
339 &format_attr_mask_vnw.attr,
340 &format_attr_mask0.attr,
341 &format_attr_mask1.attr,
304 NULL, 342 NULL,
305}; 343};
306 344
@@ -314,8 +352,8 @@ static struct uncore_event_desc snbep_uncore_imc_events[] = {
314static struct uncore_event_desc snbep_uncore_qpi_events[] = { 352static struct uncore_event_desc snbep_uncore_qpi_events[] = {
315 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"), 353 INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"),
316 INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"), 354 INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
317 INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x02,umask=0x08"), 355 INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x102,umask=0x08"),
318 INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x03,umask=0x04"), 356 INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x103,umask=0x04"),
319 { /* end: all zeroes */ }, 357 { /* end: all zeroes */ },
320}; 358};
321 359
@@ -356,13 +394,16 @@ static struct intel_uncore_ops snbep_uncore_msr_ops = {
356 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), 394 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
357}; 395};
358 396
397#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT() \
398 .init_box = snbep_uncore_pci_init_box, \
399 .disable_box = snbep_uncore_pci_disable_box, \
400 .enable_box = snbep_uncore_pci_enable_box, \
401 .disable_event = snbep_uncore_pci_disable_event, \
402 .read_counter = snbep_uncore_pci_read_counter
403
359static struct intel_uncore_ops snbep_uncore_pci_ops = { 404static struct intel_uncore_ops snbep_uncore_pci_ops = {
360 .init_box = snbep_uncore_pci_init_box, 405 SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
361 .disable_box = snbep_uncore_pci_disable_box, 406 .enable_event = snbep_uncore_pci_enable_event,
362 .enable_box = snbep_uncore_pci_enable_box,
363 .disable_event = snbep_uncore_pci_disable_event,
364 .enable_event = snbep_uncore_pci_enable_event,
365 .read_counter = snbep_uncore_pci_read_counter,
366}; 407};
367 408
368static struct event_constraint snbep_uncore_cbox_constraints[] = { 409static struct event_constraint snbep_uncore_cbox_constraints[] = {
@@ -726,6 +767,61 @@ static struct intel_uncore_type *snbep_msr_uncores[] = {
726 NULL, 767 NULL,
727}; 768};
728 769
770enum {
771 SNBEP_PCI_QPI_PORT0_FILTER,
772 SNBEP_PCI_QPI_PORT1_FILTER,
773};
774
775static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
776{
777 struct hw_perf_event *hwc = &event->hw;
778 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
779 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
780
781 if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
782 reg1->idx = 0;
783 reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
784 reg1->config = event->attr.config1;
785 reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
786 reg2->config = event->attr.config2;
787 }
788 return 0;
789}
790
791static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
792{
793 struct pci_dev *pdev = box->pci_dev;
794 struct hw_perf_event *hwc = &event->hw;
795 struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
796 struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
797
798 if (reg1->idx != EXTRA_REG_NONE) {
799 int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
800 struct pci_dev *filter_pdev = extra_pci_dev[box->phys_id][idx];
801 WARN_ON_ONCE(!filter_pdev);
802 if (filter_pdev) {
803 pci_write_config_dword(filter_pdev, reg1->reg,
804 (u32)reg1->config);
805 pci_write_config_dword(filter_pdev, reg1->reg + 4,
806 (u32)(reg1->config >> 32));
807 pci_write_config_dword(filter_pdev, reg2->reg,
808 (u32)reg2->config);
809 pci_write_config_dword(filter_pdev, reg2->reg + 4,
810 (u32)(reg2->config >> 32));
811 }
812 }
813
814 pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
815}
816
817static struct intel_uncore_ops snbep_uncore_qpi_ops = {
818 SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
819 .enable_event = snbep_qpi_enable_event,
820 .hw_config = snbep_qpi_hw_config,
821 .get_constraint = uncore_get_constraint,
822 .put_constraint = uncore_put_constraint,
823};
824
729#define SNBEP_UNCORE_PCI_COMMON_INIT() \ 825#define SNBEP_UNCORE_PCI_COMMON_INIT() \
730 .perf_ctr = SNBEP_PCI_PMON_CTR0, \ 826 .perf_ctr = SNBEP_PCI_PMON_CTR0, \
731 .event_ctl = SNBEP_PCI_PMON_CTL0, \ 827 .event_ctl = SNBEP_PCI_PMON_CTL0, \
@@ -755,17 +851,18 @@ static struct intel_uncore_type snbep_uncore_imc = {
755}; 851};
756 852
757static struct intel_uncore_type snbep_uncore_qpi = { 853static struct intel_uncore_type snbep_uncore_qpi = {
758 .name = "qpi", 854 .name = "qpi",
759 .num_counters = 4, 855 .num_counters = 4,
760 .num_boxes = 2, 856 .num_boxes = 2,
761 .perf_ctr_bits = 48, 857 .perf_ctr_bits = 48,
762 .perf_ctr = SNBEP_PCI_PMON_CTR0, 858 .perf_ctr = SNBEP_PCI_PMON_CTR0,
763 .event_ctl = SNBEP_PCI_PMON_CTL0, 859 .event_ctl = SNBEP_PCI_PMON_CTL0,
764 .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK, 860 .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
765 .box_ctl = SNBEP_PCI_PMON_BOX_CTL, 861 .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
766 .ops = &snbep_uncore_pci_ops, 862 .num_shared_regs = 1,
767 .event_descs = snbep_uncore_qpi_events, 863 .ops = &snbep_uncore_qpi_ops,
768 .format_group = &snbep_uncore_qpi_format_group, 864 .event_descs = snbep_uncore_qpi_events,
865 .format_group = &snbep_uncore_qpi_format_group,
769}; 866};
770 867
771 868
@@ -807,43 +904,53 @@ static struct intel_uncore_type *snbep_pci_uncores[] = {
807static DEFINE_PCI_DEVICE_TABLE(snbep_uncore_pci_ids) = { 904static DEFINE_PCI_DEVICE_TABLE(snbep_uncore_pci_ids) = {
808 { /* Home Agent */ 905 { /* Home Agent */
809 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA), 906 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
810 .driver_data = SNBEP_PCI_UNCORE_HA, 907 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
811 }, 908 },
812 { /* MC Channel 0 */ 909 { /* MC Channel 0 */
813 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0), 910 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
814 .driver_data = SNBEP_PCI_UNCORE_IMC, 911 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
815 }, 912 },
816 { /* MC Channel 1 */ 913 { /* MC Channel 1 */
817 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1), 914 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
818 .driver_data = SNBEP_PCI_UNCORE_IMC, 915 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
819 }, 916 },
820 { /* MC Channel 2 */ 917 { /* MC Channel 2 */
821 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2), 918 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
822 .driver_data = SNBEP_PCI_UNCORE_IMC, 919 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
823 }, 920 },
824 { /* MC Channel 3 */ 921 { /* MC Channel 3 */
825 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3), 922 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
826 .driver_data = SNBEP_PCI_UNCORE_IMC, 923 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
827 }, 924 },
828 { /* QPI Port 0 */ 925 { /* QPI Port 0 */
829 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0), 926 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
830 .driver_data = SNBEP_PCI_UNCORE_QPI, 927 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
831 }, 928 },
832 { /* QPI Port 1 */ 929 { /* QPI Port 1 */
833 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1), 930 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
834 .driver_data = SNBEP_PCI_UNCORE_QPI, 931 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
835 }, 932 },
836 { /* R2PCIe */ 933 { /* R2PCIe */
837 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE), 934 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
838 .driver_data = SNBEP_PCI_UNCORE_R2PCIE, 935 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
839 }, 936 },
840 { /* R3QPI Link 0 */ 937 { /* R3QPI Link 0 */
841 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0), 938 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
842 .driver_data = SNBEP_PCI_UNCORE_R3QPI, 939 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
843 }, 940 },
844 { /* R3QPI Link 1 */ 941 { /* R3QPI Link 1 */
845 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1), 942 PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
846 .driver_data = SNBEP_PCI_UNCORE_R3QPI, 943 .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
944 },
945 { /* QPI Port 0 filter */
946 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
947 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
948 SNBEP_PCI_QPI_PORT0_FILTER),
949 },
950 { /* QPI Port 0 filter */
951 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
952 .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
953 SNBEP_PCI_QPI_PORT1_FILTER),
847 }, 954 },
848 { /* end: all zeroes */ } 955 { /* end: all zeroes */ }
849}; 956};
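
The driver_data values in this table now carry two fields. The packing macros (added alongside this change in perf_event_intel_uncore.h; reproduced here as a sketch) put the uncore type in the high byte and the box or filter index in the low byte, which uncore_pci_probe() unpacks further down to route filter devices into extra_pci_dev[] instead of creating a PMU box for them:

    #define UNCORE_PCI_DEV_DATA(type, idx)  (((type) << 8) | (idx))
    #define UNCORE_PCI_DEV_TYPE(data)       (((data) >> 8) & 0xff)
    #define UNCORE_PCI_DEV_IDX(data)        ((data) & 0xff)
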
@@ -1256,71 +1363,71 @@ static struct intel_uncore_type *ivt_pci_uncores[] = {
1256static DEFINE_PCI_DEVICE_TABLE(ivt_uncore_pci_ids) = { 1363static DEFINE_PCI_DEVICE_TABLE(ivt_uncore_pci_ids) = {
1257 { /* Home Agent 0 */ 1364 { /* Home Agent 0 */
1258 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30), 1365 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
1259 .driver_data = IVT_PCI_UNCORE_HA, 1366 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_HA, 0),
1260 }, 1367 },
1261 { /* Home Agent 1 */ 1368 { /* Home Agent 1 */
1262 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38), 1369 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
1263 .driver_data = IVT_PCI_UNCORE_HA, 1370 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_HA, 1),
1264 }, 1371 },
1265 { /* MC0 Channel 0 */ 1372 { /* MC0 Channel 0 */
1266 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4), 1373 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
1267 .driver_data = IVT_PCI_UNCORE_IMC, 1374 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 0),
1268 }, 1375 },
1269 { /* MC0 Channel 1 */ 1376 { /* MC0 Channel 1 */
1270 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5), 1377 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
1271 .driver_data = IVT_PCI_UNCORE_IMC, 1378 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 1),
1272 }, 1379 },
1273 { /* MC0 Channel 3 */ 1380 { /* MC0 Channel 3 */
1274 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0), 1381 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
1275 .driver_data = IVT_PCI_UNCORE_IMC, 1382 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 2),
1276 }, 1383 },
1277 { /* MC0 Channel 4 */ 1384 { /* MC0 Channel 4 */
1278 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1), 1385 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
1279 .driver_data = IVT_PCI_UNCORE_IMC, 1386 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 3),
1280 }, 1387 },
1281 { /* MC1 Channel 0 */ 1388 { /* MC1 Channel 0 */
1282 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4), 1389 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
1283 .driver_data = IVT_PCI_UNCORE_IMC, 1390 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 4),
1284 }, 1391 },
1285 { /* MC1 Channel 1 */ 1392 { /* MC1 Channel 1 */
1286 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5), 1393 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
1287 .driver_data = IVT_PCI_UNCORE_IMC, 1394 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 5),
1288 }, 1395 },
1289 { /* MC1 Channel 3 */ 1396 { /* MC1 Channel 3 */
1290 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0), 1397 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
1291 .driver_data = IVT_PCI_UNCORE_IMC, 1398 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 6),
1292 }, 1399 },
1293 { /* MC1 Channel 4 */ 1400 { /* MC1 Channel 4 */
1294 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1), 1401 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
1295 .driver_data = IVT_PCI_UNCORE_IMC, 1402 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 7),
1296 }, 1403 },
1297 { /* QPI0 Port 0 */ 1404 { /* QPI0 Port 0 */
1298 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32), 1405 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
1299 .driver_data = IVT_PCI_UNCORE_QPI, 1406 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 0),
1300 }, 1407 },
1301 { /* QPI0 Port 1 */ 1408 { /* QPI0 Port 1 */
1302 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33), 1409 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
1303 .driver_data = IVT_PCI_UNCORE_QPI, 1410 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 1),
1304 }, 1411 },
1305 { /* QPI1 Port 2 */ 1412 { /* QPI1 Port 2 */
1306 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a), 1413 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
1307 .driver_data = IVT_PCI_UNCORE_QPI, 1414 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 2),
1308 }, 1415 },
1309 { /* R2PCIe */ 1416 { /* R2PCIe */
1310 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34), 1417 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
1311 .driver_data = IVT_PCI_UNCORE_R2PCIE, 1418 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R2PCIE, 0),
1312 }, 1419 },
1313 { /* R3QPI0 Link 0 */ 1420 { /* R3QPI0 Link 0 */
1314 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36), 1421 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
1315 .driver_data = IVT_PCI_UNCORE_R3QPI, 1422 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 0),
1316 }, 1423 },
1317 { /* R3QPI0 Link 1 */ 1424 { /* R3QPI0 Link 1 */
1318 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37), 1425 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
1319 .driver_data = IVT_PCI_UNCORE_R3QPI, 1426 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 1),
1320 }, 1427 },
1321 { /* R3QPI1 Link 2 */ 1428 { /* R3QPI1 Link 2 */
1322 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e), 1429 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
1323 .driver_data = IVT_PCI_UNCORE_R3QPI, 1430 .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 2),
1324 }, 1431 },
1325 { /* end: all zeroes */ } 1432 { /* end: all zeroes */ }
1326}; 1433};
@@ -2606,7 +2713,7 @@ struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int cp
2606 2713
2607 size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg); 2714 size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg);
2608 2715
2609 box = kmalloc_node(size, GFP_KERNEL | __GFP_ZERO, cpu_to_node(cpu)); 2716 box = kzalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
2610 if (!box) 2717 if (!box)
2611 return NULL; 2718 return NULL;
2612 2719
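/*
 * A note on the hunk above: kzalloc_node() is just the zeroing wrapper,
 * effectively
 *
 *	static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
 *	{
 *		return kmalloc_node(size, flags | __GFP_ZERO, node);
 *	}
 *
 * so the replacement is a readability cleanup with identical behaviour.
 */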
@@ -3167,16 +3274,24 @@ static bool pcidrv_registered;
3167/* 3274/*
3168 * add a pci uncore device 3275 * add a pci uncore device
3169 */ 3276 */
3170static int uncore_pci_add(struct intel_uncore_type *type, struct pci_dev *pdev) 3277static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3171{ 3278{
3172 struct intel_uncore_pmu *pmu; 3279 struct intel_uncore_pmu *pmu;
3173 struct intel_uncore_box *box; 3280 struct intel_uncore_box *box;
3174 int i, phys_id; 3281 struct intel_uncore_type *type;
3282 int phys_id;
3175 3283
3176 phys_id = pcibus_to_physid[pdev->bus->number]; 3284 phys_id = pcibus_to_physid[pdev->bus->number];
3177 if (phys_id < 0) 3285 if (phys_id < 0)
3178 return -ENODEV; 3286 return -ENODEV;
3179 3287
3288 if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
3289 extra_pci_dev[phys_id][UNCORE_PCI_DEV_IDX(id->driver_data)] = pdev;
3290 pci_set_drvdata(pdev, NULL);
3291 return 0;
3292 }
3293
3294 type = pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
3180 box = uncore_alloc_box(type, 0); 3295 box = uncore_alloc_box(type, 0);
3181 if (!box) 3296 if (!box)
3182 return -ENOMEM; 3297 return -ENOMEM;
@@ -3185,21 +3300,11 @@ static int uncore_pci_add(struct intel_uncore_type *type, struct pci_dev *pdev)
3185 * for a performance monitoring unit with multiple boxes, 3300 * for a performance monitoring unit with multiple boxes,
3186 * each box has a different function id. 3301 * each box has a different function id.
3187 */ 3302 */
3188 for (i = 0; i < type->num_boxes; i++) { 3303 pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
3189 pmu = &type->pmus[i]; 3304 if (pmu->func_id < 0)
3190 if (pmu->func_id == pdev->devfn) 3305 pmu->func_id = pdev->devfn;
3191 break; 3306 else
3192 if (pmu->func_id < 0) { 3307 WARN_ON_ONCE(pmu->func_id != pdev->devfn);
3193 pmu->func_id = pdev->devfn;
3194 break;
3195 }
3196 pmu = NULL;
3197 }
3198
3199 if (!pmu) {
3200 kfree(box);
3201 return -EINVAL;
3202 }
3203 3308
3204 box->phys_id = phys_id; 3309 box->phys_id = phys_id;
3205 box->pci_dev = pdev; 3310 box->pci_dev = pdev;
@@ -3217,9 +3322,22 @@ static int uncore_pci_add(struct intel_uncore_type *type, struct pci_dev *pdev)
3217static void uncore_pci_remove(struct pci_dev *pdev) 3322static void uncore_pci_remove(struct pci_dev *pdev)
3218{ 3323{
3219 struct intel_uncore_box *box = pci_get_drvdata(pdev); 3324 struct intel_uncore_box *box = pci_get_drvdata(pdev);
3220 struct intel_uncore_pmu *pmu = box->pmu; 3325 struct intel_uncore_pmu *pmu;
3221 int cpu, phys_id = pcibus_to_physid[pdev->bus->number]; 3326 int i, cpu, phys_id = pcibus_to_physid[pdev->bus->number];
3222 3327
3328 box = pci_get_drvdata(pdev);
3329 if (!box) {
3330 for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
3331 if (extra_pci_dev[phys_id][i] == pdev) {
3332 extra_pci_dev[phys_id][i] = NULL;
3333 break;
3334 }
3335 }
3336 WARN_ON_ONCE(i >= UNCORE_EXTRA_PCI_DEV_MAX);
3337 return;
3338 }
3339
3340 pmu = box->pmu;
3223 if (WARN_ON_ONCE(phys_id != box->phys_id)) 3341 if (WARN_ON_ONCE(phys_id != box->phys_id))
3224 return; 3342 return;
3225 3343
@@ -3240,12 +3358,6 @@ static void uncore_pci_remove(struct pci_dev *pdev)
3240 kfree(box); 3358 kfree(box);
3241} 3359}
3242 3360
3243static int uncore_pci_probe(struct pci_dev *pdev,
3244 const struct pci_device_id *id)
3245{
3246 return uncore_pci_add(pci_uncores[id->driver_data], pdev);
3247}
3248
3249static int __init uncore_pci_init(void) 3361static int __init uncore_pci_init(void)
3250{ 3362{
3251 int ret; 3363 int ret;
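/*
 * Sketch of how a stashed "extra" device would be looked up later by the
 * PMU code that needs it; the helper name is hypothetical, the array and
 * the bound are the ones introduced above.
 */
static struct pci_dev *uncore_get_extra_dev(int phys_id, int idx)
{
	if (WARN_ON_ONCE(idx >= UNCORE_EXTRA_PCI_DEV_MAX))
		return NULL;
	return extra_pci_dev[phys_id][idx];
}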
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
index 47b3d00c9d89..a80ab71a883d 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
@@ -12,6 +12,15 @@
12#define UNCORE_PMC_IDX_FIXED UNCORE_PMC_IDX_MAX_GENERIC 12#define UNCORE_PMC_IDX_FIXED UNCORE_PMC_IDX_MAX_GENERIC
13#define UNCORE_PMC_IDX_MAX (UNCORE_PMC_IDX_FIXED + 1) 13#define UNCORE_PMC_IDX_MAX (UNCORE_PMC_IDX_FIXED + 1)
14 14
15#define UNCORE_PCI_DEV_DATA(type, idx) ((type << 8) | idx)
16#define UNCORE_PCI_DEV_TYPE(data) ((data >> 8) & 0xff)
17#define UNCORE_PCI_DEV_IDX(data) (data & 0xff)
18#define UNCORE_EXTRA_PCI_DEV 0xff
19#define UNCORE_EXTRA_PCI_DEV_MAX 2
20
21/* support up to 8 sockets */
22#define UNCORE_SOCKET_MAX 8
23
15#define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff) 24#define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)
16 25
17/* SNB event control */ 26/* SNB event control */
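/*
 * A minimal sketch of how the new driver_data encoding round-trips; the
 * type and index values are illustrative, not taken from the id tables.
 */
static inline void uncore_pci_dev_data_check(void)
{
	unsigned long data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 3);

	BUG_ON(UNCORE_PCI_DEV_TYPE(data) != IVT_PCI_UNCORE_IMC);	/* type in bits 15:8 */
	BUG_ON(UNCORE_PCI_DEV_IDX(data) != 3);				/* index in bits 7:0 */
}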
@@ -108,6 +117,7 @@
108 (SNBEP_PMON_CTL_EV_SEL_MASK | \ 117 (SNBEP_PMON_CTL_EV_SEL_MASK | \
109 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \ 118 SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
110 SNBEP_PMON_CTL_EDGE_DET | \ 119 SNBEP_PMON_CTL_EDGE_DET | \
120 SNBEP_PMON_CTL_EV_SEL_EXT | \
111 SNBEP_PMON_CTL_INVERT | \ 121 SNBEP_PMON_CTL_INVERT | \
112 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \ 122 SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
113 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \ 123 SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index 7076878404ec..628a059a9a06 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -93,7 +93,7 @@ static void __init vmware_platform_setup(void)
93 * serial key should be enough, as this will always have a VMware 93 * serial key should be enough, as this will always have a VMware
94 * specific string when running under VMware hypervisor. 94 * specific string when running under VMware hypervisor.
95 */ 95 */
96static bool __init vmware_platform(void) 96static uint32_t __init vmware_platform(void)
97{ 97{
98 if (cpu_has_hypervisor) { 98 if (cpu_has_hypervisor) {
99 unsigned int eax; 99 unsigned int eax;
@@ -102,12 +102,12 @@ static bool __init vmware_platform(void)
102 cpuid(CPUID_VMWARE_INFO_LEAF, &eax, &hyper_vendor_id[0], 102 cpuid(CPUID_VMWARE_INFO_LEAF, &eax, &hyper_vendor_id[0],
103 &hyper_vendor_id[1], &hyper_vendor_id[2]); 103 &hyper_vendor_id[1], &hyper_vendor_id[2]);
104 if (!memcmp(hyper_vendor_id, "VMwareVMware", 12)) 104 if (!memcmp(hyper_vendor_id, "VMwareVMware", 12))
105 return true; 105 return CPUID_VMWARE_INFO_LEAF;
106 } else if (dmi_available && dmi_name_in_serial("VMware") && 106 } else if (dmi_available && dmi_name_in_serial("VMware") &&
107 __vmware_platform()) 107 __vmware_platform())
108 return true; 108 return 1;
109 109
110 return false; 110 return 0;
111} 111}
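/*
 * With this change a hypervisor detection callback reports a priority
 * rather than a plain yes/no: 0 means "not detected", and a non-zero
 * return (here the CPUID leaf that answered) can be compared across
 * platforms so the caller keeps the best match. A sketch of that caller
 * side, with hypothetical names:
 *
 *	const struct hypervisor_x86 *h, *best = NULL;
 *	uint32_t pri, max_pri = 0;
 *
 *	for_each_registered_hypervisor(h) {
 *		pri = h->detect();
 *		if (pri > max_pri) {
 *			max_pri = pri;
 *			best = h;
 *		}
 *	}
 */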
112 112
113/* 113/*
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index 74467feb4dc5..e0e0841eef45 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -128,7 +128,9 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
128 cpu_emergency_svm_disable(); 128 cpu_emergency_svm_disable();
129 129
130 lapic_shutdown(); 130 lapic_shutdown();
131#if defined(CONFIG_X86_IO_APIC) 131#ifdef CONFIG_X86_IO_APIC
132 /* Prevent crash_kexec() from deadlocking on ioapic_lock. */
133 ioapic_zap_locks();
132 disable_IO_APIC(); 134 disable_IO_APIC();
133#endif 135#endif
134#ifdef CONFIG_HPET_TIMER 136#ifdef CONFIG_HPET_TIMER
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index d32abeabbda5..174da5fc5a7b 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -658,15 +658,18 @@ __init void e820_setup_gap(void)
658 * boot_params.e820_map, others are passed via SETUP_E820_EXT node of 658 * boot_params.e820_map, others are passed via SETUP_E820_EXT node of
659 * linked list of struct setup_data, which is parsed here. 659 * linked list of struct setup_data, which is parsed here.
660 */ 660 */
661void __init parse_e820_ext(struct setup_data *sdata) 661void __init parse_e820_ext(u64 phys_addr, u32 data_len)
662{ 662{
663 int entries; 663 int entries;
664 struct e820entry *extmap; 664 struct e820entry *extmap;
665 struct setup_data *sdata;
665 666
667 sdata = early_memremap(phys_addr, data_len);
666 entries = sdata->len / sizeof(struct e820entry); 668 entries = sdata->len / sizeof(struct e820entry);
667 extmap = (struct e820entry *)(sdata->data); 669 extmap = (struct e820entry *)(sdata->data);
668 __append_e820_map(extmap, entries); 670 __append_e820_map(extmap, entries);
669 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); 671 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
672 early_iounmap(sdata, data_len);
670 printk(KERN_INFO "e820: extended physical RAM map:\n"); 673 printk(KERN_INFO "e820: extended physical RAM map:\n");
671 e820_print_map("extended"); 674 e820_print_map("extended");
672} 675}
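/*
 * The function now follows the standard early-boot mapping discipline:
 * take a physical address, map it only for as long as it is consumed,
 * and unmap before returning. Condensed, assuming a valid setup_data
 * block at phys_addr:
 *
 *	sdata = early_memremap(phys_addr, data_len);
 *	entries = sdata->len / sizeof(struct e820entry);
 *	__append_e820_map((struct e820entry *)sdata->data, entries);
 *	early_iounmap(sdata, data_len);
 *	// sdata must not be dereferenced past this point
 */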
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 94ab6b90dd3f..63bdb29b2549 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -196,15 +196,23 @@ static void __init ati_bugs_contd(int num, int slot, int func)
196static void __init intel_remapping_check(int num, int slot, int func) 196static void __init intel_remapping_check(int num, int slot, int func)
197{ 197{
198 u8 revision; 198 u8 revision;
199 u16 device;
199 200
201 device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID);
200 revision = read_pci_config_byte(num, slot, func, PCI_REVISION_ID); 202 revision = read_pci_config_byte(num, slot, func, PCI_REVISION_ID);
201 203
202 /* 204 /*
203 * Revision 0x13 of this chipset supports irq remapping 205 * Revision 0x13 of all triggering device ids in this quirk has
204 * but has an erratum that breaks its behavior, flag it as such 206 * a problem draining interrupts when irq remapping is enabled,
 207 * and should be flagged as broken. Additionally, revisions 0x12
 208 * and 0x22 of device id 0x3405 have this problem.
205 */ 209 */
206 if (revision == 0x13) 210 if (revision == 0x13)
207 set_irq_remapping_broken(); 211 set_irq_remapping_broken();
212 else if ((device == 0x3405) &&
213 ((revision == 0x12) ||
214 (revision == 0x22)))
215 set_irq_remapping_broken();
208 216
209} 217}
210 218
@@ -239,6 +247,8 @@ static struct chipset early_qrk[] __initdata = {
239 PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs_contd }, 247 PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs_contd },
240 { PCI_VENDOR_ID_INTEL, 0x3403, PCI_CLASS_BRIDGE_HOST, 248 { PCI_VENDOR_ID_INTEL, 0x3403, PCI_CLASS_BRIDGE_HOST,
241 PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check }, 249 PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
250 { PCI_VENDOR_ID_INTEL, 0x3405, PCI_CLASS_BRIDGE_HOST,
251 PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
242 { PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST, 252 { PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST,
243 PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check }, 253 PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
244 {} 254 {}
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index 138463a24877..06f87bece92a 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -29,7 +29,7 @@ static void __init i386_default_early_setup(void)
29 reserve_ebda_region(); 29 reserve_ebda_region();
30} 30}
31 31
32void __init i386_start_kernel(void) 32asmlinkage void __init i386_start_kernel(void)
33{ 33{
34 sanitize_boot_params(&boot_params); 34 sanitize_boot_params(&boot_params);
35 35
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 55b67614ed94..1be8e43b669e 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -137,7 +137,7 @@ static void __init copy_bootdata(char *real_mode_data)
137 } 137 }
138} 138}
139 139
140void __init x86_64_start_kernel(char * real_mode_data) 140asmlinkage void __init x86_64_start_kernel(char * real_mode_data)
141{ 141{
142 int i; 142 int i;
143 143
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 5dd87a89f011..81ba27679f18 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -409,6 +409,7 @@ enable_paging:
409/* 409/*
410 * Check if it is 486 410 * Check if it is 486
411 */ 411 */
412 movb $4,X86 # at least 486
412 cmpl $-1,X86_CPUID 413 cmpl $-1,X86_CPUID
413 je is486 414 je is486
414 415
@@ -436,7 +437,6 @@ enable_paging:
436 movl %edx,X86_CAPABILITY 437 movl %edx,X86_CAPABILITY
437 438
438is486: 439is486:
439 movb $4,X86
440 movl $0x50022,%ecx # set AM, WP, NE and MP 440 movl $0x50022,%ecx # set AM, WP, NE and MP
441 movl %cr0,%eax 441 movl %cr0,%eax
442 andl $0x80000011,%eax # Save PG,PE,ET 442 andl $0x80000011,%eax # Save PG,PE,ET
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index 202d24f0f7e7..5d576ab34403 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -116,7 +116,7 @@ static void mxcsr_feature_mask_init(void)
116 116
117 if (cpu_has_fxsr) { 117 if (cpu_has_fxsr) {
118 memset(&fx_scratch, 0, sizeof(struct i387_fxsave_struct)); 118 memset(&fx_scratch, 0, sizeof(struct i387_fxsave_struct));
119 asm volatile("fxsave %0" : : "m" (fx_scratch)); 119 asm volatile("fxsave %0" : "+m" (fx_scratch));
120 mask = fx_scratch.mxcsr_mask; 120 mask = fx_scratch.mxcsr_mask;
121 if (mask == 0) 121 if (mask == 0)
122 mask = 0x0000ffbf; 122 mask = 0x0000ffbf;
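/*
 * The constraint change above matters because fxsave *writes* its memory
 * operand: with the old input-only "m" constraint the compiler may treat
 * fx_scratch as unmodified across the asm and, in principle, drop the
 * preceding memset() as a dead store. Declaring the operand read-write,
 *
 *	asm volatile("fxsave %0" : "+m" (fx_scratch));
 *
 * forces the compiler to assume fx_scratch both feeds into and is
 * clobbered by the instruction.
 */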
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 3a8185c042a2..22d0687e7fda 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -177,7 +177,7 @@ u64 arch_irq_stat(void)
177 * SMP cross-CPU interrupts have their own specific 177 * SMP cross-CPU interrupts have their own specific
178 * handlers). 178 * handlers).
179 */ 179 */
180unsigned int __irq_entry do_IRQ(struct pt_regs *regs) 180__visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
181{ 181{
182 struct pt_regs *old_regs = set_irq_regs(regs); 182 struct pt_regs *old_regs = set_irq_regs(regs);
183 183
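/*
 * On the __visible sprinkling in this file (and below): __visible expands
 * to __attribute__((externally_visible)). These handlers are entered from
 * assembly stubs rather than from C callers, so without the annotation a
 * link-time-optimizing build could conclude they are unused and drop or
 * localise them.
 */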
@@ -215,7 +215,7 @@ void __smp_x86_platform_ipi(void)
215 x86_platform_ipi_callback(); 215 x86_platform_ipi_callback();
216} 216}
217 217
218void smp_x86_platform_ipi(struct pt_regs *regs) 218__visible void smp_x86_platform_ipi(struct pt_regs *regs)
219{ 219{
220 struct pt_regs *old_regs = set_irq_regs(regs); 220 struct pt_regs *old_regs = set_irq_regs(regs);
221 221
@@ -229,7 +229,7 @@ void smp_x86_platform_ipi(struct pt_regs *regs)
229/* 229/*
230 * Handler for POSTED_INTERRUPT_VECTOR. 230 * Handler for POSTED_INTERRUPT_VECTOR.
231 */ 231 */
232void smp_kvm_posted_intr_ipi(struct pt_regs *regs) 232__visible void smp_kvm_posted_intr_ipi(struct pt_regs *regs)
233{ 233{
234 struct pt_regs *old_regs = set_irq_regs(regs); 234 struct pt_regs *old_regs = set_irq_regs(regs);
235 235
@@ -247,7 +247,7 @@ void smp_kvm_posted_intr_ipi(struct pt_regs *regs)
247} 247}
248#endif 248#endif
249 249
250void smp_trace_x86_platform_ipi(struct pt_regs *regs) 250__visible void smp_trace_x86_platform_ipi(struct pt_regs *regs)
251{ 251{
252 struct pt_regs *old_regs = set_irq_regs(regs); 252 struct pt_regs *old_regs = set_irq_regs(regs);
253 253
diff --git a/arch/x86/kernel/irq_work.c b/arch/x86/kernel/irq_work.c
index 636a55e4a13c..1de84e3ab4e0 100644
--- a/arch/x86/kernel/irq_work.c
+++ b/arch/x86/kernel/irq_work.c
@@ -22,14 +22,14 @@ static inline void __smp_irq_work_interrupt(void)
22 irq_work_run(); 22 irq_work_run();
23} 23}
24 24
25void smp_irq_work_interrupt(struct pt_regs *regs) 25__visible void smp_irq_work_interrupt(struct pt_regs *regs)
26{ 26{
27 irq_work_entering_irq(); 27 irq_work_entering_irq();
28 __smp_irq_work_interrupt(); 28 __smp_irq_work_interrupt();
29 exiting_irq(); 29 exiting_irq();
30} 30}
31 31
32void smp_trace_irq_work_interrupt(struct pt_regs *regs) 32__visible void smp_trace_irq_work_interrupt(struct pt_regs *regs)
33{ 33{
34 irq_work_entering_irq(); 34 irq_work_entering_irq();
35 trace_irq_work_entry(IRQ_WORK_VECTOR); 35 trace_irq_work_entry(IRQ_WORK_VECTOR);
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
index 2889b3d43882..460f5d9ceebb 100644
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -37,7 +37,19 @@ static void __jump_label_transform(struct jump_entry *entry,
37 } else 37 } else
38 memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE); 38 memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE);
39 39
40 (*poker)((void *)entry->code, &code, JUMP_LABEL_NOP_SIZE); 40 /*
41 * Make text_poke_bp() a default fallback poker.
42 *
 43 * At the time the change is made, just ignore whether we
 44 * are doing a nop -> jump or a jump -> nop transition, and
 45 * always assume nop to be the 'currently valid' instruction.
46 *
47 */
48 if (poker)
49 (*poker)((void *)entry->code, &code, JUMP_LABEL_NOP_SIZE);
50 else
51 text_poke_bp((void *)entry->code, &code, JUMP_LABEL_NOP_SIZE,
52 (void *)entry->code + JUMP_LABEL_NOP_SIZE);
41} 53}
42 54
43void arch_jump_label_transform(struct jump_entry *entry, 55void arch_jump_label_transform(struct jump_entry *entry,
@@ -45,7 +57,7 @@ void arch_jump_label_transform(struct jump_entry *entry,
45{ 57{
46 get_online_cpus(); 58 get_online_cpus();
47 mutex_lock(&text_mutex); 59 mutex_lock(&text_mutex);
48 __jump_label_transform(entry, type, text_poke_smp); 60 __jump_label_transform(entry, type, NULL);
49 mutex_unlock(&text_mutex); 61 mutex_unlock(&text_mutex);
50 put_online_cpus(); 62 put_online_cpus();
51} 63}
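/*
 * text_poke_bp() patches live text via an int3 breakpoint protocol
 * instead of stop_machine-style synchronisation, roughly:
 *
 *	1. write int3 over the first opcode byte, sync all cores;
 *	2. write the remaining bytes of the new instruction;
 *	3. replace the int3 byte with the new first byte.
 *
 * A CPU that hits the transient int3 is redirected to the handler passed
 * as the last argument, which is why the fallback above hands in
 * entry->code + JUMP_LABEL_NOP_SIZE, i.e. "just fall through".
 */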
diff --git a/arch/x86/kernel/kprobes/common.h b/arch/x86/kernel/kprobes/common.h
index 2e9d4b5af036..c6ee63f927ab 100644
--- a/arch/x86/kernel/kprobes/common.h
+++ b/arch/x86/kernel/kprobes/common.h
@@ -82,14 +82,9 @@ extern void synthesize_reljump(void *from, void *to);
82extern void synthesize_relcall(void *from, void *to); 82extern void synthesize_relcall(void *from, void *to);
83 83
84#ifdef CONFIG_OPTPROBES 84#ifdef CONFIG_OPTPROBES
85extern int arch_init_optprobes(void);
86extern int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter); 85extern int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter);
87extern unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr); 86extern unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr);
88#else /* !CONFIG_OPTPROBES */ 87#else /* !CONFIG_OPTPROBES */
89static inline int arch_init_optprobes(void)
90{
91 return 0;
92}
93static inline int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter) 88static inline int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
94{ 89{
95 return 0; 90 return 0;
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 211bce445522..79a3f9682871 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -661,7 +661,7 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
661/* 661/*
662 * Called from kretprobe_trampoline 662 * Called from kretprobe_trampoline
663 */ 663 */
664static __used __kprobes void *trampoline_handler(struct pt_regs *regs) 664__visible __used __kprobes void *trampoline_handler(struct pt_regs *regs)
665{ 665{
666 struct kretprobe_instance *ri = NULL; 666 struct kretprobe_instance *ri = NULL;
667 struct hlist_head *head, empty_rp; 667 struct hlist_head *head, empty_rp;
@@ -1068,7 +1068,7 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
1068 1068
1069int __init arch_init_kprobes(void) 1069int __init arch_init_kprobes(void)
1070{ 1070{
1071 return arch_init_optprobes(); 1071 return 0;
1072} 1072}
1073 1073
1074int __kprobes arch_trampoline_kprobe(struct kprobe *p) 1074int __kprobes arch_trampoline_kprobe(struct kprobe *p)
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 76dc6f095724..898160b42e43 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -88,9 +88,7 @@ static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long v
88 *(unsigned long *)addr = val; 88 *(unsigned long *)addr = val;
89} 89}
90 90
91static void __used __kprobes kprobes_optinsn_template_holder(void) 91asm (
92{
93 asm volatile (
94 ".global optprobe_template_entry\n" 92 ".global optprobe_template_entry\n"
95 "optprobe_template_entry:\n" 93 "optprobe_template_entry:\n"
96#ifdef CONFIG_X86_64 94#ifdef CONFIG_X86_64
@@ -129,7 +127,6 @@ static void __used __kprobes kprobes_optinsn_template_holder(void)
129#endif 127#endif
130 ".global optprobe_template_end\n" 128 ".global optprobe_template_end\n"
131 "optprobe_template_end:\n"); 129 "optprobe_template_end:\n");
132}
133 130
134#define TMPL_MOVE_IDX \ 131#define TMPL_MOVE_IDX \
135 ((long)&optprobe_template_val - (long)&optprobe_template_entry) 132 ((long)&optprobe_template_val - (long)&optprobe_template_entry)
@@ -371,31 +368,6 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
371 return 0; 368 return 0;
372} 369}
373 370
374#define MAX_OPTIMIZE_PROBES 256
375static struct text_poke_param *jump_poke_params;
376static struct jump_poke_buffer {
377 u8 buf[RELATIVEJUMP_SIZE];
378} *jump_poke_bufs;
379
380static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
381 u8 *insn_buf,
382 struct optimized_kprobe *op)
383{
384 s32 rel = (s32)((long)op->optinsn.insn -
385 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
386
387 /* Backup instructions which will be replaced by jump address */
388 memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
389 RELATIVE_ADDR_SIZE);
390
391 insn_buf[0] = RELATIVEJUMP_OPCODE;
392 *(s32 *)(&insn_buf[1]) = rel;
393
394 tprm->addr = op->kp.addr;
395 tprm->opcode = insn_buf;
396 tprm->len = RELATIVEJUMP_SIZE;
397}
398
399/* 371/*
400 * Replace breakpoints (int3) with relative jumps. 372 * Replace breakpoints (int3) with relative jumps.
401 * Caller must call with locking kprobe_mutex and text_mutex. 373 * Caller must call with locking kprobe_mutex and text_mutex.
@@ -403,37 +375,38 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
403void __kprobes arch_optimize_kprobes(struct list_head *oplist) 375void __kprobes arch_optimize_kprobes(struct list_head *oplist)
404{ 376{
405 struct optimized_kprobe *op, *tmp; 377 struct optimized_kprobe *op, *tmp;
406 int c = 0; 378 u8 insn_buf[RELATIVEJUMP_SIZE];
407 379
408 list_for_each_entry_safe(op, tmp, oplist, list) { 380 list_for_each_entry_safe(op, tmp, oplist, list) {
381 s32 rel = (s32)((long)op->optinsn.insn -
382 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
383
409 WARN_ON(kprobe_disabled(&op->kp)); 384 WARN_ON(kprobe_disabled(&op->kp));
410 /* Setup param */ 385
411 setup_optimize_kprobe(&jump_poke_params[c], 386 /* Backup instructions which will be replaced by jump address */
412 jump_poke_bufs[c].buf, op); 387 memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
388 RELATIVE_ADDR_SIZE);
389
390 insn_buf[0] = RELATIVEJUMP_OPCODE;
391 *(s32 *)(&insn_buf[1]) = rel;
392
393 text_poke_bp(op->kp.addr, insn_buf, RELATIVEJUMP_SIZE,
394 op->optinsn.insn);
395
413 list_del_init(&op->list); 396 list_del_init(&op->list);
414 if (++c >= MAX_OPTIMIZE_PROBES)
415 break;
416 } 397 }
417
418 /*
419 * text_poke_smp doesn't support NMI/MCE code modifying.
420 * However, since kprobes itself also doesn't support NMI/MCE
421 * code probing, it's not a problem.
422 */
423 text_poke_smp_batch(jump_poke_params, c);
424} 398}
425 399
426static void __kprobes setup_unoptimize_kprobe(struct text_poke_param *tprm, 400/* Replace a relative jump with a breakpoint (int3). */
427 u8 *insn_buf, 401void __kprobes arch_unoptimize_kprobe(struct optimized_kprobe *op)
428 struct optimized_kprobe *op)
429{ 402{
403 u8 insn_buf[RELATIVEJUMP_SIZE];
404
430 /* Set int3 to first byte for kprobes */ 405 /* Set int3 to first byte for kprobes */
431 insn_buf[0] = BREAKPOINT_INSTRUCTION; 406 insn_buf[0] = BREAKPOINT_INSTRUCTION;
432 memcpy(insn_buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE); 407 memcpy(insn_buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
433 408 text_poke_bp(op->kp.addr, insn_buf, RELATIVEJUMP_SIZE,
434 tprm->addr = op->kp.addr; 409 op->optinsn.insn);
435 tprm->opcode = insn_buf;
436 tprm->len = RELATIVEJUMP_SIZE;
437} 410}
438 411
439/* 412/*
@@ -444,34 +417,11 @@ extern void arch_unoptimize_kprobes(struct list_head *oplist,
444 struct list_head *done_list) 417 struct list_head *done_list)
445{ 418{
446 struct optimized_kprobe *op, *tmp; 419 struct optimized_kprobe *op, *tmp;
447 int c = 0;
448 420
449 list_for_each_entry_safe(op, tmp, oplist, list) { 421 list_for_each_entry_safe(op, tmp, oplist, list) {
450 /* Setup param */ 422 arch_unoptimize_kprobe(op);
451 setup_unoptimize_kprobe(&jump_poke_params[c],
452 jump_poke_bufs[c].buf, op);
453 list_move(&op->list, done_list); 423 list_move(&op->list, done_list);
454 if (++c >= MAX_OPTIMIZE_PROBES)
455 break;
456 } 424 }
457
458 /*
459 * text_poke_smp doesn't support NMI/MCE code modifying.
460 * However, since kprobes itself also doesn't support NMI/MCE
461 * code probing, it's not a problem.
462 */
463 text_poke_smp_batch(jump_poke_params, c);
464}
465
466/* Replace a relative jump with a breakpoint (int3). */
467void __kprobes arch_unoptimize_kprobe(struct optimized_kprobe *op)
468{
469 u8 buf[RELATIVEJUMP_SIZE];
470
471 /* Set int3 to first byte for kprobes */
472 buf[0] = BREAKPOINT_INSTRUCTION;
473 memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
474 text_poke_smp(op->kp.addr, buf, RELATIVEJUMP_SIZE);
475} 425}
476 426
477int __kprobes 427int __kprobes
@@ -491,22 +441,3 @@ setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
491 } 441 }
492 return 0; 442 return 0;
493} 443}
494
495int __kprobes arch_init_optprobes(void)
496{
497 /* Allocate code buffer and parameter array */
498 jump_poke_bufs = kmalloc(sizeof(struct jump_poke_buffer) *
499 MAX_OPTIMIZE_PROBES, GFP_KERNEL);
500 if (!jump_poke_bufs)
501 return -ENOMEM;
502
503 jump_poke_params = kmalloc(sizeof(struct text_poke_param) *
504 MAX_OPTIMIZE_PROBES, GFP_KERNEL);
505 if (!jump_poke_params) {
506 kfree(jump_poke_bufs);
507 jump_poke_bufs = NULL;
508 return -ENOMEM;
509 }
510
511 return 0;
512}
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index a96d32cc55b8..697b93af02dd 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -34,6 +34,7 @@
34#include <linux/sched.h> 34#include <linux/sched.h>
35#include <linux/slab.h> 35#include <linux/slab.h>
36#include <linux/kprobes.h> 36#include <linux/kprobes.h>
37#include <linux/debugfs.h>
37#include <asm/timer.h> 38#include <asm/timer.h>
38#include <asm/cpu.h> 39#include <asm/cpu.h>
39#include <asm/traps.h> 40#include <asm/traps.h>
@@ -419,6 +420,7 @@ static void __init kvm_smp_prepare_boot_cpu(void)
419 WARN_ON(kvm_register_clock("primary cpu clock")); 420 WARN_ON(kvm_register_clock("primary cpu clock"));
420 kvm_guest_cpu_init(); 421 kvm_guest_cpu_init();
421 native_smp_prepare_boot_cpu(); 422 native_smp_prepare_boot_cpu();
423 kvm_spinlock_init();
422} 424}
423 425
424static void kvm_guest_cpu_online(void *dummy) 426static void kvm_guest_cpu_online(void *dummy)
@@ -498,11 +500,9 @@ void __init kvm_guest_init(void)
498#endif 500#endif
499} 501}
500 502
501static bool __init kvm_detect(void) 503static uint32_t __init kvm_detect(void)
502{ 504{
503 if (!kvm_para_available()) 505 return kvm_cpuid_base();
504 return false;
505 return true;
506} 506}
507 507
508const struct hypervisor_x86 x86_hyper_kvm __refconst = { 508const struct hypervisor_x86 x86_hyper_kvm __refconst = {
@@ -523,3 +523,263 @@ static __init int activate_jump_labels(void)
523 return 0; 523 return 0;
524} 524}
525arch_initcall(activate_jump_labels); 525arch_initcall(activate_jump_labels);
526
527#ifdef CONFIG_PARAVIRT_SPINLOCKS
528
529/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
530static void kvm_kick_cpu(int cpu)
531{
532 int apicid;
533 unsigned long flags = 0;
534
535 apicid = per_cpu(x86_cpu_to_apicid, cpu);
536 kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
537}
538
539enum kvm_contention_stat {
540 TAKEN_SLOW,
541 TAKEN_SLOW_PICKUP,
542 RELEASED_SLOW,
543 RELEASED_SLOW_KICKED,
544 NR_CONTENTION_STATS
545};
546
547#ifdef CONFIG_KVM_DEBUG_FS
548#define HISTO_BUCKETS 30
549
550static struct kvm_spinlock_stats
551{
552 u32 contention_stats[NR_CONTENTION_STATS];
553 u32 histo_spin_blocked[HISTO_BUCKETS+1];
554 u64 time_blocked;
555} spinlock_stats;
556
557static u8 zero_stats;
558
559static inline void check_zero(void)
560{
561 u8 ret;
562 u8 old;
563
564 old = ACCESS_ONCE(zero_stats);
565 if (unlikely(old)) {
566 ret = cmpxchg(&zero_stats, old, 0);
567 /* This ensures only one fellow resets the stat */
568 if (ret == old)
569 memset(&spinlock_stats, 0, sizeof(spinlock_stats));
570 }
571}
572
573static inline void add_stats(enum kvm_contention_stat var, u32 val)
574{
575 check_zero();
576 spinlock_stats.contention_stats[var] += val;
577}
578
579
580static inline u64 spin_time_start(void)
581{
582 return sched_clock();
583}
584
585static void __spin_time_accum(u64 delta, u32 *array)
586{
587 unsigned index;
588
589 index = ilog2(delta);
590 check_zero();
591
592 if (index < HISTO_BUCKETS)
593 array[index]++;
594 else
595 array[HISTO_BUCKETS]++;
596}
597
598static inline void spin_time_accum_blocked(u64 start)
599{
600 u32 delta;
601
602 delta = sched_clock() - start;
603 __spin_time_accum(delta, spinlock_stats.histo_spin_blocked);
604 spinlock_stats.time_blocked += delta;
605}
606
607static struct dentry *d_spin_debug;
608static struct dentry *d_kvm_debug;
609
610struct dentry *kvm_init_debugfs(void)
611{
612 d_kvm_debug = debugfs_create_dir("kvm", NULL);
613 if (!d_kvm_debug)
614 printk(KERN_WARNING "Could not create 'kvm' debugfs directory\n");
615
616 return d_kvm_debug;
617}
618
619static int __init kvm_spinlock_debugfs(void)
620{
621 struct dentry *d_kvm;
622
623 d_kvm = kvm_init_debugfs();
624 if (d_kvm == NULL)
625 return -ENOMEM;
626
627 d_spin_debug = debugfs_create_dir("spinlocks", d_kvm);
628
629 debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats);
630
631 debugfs_create_u32("taken_slow", 0444, d_spin_debug,
632 &spinlock_stats.contention_stats[TAKEN_SLOW]);
633 debugfs_create_u32("taken_slow_pickup", 0444, d_spin_debug,
634 &spinlock_stats.contention_stats[TAKEN_SLOW_PICKUP]);
635
636 debugfs_create_u32("released_slow", 0444, d_spin_debug,
637 &spinlock_stats.contention_stats[RELEASED_SLOW]);
638 debugfs_create_u32("released_slow_kicked", 0444, d_spin_debug,
639 &spinlock_stats.contention_stats[RELEASED_SLOW_KICKED]);
640
641 debugfs_create_u64("time_blocked", 0444, d_spin_debug,
642 &spinlock_stats.time_blocked);
643
644 debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug,
645 spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1);
646
647 return 0;
648}
649fs_initcall(kvm_spinlock_debugfs);
650#else /* !CONFIG_KVM_DEBUG_FS */
651static inline void add_stats(enum kvm_contention_stat var, u32 val)
652{
653}
654
655static inline u64 spin_time_start(void)
656{
657 return 0;
658}
659
660static inline void spin_time_accum_blocked(u64 start)
661{
662}
663#endif /* CONFIG_KVM_DEBUG_FS */
664
665struct kvm_lock_waiting {
666 struct arch_spinlock *lock;
667 __ticket_t want;
668};
669
670/* cpus 'waiting' on a spinlock to become available */
671static cpumask_t waiting_cpus;
672
673/* Track spinlock on which a cpu is waiting */
674static DEFINE_PER_CPU(struct kvm_lock_waiting, klock_waiting);
675
676static void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
677{
678 struct kvm_lock_waiting *w;
679 int cpu;
680 u64 start;
681 unsigned long flags;
682
683 if (in_nmi())
684 return;
685
686 w = &__get_cpu_var(klock_waiting);
687 cpu = smp_processor_id();
688 start = spin_time_start();
689
690 /*
691 * Make sure an interrupt handler can't upset things in a
692 * partially setup state.
693 */
694 local_irq_save(flags);
695
696 /*
697 * The ordering protocol on this is that the "lock" pointer
698 * may only be set non-NULL if the "want" ticket is correct.
699 * If we're updating "want", we must first clear "lock".
700 */
701 w->lock = NULL;
702 smp_wmb();
703 w->want = want;
704 smp_wmb();
705 w->lock = lock;
706
707 add_stats(TAKEN_SLOW, 1);
708
709 /*
710 * This uses set_bit, which is atomic but we should not rely on its
 711 * reordering guarantees, so a barrier is needed after this call.
712 */
713 cpumask_set_cpu(cpu, &waiting_cpus);
714
715 barrier();
716
717 /*
718 * Mark entry to slowpath before doing the pickup test to make
719 * sure we don't deadlock with an unlocker.
720 */
721 __ticket_enter_slowpath(lock);
722
723 /*
 724 * check again to make sure it didn't become free while
725 * we weren't looking.
726 */
727 if (ACCESS_ONCE(lock->tickets.head) == want) {
728 add_stats(TAKEN_SLOW_PICKUP, 1);
729 goto out;
730 }
731
732 /*
 733 * Halt until it's our turn and we are kicked. Note that we do a safe halt
 734 * in the irq-enabled case, to avoid hanging when the lock info is overwritten
 735 * in the irq spinlock slowpath and no spurious interrupt occurs to save us.
736 */
737 if (arch_irqs_disabled_flags(flags))
738 halt();
739 else
740 safe_halt();
741
742out:
743 cpumask_clear_cpu(cpu, &waiting_cpus);
744 w->lock = NULL;
745 local_irq_restore(flags);
746 spin_time_accum_blocked(start);
747}
748PV_CALLEE_SAVE_REGS_THUNK(kvm_lock_spinning);
749
750/* Kick vcpu waiting on @lock->head to reach value @ticket */
751static void kvm_unlock_kick(struct arch_spinlock *lock, __ticket_t ticket)
752{
753 int cpu;
754
755 add_stats(RELEASED_SLOW, 1);
756 for_each_cpu(cpu, &waiting_cpus) {
757 const struct kvm_lock_waiting *w = &per_cpu(klock_waiting, cpu);
758 if (ACCESS_ONCE(w->lock) == lock &&
759 ACCESS_ONCE(w->want) == ticket) {
760 add_stats(RELEASED_SLOW_KICKED, 1);
761 kvm_kick_cpu(cpu);
762 break;
763 }
764 }
765}
766
767/*
 768 * Set up pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
769 */
770void __init kvm_spinlock_init(void)
771{
772 if (!kvm_para_available())
773 return;
774 /* Does host kernel support KVM_FEATURE_PV_UNHALT? */
775 if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
776 return;
777
778 printk(KERN_INFO "KVM setup paravirtual spinlock\n");
779
780 static_key_slow_inc(&paravirt_ticketlocks_enabled);
781
782 pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning);
783 pv_lock_ops.unlock_kick = kvm_unlock_kick;
784}
785#endif /* CONFIG_PARAVIRT_SPINLOCKS */
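/*
 * How the two hooks registered above plug into the paravirt ticket lock,
 * in condensed form (the real fastpath lives in arch_spin_lock()):
 *
 *	inc = xadd(&lock->tickets, TICKET_LOCK_INC);	// take a ticket
 *	if (likely(inc.head == inc.tail))
 *		return;					// uncontended, done
 *	// spin for a while; if the lock is still not ours:
 *	__ticket_lock_spinning(lock, inc.tail);		// -> kvm_lock_spinning()
 *
 * and on release, once the slowpath flag is observed:
 *
 *	__ticket_unlock_kick(lock, next);		// -> kvm_unlock_kick()
 *
 * so uncontended locks still take only the xadd fastpath and hypercalls
 * stay confined to contended ones.
 */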
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index 47ebb1dbfbcb..7123b5df479d 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -145,10 +145,9 @@ static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
145 return 0; 145 return 0;
146} 146}
147 147
148static unsigned int verify_patch_size(int cpu, u32 patch_size, 148static unsigned int verify_patch_size(u8 family, u32 patch_size,
149 unsigned int size) 149 unsigned int size)
150{ 150{
151 struct cpuinfo_x86 *c = &cpu_data(cpu);
152 u32 max_size; 151 u32 max_size;
153 152
154#define F1XH_MPB_MAX_SIZE 2048 153#define F1XH_MPB_MAX_SIZE 2048
@@ -156,7 +155,7 @@ static unsigned int verify_patch_size(int cpu, u32 patch_size,
156#define F15H_MPB_MAX_SIZE 4096 155#define F15H_MPB_MAX_SIZE 4096
157#define F16H_MPB_MAX_SIZE 3458 156#define F16H_MPB_MAX_SIZE 3458
158 157
159 switch (c->x86) { 158 switch (family) {
160 case 0x14: 159 case 0x14:
161 max_size = F14H_MPB_MAX_SIZE; 160 max_size = F14H_MPB_MAX_SIZE;
162 break; 161 break;
@@ -220,12 +219,13 @@ int apply_microcode_amd(int cpu)
220 return 0; 219 return 0;
221 } 220 }
222 221
223 if (__apply_microcode_amd(mc_amd)) 222 if (__apply_microcode_amd(mc_amd)) {
224 pr_err("CPU%d: update failed for patch_level=0x%08x\n", 223 pr_err("CPU%d: update failed for patch_level=0x%08x\n",
225 cpu, mc_amd->hdr.patch_id); 224 cpu, mc_amd->hdr.patch_id);
226 else 225 return -1;
227 pr_info("CPU%d: new patch_level=0x%08x\n", cpu, 226 }
228 mc_amd->hdr.patch_id); 227 pr_info("CPU%d: new patch_level=0x%08x\n", cpu,
228 mc_amd->hdr.patch_id);
229 229
230 uci->cpu_sig.rev = mc_amd->hdr.patch_id; 230 uci->cpu_sig.rev = mc_amd->hdr.patch_id;
231 c->microcode = mc_amd->hdr.patch_id; 231 c->microcode = mc_amd->hdr.patch_id;
@@ -276,9 +276,8 @@ static void cleanup(void)
276 * driver cannot continue functioning normally. In such cases, we tear 276 * driver cannot continue functioning normally. In such cases, we tear
277 * down everything we've used up so far and exit. 277 * down everything we've used up so far and exit.
278 */ 278 */
279static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover) 279static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover)
280{ 280{
281 struct cpuinfo_x86 *c = &cpu_data(cpu);
282 struct microcode_header_amd *mc_hdr; 281 struct microcode_header_amd *mc_hdr;
283 struct ucode_patch *patch; 282 struct ucode_patch *patch;
284 unsigned int patch_size, crnt_size, ret; 283 unsigned int patch_size, crnt_size, ret;
@@ -298,7 +297,7 @@ static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover)
298 297
299 /* check if patch is for the current family */ 298 /* check if patch is for the current family */
300 proc_fam = ((proc_fam >> 8) & 0xf) + ((proc_fam >> 20) & 0xff); 299 proc_fam = ((proc_fam >> 8) & 0xf) + ((proc_fam >> 20) & 0xff);
301 if (proc_fam != c->x86) 300 if (proc_fam != family)
302 return crnt_size; 301 return crnt_size;
303 302
304 if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) { 303 if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) {
@@ -307,7 +306,7 @@ static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover)
307 return crnt_size; 306 return crnt_size;
308 } 307 }
309 308
310 ret = verify_patch_size(cpu, patch_size, leftover); 309 ret = verify_patch_size(family, patch_size, leftover);
311 if (!ret) { 310 if (!ret) {
312 pr_err("Patch-ID 0x%08x: size mismatch.\n", mc_hdr->patch_id); 311 pr_err("Patch-ID 0x%08x: size mismatch.\n", mc_hdr->patch_id);
313 return crnt_size; 312 return crnt_size;
@@ -338,7 +337,8 @@ static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover)
338 return crnt_size; 337 return crnt_size;
339} 338}
340 339
341static enum ucode_state __load_microcode_amd(int cpu, const u8 *data, size_t size) 340static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
341 size_t size)
342{ 342{
343 enum ucode_state ret = UCODE_ERROR; 343 enum ucode_state ret = UCODE_ERROR;
344 unsigned int leftover; 344 unsigned int leftover;
@@ -361,7 +361,7 @@ static enum ucode_state __load_microcode_amd(int cpu, const u8 *data, size_t siz
361 } 361 }
362 362
363 while (leftover) { 363 while (leftover) {
364 crnt_size = verify_and_add_patch(cpu, fw, leftover); 364 crnt_size = verify_and_add_patch(family, fw, leftover);
365 if (crnt_size < 0) 365 if (crnt_size < 0)
366 return ret; 366 return ret;
367 367
@@ -372,22 +372,22 @@ static enum ucode_state __load_microcode_amd(int cpu, const u8 *data, size_t siz
372 return UCODE_OK; 372 return UCODE_OK;
373} 373}
374 374
375enum ucode_state load_microcode_amd(int cpu, const u8 *data, size_t size) 375enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size)
376{ 376{
377 enum ucode_state ret; 377 enum ucode_state ret;
378 378
379 /* free old equiv table */ 379 /* free old equiv table */
380 free_equiv_cpu_table(); 380 free_equiv_cpu_table();
381 381
382 ret = __load_microcode_amd(cpu, data, size); 382 ret = __load_microcode_amd(family, data, size);
383 383
384 if (ret != UCODE_OK) 384 if (ret != UCODE_OK)
385 cleanup(); 385 cleanup();
386 386
387#if defined(CONFIG_MICROCODE_AMD_EARLY) && defined(CONFIG_X86_32) 387#if defined(CONFIG_MICROCODE_AMD_EARLY) && defined(CONFIG_X86_32)
388 /* save BSP's matching patch for early load */ 388 /* save BSP's matching patch for early load */
389 if (cpu_data(cpu).cpu_index == boot_cpu_data.cpu_index) { 389 if (cpu_data(smp_processor_id()).cpu_index == boot_cpu_data.cpu_index) {
390 struct ucode_patch *p = find_patch(cpu); 390 struct ucode_patch *p = find_patch(smp_processor_id());
391 if (p) { 391 if (p) {
392 memset(amd_bsp_mpb, 0, MPB_MAX_SIZE); 392 memset(amd_bsp_mpb, 0, MPB_MAX_SIZE);
393 memcpy(amd_bsp_mpb, p->data, min_t(u32, ksize(p->data), 393 memcpy(amd_bsp_mpb, p->data, min_t(u32, ksize(p->data),
@@ -440,7 +440,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
440 goto fw_release; 440 goto fw_release;
441 } 441 }
442 442
443 ret = load_microcode_amd(cpu, fw->data, fw->size); 443 ret = load_microcode_amd(c->x86, fw->data, fw->size);
444 444
445 fw_release: 445 fw_release:
446 release_firmware(fw); 446 release_firmware(fw);
diff --git a/arch/x86/kernel/microcode_amd_early.c b/arch/x86/kernel/microcode_amd_early.c
index 1d14ffee5749..6073104ccaa3 100644
--- a/arch/x86/kernel/microcode_amd_early.c
+++ b/arch/x86/kernel/microcode_amd_early.c
@@ -238,25 +238,17 @@ static void __init collect_cpu_sig_on_bsp(void *arg)
238 uci->cpu_sig.sig = cpuid_eax(0x00000001); 238 uci->cpu_sig.sig = cpuid_eax(0x00000001);
239} 239}
240#else 240#else
241static void collect_cpu_info_amd_early(struct cpuinfo_x86 *c, 241void load_ucode_amd_ap(void)
242 struct ucode_cpu_info *uci)
243{ 242{
243 unsigned int cpu = smp_processor_id();
244 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
244 u32 rev, eax; 245 u32 rev, eax;
245 246
246 rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax); 247 rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax);
247 eax = cpuid_eax(0x00000001); 248 eax = cpuid_eax(0x00000001);
248 249
249 uci->cpu_sig.sig = eax;
250 uci->cpu_sig.rev = rev; 250 uci->cpu_sig.rev = rev;
251 c->microcode = rev; 251 uci->cpu_sig.sig = eax;
252 c->x86 = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
253}
254
255void load_ucode_amd_ap(void)
256{
257 unsigned int cpu = smp_processor_id();
258
259 collect_cpu_info_amd_early(&cpu_data(cpu), ucode_cpu_info + cpu);
260 252
261 if (cpu && !ucode_loaded) { 253 if (cpu && !ucode_loaded) {
262 void *ucode; 254 void *ucode;
@@ -265,8 +257,10 @@ void load_ucode_amd_ap(void)
265 return; 257 return;
266 258
267 ucode = (void *)(initrd_start + ucode_offset); 259 ucode = (void *)(initrd_start + ucode_offset);
268 if (load_microcode_amd(0, ucode, ucode_size) != UCODE_OK) 260 eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
261 if (load_microcode_amd(eax, ucode, ucode_size) != UCODE_OK)
269 return; 262 return;
263
270 ucode_loaded = true; 264 ucode_loaded = true;
271 } 265 }
272 266
@@ -278,6 +272,8 @@ int __init save_microcode_in_initrd_amd(void)
278{ 272{
279 enum ucode_state ret; 273 enum ucode_state ret;
280 void *ucode; 274 void *ucode;
275 u32 eax;
276
281#ifdef CONFIG_X86_32 277#ifdef CONFIG_X86_32
282 unsigned int bsp = boot_cpu_data.cpu_index; 278 unsigned int bsp = boot_cpu_data.cpu_index;
283 struct ucode_cpu_info *uci = ucode_cpu_info + bsp; 279 struct ucode_cpu_info *uci = ucode_cpu_info + bsp;
@@ -293,7 +289,10 @@ int __init save_microcode_in_initrd_amd(void)
293 return 0; 289 return 0;
294 290
295 ucode = (void *)(initrd_start + ucode_offset); 291 ucode = (void *)(initrd_start + ucode_offset);
296 ret = load_microcode_amd(0, ucode, ucode_size); 292 eax = cpuid_eax(0x00000001);
293 eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
294
295 ret = load_microcode_amd(eax, ucode, ucode_size);
297 if (ret != UCODE_OK) 296 if (ret != UCODE_OK)
298 return -EINVAL; 297 return -EINVAL;
299 298
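/*
 * The family computation repeated in the hunks above decodes CPUID leaf 1
 * EAX: bits 11:8 hold the base family, bits 27:20 the extended family,
 * and the effective family is their sum. Worked example with an
 * illustrative value:
 *
 *	eax    = 0x00600f20;
 *	family = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
 *	// 0xf (base) + 0x6 (extended) = 0x15
 */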
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index 676b8c77a976..bbb6c7316341 100644
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -4,25 +4,17 @@
4 */ 4 */
5#include <linux/spinlock.h> 5#include <linux/spinlock.h>
6#include <linux/module.h> 6#include <linux/module.h>
7#include <linux/jump_label.h>
7 8
8#include <asm/paravirt.h> 9#include <asm/paravirt.h>
9 10
10static inline void
11default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
12{
13 arch_spin_lock(lock);
14}
15
16struct pv_lock_ops pv_lock_ops = { 11struct pv_lock_ops pv_lock_ops = {
17#ifdef CONFIG_SMP 12#ifdef CONFIG_SMP
18 .spin_is_locked = __ticket_spin_is_locked, 13 .lock_spinning = __PV_IS_CALLEE_SAVE(paravirt_nop),
19 .spin_is_contended = __ticket_spin_is_contended, 14 .unlock_kick = paravirt_nop,
20
21 .spin_lock = __ticket_spin_lock,
22 .spin_lock_flags = default_spin_lock_flags,
23 .spin_trylock = __ticket_spin_trylock,
24 .spin_unlock = __ticket_spin_unlock,
25#endif 15#endif
26}; 16};
27EXPORT_SYMBOL(pv_lock_ops); 17EXPORT_SYMBOL(pv_lock_ops);
28 18
19struct static_key paravirt_ticketlocks_enabled = STATIC_KEY_INIT_FALSE;
20EXPORT_SYMBOL(paravirt_ticketlocks_enabled);
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index cd6de64cc480..1b10af835c31 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -62,11 +62,6 @@ void __init default_banner(void)
62 pv_info.name); 62 pv_info.name);
63} 63}
64 64
65/* Simple instruction patching code. */
66#define DEF_NATIVE(ops, name, code) \
67 extern const char start_##ops##_##name[], end_##ops##_##name[]; \
68 asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
69
70/* Undefined instruction for dealing with missing ops pointers. */ 65/* Undefined instruction for dealing with missing ops pointers. */
71static const unsigned char ud2a[] = { 0x0f, 0x0b }; 66static const unsigned char ud2a[] = { 0x0f, 0x0b };
72 67
@@ -324,7 +319,7 @@ struct pv_time_ops pv_time_ops = {
324 .steal_clock = native_steal_clock, 319 .steal_clock = native_steal_clock,
325}; 320};
326 321
327struct pv_irq_ops pv_irq_ops = { 322__visible struct pv_irq_ops pv_irq_ops = {
328 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl), 323 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
329 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl), 324 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
330 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable), 325 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
@@ -336,7 +331,7 @@ struct pv_irq_ops pv_irq_ops = {
336#endif 331#endif
337}; 332};
338 333
339struct pv_cpu_ops pv_cpu_ops = { 334__visible struct pv_cpu_ops pv_cpu_ops = {
340 .cpuid = native_cpuid, 335 .cpuid = native_cpuid,
341 .get_debugreg = native_get_debugreg, 336 .get_debugreg = native_get_debugreg,
342 .set_debugreg = native_set_debugreg, 337 .set_debugreg = native_set_debugreg,
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 83369e5a1d27..c83516be1052 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -36,7 +36,7 @@
36 * section. Since TSS's are completely CPU-local, we want them 36 * section. Since TSS's are completely CPU-local, we want them
37 * on exact cacheline boundaries, to eliminate cacheline ping-pong. 37 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
38 */ 38 */
39DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS; 39__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
40 40
41#ifdef CONFIG_X86_64 41#ifdef CONFIG_X86_64
42static DEFINE_PER_CPU(unsigned char, is_idle); 42static DEFINE_PER_CPU(unsigned char, is_idle);
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index f8adefca71dc..884f98f69354 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(start_thread);
247 * the task-switch, and shows up in ret_from_fork in entry.S, 247 * the task-switch, and shows up in ret_from_fork in entry.S,
248 * for example. 248 * for example.
249 */ 249 */
250__notrace_funcgraph struct task_struct * 250__visible __notrace_funcgraph struct task_struct *
251__switch_to(struct task_struct *prev_p, struct task_struct *next_p) 251__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
252{ 252{
253 struct thread_struct *prev = &prev_p->thread, 253 struct thread_struct *prev = &prev_p->thread,
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 05646bab4ca6..bb1dc51bab05 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -52,7 +52,7 @@
52 52
53asmlinkage extern void ret_from_fork(void); 53asmlinkage extern void ret_from_fork(void);
54 54
55DEFINE_PER_CPU(unsigned long, old_rsp); 55asmlinkage DEFINE_PER_CPU(unsigned long, old_rsp);
56 56
57/* Prints also some state that isn't saved in the pt_regs */ 57/* Prints also some state that isn't saved in the pt_regs */
58void __show_regs(struct pt_regs *regs, int all) 58void __show_regs(struct pt_regs *regs, int all)
@@ -274,7 +274,7 @@ void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp)
274 * Kprobes not supported here. Set the probe on schedule instead. 274 * Kprobes not supported here. Set the probe on schedule instead.
275 * Function graph tracer not supported too. 275 * Function graph tracer not supported too.
276 */ 276 */
277__notrace_funcgraph struct task_struct * 277__visible __notrace_funcgraph struct task_struct *
278__switch_to(struct task_struct *prev_p, struct task_struct *next_p) 278__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
279{ 279{
280 struct thread_struct *prev = &prev_p->thread; 280 struct thread_struct *prev = &prev_p->thread;
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
index 2cb9470ea85b..a16bae3f83b3 100644
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -128,46 +128,7 @@ void pvclock_read_wallclock(struct pvclock_wall_clock *wall_clock,
128 set_normalized_timespec(ts, now.tv_sec, now.tv_nsec); 128 set_normalized_timespec(ts, now.tv_sec, now.tv_nsec);
129} 129}
130 130
131static struct pvclock_vsyscall_time_info *pvclock_vdso_info;
132
133static struct pvclock_vsyscall_time_info *
134pvclock_get_vsyscall_user_time_info(int cpu)
135{
136 if (!pvclock_vdso_info) {
137 BUG();
138 return NULL;
139 }
140
141 return &pvclock_vdso_info[cpu];
142}
143
144struct pvclock_vcpu_time_info *pvclock_get_vsyscall_time_info(int cpu)
145{
146 return &pvclock_get_vsyscall_user_time_info(cpu)->pvti;
147}
148
149#ifdef CONFIG_X86_64 131#ifdef CONFIG_X86_64
150static int pvclock_task_migrate(struct notifier_block *nb, unsigned long l,
151 void *v)
152{
153 struct task_migration_notifier *mn = v;
154 struct pvclock_vsyscall_time_info *pvti;
155
156 pvti = pvclock_get_vsyscall_user_time_info(mn->from_cpu);
157
158 /* this is NULL when pvclock vsyscall is not initialized */
159 if (unlikely(pvti == NULL))
160 return NOTIFY_DONE;
161
162 pvti->migrate_count++;
163
164 return NOTIFY_DONE;
165}
166
167static struct notifier_block pvclock_migrate = {
168 .notifier_call = pvclock_task_migrate,
169};
170
171/* 132/*
172 * Initialize the generic pvclock vsyscall state. This will allocate 133 * Initialize the generic pvclock vsyscall state. This will allocate
173 * a/some page(s) for the per-vcpu pvclock information, set up a 134 * a/some page(s) for the per-vcpu pvclock information, set up a
@@ -181,17 +142,12 @@ int __init pvclock_init_vsyscall(struct pvclock_vsyscall_time_info *i,
181 142
182 WARN_ON (size != PVCLOCK_VSYSCALL_NR_PAGES*PAGE_SIZE); 143 WARN_ON (size != PVCLOCK_VSYSCALL_NR_PAGES*PAGE_SIZE);
183 144
184 pvclock_vdso_info = i;
185
186 for (idx = 0; idx <= (PVCLOCK_FIXMAP_END-PVCLOCK_FIXMAP_BEGIN); idx++) { 145 for (idx = 0; idx <= (PVCLOCK_FIXMAP_END-PVCLOCK_FIXMAP_BEGIN); idx++) {
187 __set_fixmap(PVCLOCK_FIXMAP_BEGIN + idx, 146 __set_fixmap(PVCLOCK_FIXMAP_BEGIN + idx,
188 __pa(i) + (idx*PAGE_SIZE), 147 __pa(i) + (idx*PAGE_SIZE),
189 PAGE_KERNEL_VVAR); 148 PAGE_KERNEL_VVAR);
190 } 149 }
191 150
192
193 register_task_migration_notifier(&pvclock_migrate);
194
195 return 0; 151 return 0;
196} 152}
197#endif 153#endif
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index f8ec57815c05..f0de6294b955 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -206,9 +206,9 @@ EXPORT_SYMBOL(boot_cpu_data);
206 206
207 207
208#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64) 208#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
209unsigned long mmu_cr4_features; 209__visible unsigned long mmu_cr4_features;
210#else 210#else
211unsigned long mmu_cr4_features = X86_CR4_PAE; 211__visible unsigned long mmu_cr4_features = X86_CR4_PAE;
212#endif 212#endif
213 213
214/* Boot loader ID and version as integers, for the benefit of proc_dointvec */ 214/* Boot loader ID and version as integers, for the benefit of proc_dointvec */
@@ -426,25 +426,23 @@ static void __init reserve_initrd(void)
426static void __init parse_setup_data(void) 426static void __init parse_setup_data(void)
427{ 427{
428 struct setup_data *data; 428 struct setup_data *data;
429 u64 pa_data; 429 u64 pa_data, pa_next;
430 430
431 pa_data = boot_params.hdr.setup_data; 431 pa_data = boot_params.hdr.setup_data;
432 while (pa_data) { 432 while (pa_data) {
433 u32 data_len, map_len; 433 u32 data_len, map_len, data_type;
434 434
435 map_len = max(PAGE_SIZE - (pa_data & ~PAGE_MASK), 435 map_len = max(PAGE_SIZE - (pa_data & ~PAGE_MASK),
436 (u64)sizeof(struct setup_data)); 436 (u64)sizeof(struct setup_data));
437 data = early_memremap(pa_data, map_len); 437 data = early_memremap(pa_data, map_len);
438 data_len = data->len + sizeof(struct setup_data); 438 data_len = data->len + sizeof(struct setup_data);
439 if (data_len > map_len) { 439 data_type = data->type;
440 early_iounmap(data, map_len); 440 pa_next = data->next;
441 data = early_memremap(pa_data, data_len); 441 early_iounmap(data, map_len);
442 map_len = data_len;
443 }
444 442
445 switch (data->type) { 443 switch (data_type) {
446 case SETUP_E820_EXT: 444 case SETUP_E820_EXT:
447 parse_e820_ext(data); 445 parse_e820_ext(pa_data, data_len);
448 break; 446 break;
449 case SETUP_DTB: 447 case SETUP_DTB:
450 add_dtb(pa_data); 448 add_dtb(pa_data);
@@ -452,8 +450,7 @@ static void __init parse_setup_data(void)
452 default: 450 default:
453 break; 451 break;
454 } 452 }
455 pa_data = data->next; 453 pa_data = pa_next;
456 early_iounmap(data, map_len);
457 } 454 }
458} 455}
459 456
@@ -1070,7 +1067,7 @@ void __init setup_arch(char **cmdline_p)
1070 1067
1071 cleanup_highmap(); 1068 cleanup_highmap();
1072 1069
1073 memblock.current_limit = ISA_END_ADDRESS; 1070 memblock_set_current_limit(ISA_END_ADDRESS);
1074 memblock_x86_fill(); 1071 memblock_x86_fill();
1075 1072
1076 /* 1073 /*
@@ -1103,7 +1100,7 @@ void __init setup_arch(char **cmdline_p)
1103 1100
1104 setup_real_mode(); 1101 setup_real_mode();
1105 1102
1106 memblock.current_limit = get_max_mapped(); 1103 memblock_set_current_limit(get_max_mapped());
1107 dma_contiguous_reserve(0); 1104 dma_contiguous_reserve(0);
1108 1105
1109 /* 1106 /*
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index cf913587d4dd..9e5de6813e1f 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -358,7 +358,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
358 else 358 else
359 put_user_ex(0, &frame->uc.uc_flags); 359 put_user_ex(0, &frame->uc.uc_flags);
360 put_user_ex(0, &frame->uc.uc_link); 360 put_user_ex(0, &frame->uc.uc_link);
361 err |= __save_altstack(&frame->uc.uc_stack, regs->sp); 361 save_altstack_ex(&frame->uc.uc_stack, regs->sp);
362 362
363 /* Set up to return from userspace. */ 363 /* Set up to return from userspace. */
364 restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn); 364 restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
@@ -423,7 +423,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
423 else 423 else
424 put_user_ex(0, &frame->uc.uc_flags); 424 put_user_ex(0, &frame->uc.uc_flags);
425 put_user_ex(0, &frame->uc.uc_link); 425 put_user_ex(0, &frame->uc.uc_link);
426 err |= __save_altstack(&frame->uc.uc_stack, regs->sp); 426 save_altstack_ex(&frame->uc.uc_stack, regs->sp);
427 427
428 /* Set up to return from userspace. If provided, use a stub 428 /* Set up to return from userspace. If provided, use a stub
429 already in userspace. */ 429 already in userspace. */
@@ -490,7 +490,7 @@ static int x32_setup_rt_frame(struct ksignal *ksig,
490 else 490 else
491 put_user_ex(0, &frame->uc.uc_flags); 491 put_user_ex(0, &frame->uc.uc_flags);
492 put_user_ex(0, &frame->uc.uc_link); 492 put_user_ex(0, &frame->uc.uc_link);
493 err |= __compat_save_altstack(&frame->uc.uc_stack, regs->sp); 493 compat_save_altstack_ex(&frame->uc.uc_stack, regs->sp);
494 put_user_ex(0, &frame->uc.uc__pad0); 494 put_user_ex(0, &frame->uc.uc__pad0);
495 495
496 if (ksig->ka.sa.sa_flags & SA_RESTORER) { 496 if (ksig->ka.sa.sa_flags & SA_RESTORER) {
@@ -533,7 +533,7 @@ static int x32_setup_rt_frame(struct ksignal *ksig,
533 * Do a signal return; undo the signal stack. 533 * Do a signal return; undo the signal stack.
534 */ 534 */
535#ifdef CONFIG_X86_32 535#ifdef CONFIG_X86_32
536unsigned long sys_sigreturn(void) 536asmlinkage unsigned long sys_sigreturn(void)
537{ 537{
538 struct pt_regs *regs = current_pt_regs(); 538 struct pt_regs *regs = current_pt_regs();
539 struct sigframe __user *frame; 539 struct sigframe __user *frame;
@@ -562,7 +562,7 @@ badframe:
562} 562}
563#endif /* CONFIG_X86_32 */ 563#endif /* CONFIG_X86_32 */
564 564
565long sys_rt_sigreturn(void) 565asmlinkage long sys_rt_sigreturn(void)
566{ 566{
567 struct pt_regs *regs = current_pt_regs(); 567 struct pt_regs *regs = current_pt_regs();
568 struct rt_sigframe __user *frame; 568 struct rt_sigframe __user *frame;
@@ -728,7 +728,7 @@ static void do_signal(struct pt_regs *regs)
728 * notification of userspace execution resumption 728 * notification of userspace execution resumption
729 * - triggered by the TIF_WORK_MASK flags 729 * - triggered by the TIF_WORK_MASK flags
730 */ 730 */
731void 731__visible void
732do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags) 732do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
733{ 733{
734 user_exit(); 734 user_exit();
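The signal.c hunks tag entry points reached only from assembly with asmlinkage and __visible: with link-time optimization the compiler sees no C callers and may otherwise drop or localize them. A minimal sketch of the annotation involved, assuming the usual GCC spelling (the kernel's real definition lives in its compiler headers):

/* externally_visible keeps LTO from discarding or internalizing a
 * function whose only callers are in assembly. Name is illustrative
 * to avoid claiming the kernel macro. */
#define __visible_demo __attribute__((externally_visible))

__visible_demo long sys_rt_sigreturn_demo(void)
{
	/* reached only from an assembly entry stub */
	return 0;
}

int main(void)
{
	return (int)sys_rt_sigreturn_demo();
}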
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index cdaa347dfcad..7c3a5a61f2e4 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -256,7 +256,7 @@ static inline void __smp_reschedule_interrupt(void)
256 scheduler_ipi(); 256 scheduler_ipi();
257} 257}
258 258
259void smp_reschedule_interrupt(struct pt_regs *regs) 259__visible void smp_reschedule_interrupt(struct pt_regs *regs)
260{ 260{
261 ack_APIC_irq(); 261 ack_APIC_irq();
262 __smp_reschedule_interrupt(); 262 __smp_reschedule_interrupt();
@@ -271,7 +271,7 @@ static inline void smp_entering_irq(void)
271 irq_enter(); 271 irq_enter();
272} 272}
273 273
274void smp_trace_reschedule_interrupt(struct pt_regs *regs) 274__visible void smp_trace_reschedule_interrupt(struct pt_regs *regs)
275{ 275{
276 /* 276 /*
277 * Need to call irq_enter() before calling the trace point. 277 * Need to call irq_enter() before calling the trace point.
@@ -295,14 +295,14 @@ static inline void __smp_call_function_interrupt(void)
295 inc_irq_stat(irq_call_count); 295 inc_irq_stat(irq_call_count);
296} 296}
297 297
298void smp_call_function_interrupt(struct pt_regs *regs) 298__visible void smp_call_function_interrupt(struct pt_regs *regs)
299{ 299{
300 smp_entering_irq(); 300 smp_entering_irq();
301 __smp_call_function_interrupt(); 301 __smp_call_function_interrupt();
302 exiting_irq(); 302 exiting_irq();
303} 303}
304 304
305void smp_trace_call_function_interrupt(struct pt_regs *regs) 305__visible void smp_trace_call_function_interrupt(struct pt_regs *regs)
306{ 306{
307 smp_entering_irq(); 307 smp_entering_irq();
308 trace_call_function_entry(CALL_FUNCTION_VECTOR); 308 trace_call_function_entry(CALL_FUNCTION_VECTOR);
@@ -317,14 +317,14 @@ static inline void __smp_call_function_single_interrupt(void)
317 inc_irq_stat(irq_call_count); 317 inc_irq_stat(irq_call_count);
318} 318}
319 319
320void smp_call_function_single_interrupt(struct pt_regs *regs) 320__visible void smp_call_function_single_interrupt(struct pt_regs *regs)
321{ 321{
322 smp_entering_irq(); 322 smp_entering_irq();
323 __smp_call_function_single_interrupt(); 323 __smp_call_function_single_interrupt();
324 exiting_irq(); 324 exiting_irq();
325} 325}
326 326
327void smp_trace_call_function_single_interrupt(struct pt_regs *regs) 327__visible void smp_trace_call_function_single_interrupt(struct pt_regs *regs)
328{ 328{
329 smp_entering_irq(); 329 smp_entering_irq();
330 trace_call_function_single_entry(CALL_FUNCTION_SINGLE_VECTOR); 330 trace_call_function_single_entry(CALL_FUNCTION_SINGLE_VECTOR);
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index dbded5aedb81..30277e27431a 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
101 *begin = new_begin; 101 *begin = new_begin;
102 } 102 }
103 } else { 103 } else {
104 *begin = TASK_UNMAPPED_BASE; 104 *begin = current->mm->mmap_legacy_base;
105 *end = TASK_SIZE; 105 *end = TASK_SIZE;
106 } 106 }
107} 107}
diff --git a/arch/x86/kernel/syscall_32.c b/arch/x86/kernel/syscall_32.c
index 147fcd4941c4..e9bcd57d8a9e 100644
--- a/arch/x86/kernel/syscall_32.c
+++ b/arch/x86/kernel/syscall_32.c
@@ -15,7 +15,7 @@ typedef asmlinkage void (*sys_call_ptr_t)(void);
15 15
16extern asmlinkage void sys_ni_syscall(void); 16extern asmlinkage void sys_ni_syscall(void);
17 17
18const sys_call_ptr_t sys_call_table[__NR_syscall_max+1] = { 18__visible const sys_call_ptr_t sys_call_table[__NR_syscall_max+1] = {
19 /* 19 /*
20 * Smells like a compiler bug -- it doesn't work 20 * Smells like a compiler bug -- it doesn't work
21 * when the & below is removed. 21 * when the & below is removed.
diff --git a/arch/x86/kernel/syscall_64.c b/arch/x86/kernel/syscall_64.c
index 5c7f8c20da74..4ac730b37f0b 100644
--- a/arch/x86/kernel/syscall_64.c
+++ b/arch/x86/kernel/syscall_64.c
@@ -4,6 +4,7 @@
4#include <linux/sys.h> 4#include <linux/sys.h>
5#include <linux/cache.h> 5#include <linux/cache.h>
6#include <asm/asm-offsets.h> 6#include <asm/asm-offsets.h>
7#include <asm/syscall.h>
7 8
8#define __SYSCALL_COMMON(nr, sym, compat) __SYSCALL_64(nr, sym, compat) 9#define __SYSCALL_COMMON(nr, sym, compat) __SYSCALL_64(nr, sym, compat)
9 10
@@ -19,11 +20,9 @@
19 20
20#define __SYSCALL_64(nr, sym, compat) [nr] = sym, 21#define __SYSCALL_64(nr, sym, compat) [nr] = sym,
21 22
22typedef void (*sys_call_ptr_t)(void);
23
24extern void sys_ni_syscall(void); 23extern void sys_ni_syscall(void);
25 24
26const sys_call_ptr_t sys_call_table[__NR_syscall_max+1] = { 25asmlinkage const sys_call_ptr_t sys_call_table[__NR_syscall_max+1] = {
27 /* 26 /*
28 * Smells like a compiler bug -- it doesn't work 27 * Smells like a compiler bug -- it doesn't work
29 * when the & below is removed. 28 * when the & below is removed.
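Both syscall tables are plain arrays of function pointers indexed by syscall number; the entry code dispatches through sys_call_table[nr]. A standalone restatement of the dispatch-table pattern, with designated initializers as used above:

#include <stdio.h>

typedef long (*sys_call_ptr_t)(long, long, long);

static long sys_ni_syscall(long a, long b, long c)
{
	return -38; /* -ENOSYS: unimplemented slot */
}

static long sys_getpid_demo(long a, long b, long c)
{
	return 1234;
}

/* Designated initializers fill known slots; the kernel pre-fills every
 * slot with sys_ni_syscall so unassigned numbers fail cleanly. */
static const sys_call_ptr_t table[4] = {
	[0] = sys_ni_syscall,
	[1] = sys_getpid_demo,
	[2] = sys_ni_syscall,
	[3] = sys_ni_syscall,
};

int main(void)
{
	printf("%ld\n", table[1](0, 0, 0)); /* dispatch by number */
	return 0;
}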
diff --git a/arch/x86/kernel/sysfb.c b/arch/x86/kernel/sysfb.c
new file mode 100644
index 000000000000..193ec2ce46c7
--- /dev/null
+++ b/arch/x86/kernel/sysfb.c
@@ -0,0 +1,74 @@
1/*
2 * Generic System Framebuffers on x86
3 * Copyright (c) 2012-2013 David Herrmann <dh.herrmann@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the Free
7 * Software Foundation; either version 2 of the License, or (at your option)
8 * any later version.
9 */
10
11/*
12 * Simple-Framebuffer support for x86 systems
13 * Create a platform-device for any available boot framebuffer. The
14 * simple-framebuffer platform device is already available on DT systems, so
15 * this module parses the global "screen_info" object and creates a suitable
16 * platform device compatible with the "simple-framebuffer" DT object. If
17 * the framebuffer is incompatible, we instead create a legacy
18 * "vesa-framebuffer", "efi-framebuffer" or "platform-framebuffer" device and
19 * pass the screen_info as platform_data. This allows legacy drivers
20 * to pick these devices up without messing with simple-framebuffer drivers.
21 * The global "screen_info" is still valid at all times.
22 *
23 * If CONFIG_X86_SYSFB is not selected, we never register "simple-framebuffer"
24 * platform devices, but only use legacy framebuffer devices for
25 * backwards compatibility.
26 *
27 * TODO: We set the dev_id field of all platform-devices to 0. This allows
28 * other x86 OF/DT parsers to create such devices, too. However, they must
29 * start at offset 1 for this to work.
30 */
31
32#include <linux/err.h>
33#include <linux/init.h>
34#include <linux/kernel.h>
35#include <linux/mm.h>
36#include <linux/platform_data/simplefb.h>
37#include <linux/platform_device.h>
38#include <linux/screen_info.h>
39#include <asm/sysfb.h>
40
41static __init int sysfb_init(void)
42{
43 struct screen_info *si = &screen_info;
44 struct simplefb_platform_data mode;
45 struct platform_device *pd;
46 const char *name;
47 bool compatible;
48 int ret;
49
50 sysfb_apply_efi_quirks();
51
52 /* try to create a simple-framebuffer device */
53 compatible = parse_mode(si, &mode);
54 if (compatible) {
55 ret = create_simplefb(si, &mode);
56 if (!ret)
57 return 0;
58 }
59
60 /* if the FB is incompatible, create a legacy framebuffer device */
61 if (si->orig_video_isVGA == VIDEO_TYPE_EFI)
62 name = "efi-framebuffer";
63 else if (si->orig_video_isVGA == VIDEO_TYPE_VLFB)
64 name = "vesa-framebuffer";
65 else
66 name = "platform-framebuffer";
67
68 pd = platform_device_register_resndata(NULL, name, 0,
69 NULL, 0, si, sizeof(*si));
70 return IS_ERR(pd) ? PTR_ERR(pd) : 0;
71}
72
73/* must execute after PCI subsystem for EFI quirks */
74device_initcall(sysfb_init);
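With sysfb.c in place, a driver binds to the registered device by platform-device name. A hedged sketch of the consuming side under the standard platform-bus API (field names follow the common kernel interface; treat it as illustrative, not a drop-in driver):

#include <linux/module.h>
#include <linux/platform_data/simplefb.h>
#include <linux/platform_device.h>

/* Probe runs when the device named "simple-framebuffer" registered by
 * sysfb_init() matches this driver's name. */
static int simplefb_demo_probe(struct platform_device *pdev)
{
	struct simplefb_platform_data *pd = dev_get_platdata(&pdev->dev);

	if (!pd)
		return -ENODEV;
	dev_info(&pdev->dev, "%ux%u, stride %u, format %s\n",
		 pd->width, pd->height, pd->stride, pd->format);
	return 0;
}

static struct platform_driver simplefb_demo_driver = {
	.probe	= simplefb_demo_probe,
	.driver	= { .name = "simple-framebuffer" },
};
module_platform_driver(simplefb_demo_driver);
MODULE_LICENSE("GPL");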
diff --git a/arch/x86/kernel/sysfb_efi.c b/arch/x86/kernel/sysfb_efi.c
new file mode 100644
index 000000000000..b285d4e8c68e
--- /dev/null
+++ b/arch/x86/kernel/sysfb_efi.c
@@ -0,0 +1,214 @@
1/*
2 * Generic System Framebuffers on x86
3 * Copyright (c) 2012-2013 David Herrmann <dh.herrmann@gmail.com>
4 *
5 * EFI Quirks Copyright (c) 2006 Edgar Hucek <gimli@dark-green.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the Free
9 * Software Foundation; either version 2 of the License, or (at your option)
10 * any later version.
11 */
12
13/*
14 * EFI Quirks
15 * Several EFI systems do not correctly advertise their boot framebuffers.
16 * Hence, we use this static table of known broken machines and fix up the
17 * information so framebuffer drivers can load correctly.
18 */
19
20#include <linux/dmi.h>
21#include <linux/err.h>
22#include <linux/init.h>
23#include <linux/kernel.h>
24#include <linux/mm.h>
25#include <linux/pci.h>
26#include <linux/screen_info.h>
27#include <video/vga.h>
28#include <asm/sysfb.h>
29
30enum {
31 OVERRIDE_NONE = 0x0,
32 OVERRIDE_BASE = 0x1,
33 OVERRIDE_STRIDE = 0x2,
34 OVERRIDE_HEIGHT = 0x4,
35 OVERRIDE_WIDTH = 0x8,
36};
37
38struct efifb_dmi_info efifb_dmi_list[] = {
39 [M_I17] = { "i17", 0x80010000, 1472 * 4, 1440, 900, OVERRIDE_NONE },
40 [M_I20] = { "i20", 0x80010000, 1728 * 4, 1680, 1050, OVERRIDE_NONE }, /* guess */
41 [M_I20_SR] = { "imac7", 0x40010000, 1728 * 4, 1680, 1050, OVERRIDE_NONE },
42 [M_I24] = { "i24", 0x80010000, 2048 * 4, 1920, 1200, OVERRIDE_NONE }, /* guess */
43 [M_I24_8_1] = { "imac8", 0xc0060000, 2048 * 4, 1920, 1200, OVERRIDE_NONE },
44 [M_I24_10_1] = { "imac10", 0xc0010000, 2048 * 4, 1920, 1080, OVERRIDE_NONE },
45 [M_I27_11_1] = { "imac11", 0xc0010000, 2560 * 4, 2560, 1440, OVERRIDE_NONE },
46 [M_MINI] = { "mini", 0x80000000, 2048 * 4, 1024, 768, OVERRIDE_NONE },
47 [M_MINI_3_1] = { "mini31", 0x40010000, 1024 * 4, 1024, 768, OVERRIDE_NONE },
48 [M_MINI_4_1] = { "mini41", 0xc0010000, 2048 * 4, 1920, 1200, OVERRIDE_NONE },
49 [M_MB] = { "macbook", 0x80000000, 2048 * 4, 1280, 800, OVERRIDE_NONE },
50 [M_MB_5_1] = { "macbook51", 0x80010000, 2048 * 4, 1280, 800, OVERRIDE_NONE },
51 [M_MB_6_1] = { "macbook61", 0x80010000, 2048 * 4, 1280, 800, OVERRIDE_NONE },
52 [M_MB_7_1] = { "macbook71", 0x80010000, 2048 * 4, 1280, 800, OVERRIDE_NONE },
53 [M_MBA] = { "mba", 0x80000000, 2048 * 4, 1280, 800, OVERRIDE_NONE },
54 /* 11" Macbook Air 3,1 passes the wrong stride */
55 [M_MBA_3] = { "mba3", 0, 2048 * 4, 0, 0, OVERRIDE_STRIDE },
56 [M_MBP] = { "mbp", 0x80010000, 1472 * 4, 1440, 900, OVERRIDE_NONE },
57 [M_MBP_2] = { "mbp2", 0, 0, 0, 0, OVERRIDE_NONE }, /* placeholder */
58 [M_MBP_2_2] = { "mbp22", 0x80010000, 1472 * 4, 1440, 900, OVERRIDE_NONE },
59 [M_MBP_SR] = { "mbp3", 0x80030000, 2048 * 4, 1440, 900, OVERRIDE_NONE },
60 [M_MBP_4] = { "mbp4", 0xc0060000, 2048 * 4, 1920, 1200, OVERRIDE_NONE },
61 [M_MBP_5_1] = { "mbp51", 0xc0010000, 2048 * 4, 1440, 900, OVERRIDE_NONE },
62 [M_MBP_5_2] = { "mbp52", 0xc0010000, 2048 * 4, 1920, 1200, OVERRIDE_NONE },
63 [M_MBP_5_3] = { "mbp53", 0xd0010000, 2048 * 4, 1440, 900, OVERRIDE_NONE },
64 [M_MBP_6_1] = { "mbp61", 0x90030000, 2048 * 4, 1920, 1200, OVERRIDE_NONE },
65 [M_MBP_6_2] = { "mbp62", 0x90030000, 2048 * 4, 1680, 1050, OVERRIDE_NONE },
66 [M_MBP_7_1] = { "mbp71", 0xc0010000, 2048 * 4, 1280, 800, OVERRIDE_NONE },
67 [M_MBP_8_2] = { "mbp82", 0x90010000, 1472 * 4, 1440, 900, OVERRIDE_NONE },
68 [M_UNKNOWN] = { NULL, 0, 0, 0, 0, OVERRIDE_NONE }
69};
70
71#define choose_value(dmivalue, fwvalue, field, flags) ({ \
72 typeof(fwvalue) _ret_ = fwvalue; \
73 if ((flags) & (field)) \
74 _ret_ = dmivalue; \
75 else if ((fwvalue) == 0) \
76 _ret_ = dmivalue; \
77 _ret_; \
78 })
79
80static int __init efifb_set_system(const struct dmi_system_id *id)
81{
82 struct efifb_dmi_info *info = id->driver_data;
83
84 if (info->base == 0 && info->height == 0 && info->width == 0 &&
85 info->stride == 0)
86 return 0;
87
88 /* Trust the bootloader over the DMI tables */
89 if (screen_info.lfb_base == 0) {
90#if defined(CONFIG_PCI)
91 struct pci_dev *dev = NULL;
92 int found_bar = 0;
93#endif
94 if (info->base) {
95 screen_info.lfb_base = choose_value(info->base,
96 screen_info.lfb_base, OVERRIDE_BASE,
97 info->flags);
98
99#if defined(CONFIG_PCI)
100 /* make sure that the address in the table is actually
101 * on a VGA device's PCI BAR */
102
103 for_each_pci_dev(dev) {
104 int i;
105 if ((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
106 continue;
107 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
108 resource_size_t start, end;
109
110 start = pci_resource_start(dev, i);
111 if (start == 0)
112 break;
113 end = pci_resource_end(dev, i);
114 if (screen_info.lfb_base >= start &&
115 screen_info.lfb_base < end) {
116 found_bar = 1;
117 }
118 }
119 }
120 if (!found_bar)
121 screen_info.lfb_base = 0;
122#endif
123 }
124 }
125 if (screen_info.lfb_base) {
126 screen_info.lfb_linelength = choose_value(info->stride,
127 screen_info.lfb_linelength, OVERRIDE_STRIDE,
128 info->flags);
129 screen_info.lfb_width = choose_value(info->width,
130 screen_info.lfb_width, OVERRIDE_WIDTH,
131 info->flags);
132 screen_info.lfb_height = choose_value(info->height,
133 screen_info.lfb_height, OVERRIDE_HEIGHT,
134 info->flags);
135 if (screen_info.orig_video_isVGA == 0)
136 screen_info.orig_video_isVGA = VIDEO_TYPE_EFI;
137 } else {
138 screen_info.lfb_linelength = 0;
139 screen_info.lfb_width = 0;
140 screen_info.lfb_height = 0;
141 screen_info.orig_video_isVGA = 0;
142 return 0;
143 }
144
145 printk(KERN_INFO "efifb: dmi detected %s - framebuffer at 0x%08x "
146 "(%dx%d, stride %d)\n", id->ident,
147 screen_info.lfb_base, screen_info.lfb_width,
148 screen_info.lfb_height, screen_info.lfb_linelength);
149
150 return 1;
151}
152
153#define EFIFB_DMI_SYSTEM_ID(vendor, name, enumid) \
154 { \
155 efifb_set_system, \
156 name, \
157 { \
158 DMI_MATCH(DMI_BIOS_VENDOR, vendor), \
159 DMI_MATCH(DMI_PRODUCT_NAME, name) \
160 }, \
161 &efifb_dmi_list[enumid] \
162 }
163
164static const struct dmi_system_id efifb_dmi_system_table[] __initconst = {
165 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "iMac4,1", M_I17),
166 /* At least one of these two will be right; maybe both? */
167 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "iMac5,1", M_I20),
168 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac5,1", M_I20),
169 /* At least one of these two will be right; maybe both? */
170 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "iMac6,1", M_I24),
171 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac6,1", M_I24),
172 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac7,1", M_I20_SR),
173 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac8,1", M_I24_8_1),
174 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac10,1", M_I24_10_1),
175 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac11,1", M_I27_11_1),
176 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "Macmini1,1", M_MINI),
177 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "Macmini3,1", M_MINI_3_1),
178 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "Macmini4,1", M_MINI_4_1),
179 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook1,1", M_MB),
180 /* At least one of these two will be right; maybe both? */
181 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook2,1", M_MB),
182 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook2,1", M_MB),
183 /* At least one of these two will be right; maybe both? */
184 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook3,1", M_MB),
185 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook3,1", M_MB),
186 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook4,1", M_MB),
187 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook5,1", M_MB_5_1),
188 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook6,1", M_MB_6_1),
189 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook7,1", M_MB_7_1),
190 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookAir1,1", M_MBA),
191 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookAir3,1", M_MBA_3),
192 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro1,1", M_MBP),
193 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro2,1", M_MBP_2),
194 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro2,2", M_MBP_2_2),
195 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro2,1", M_MBP_2),
196 EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro3,1", M_MBP_SR),
197 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro3,1", M_MBP_SR),
198 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro4,1", M_MBP_4),
199 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,1", M_MBP_5_1),
200 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,2", M_MBP_5_2),
201 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,3", M_MBP_5_3),
202 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro6,1", M_MBP_6_1),
203 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro6,2", M_MBP_6_2),
204 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro7,1", M_MBP_7_1),
205 EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro8,2", M_MBP_8_2),
206 {},
207};
208
209__init void sysfb_apply_efi_quirks(void)
210{
211 if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI ||
212 !(screen_info.capabilities & VIDEO_CAPABILITY_SKIP_QUIRKS))
213 dmi_check_system(efifb_dmi_system_table);
214}
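The choose_value() macro prefers the DMI table entry when the override flag is set, and otherwise only falls back to it when the firmware reported zero. A small worked example (standalone C restatement of the macro semantics):

#include <stdio.h>

enum { OVERRIDE_STRIDE = 0x2 };

/* DMI value wins if the flag is set; otherwise the firmware value wins
 * unless it reported 0. */
static unsigned choose(unsigned dmi, unsigned fw, unsigned field,
		       unsigned flags)
{
	if (flags & field)
		return dmi;
	return fw ? fw : dmi;
}

int main(void)
{
	/* MacBookAir3,1 case: firmware passes a wrong stride, the table
	 * sets OVERRIDE_STRIDE, so the DMI stride (2048 * 4) is used. */
	printf("%u\n", choose(2048 * 4, 1366 * 4, OVERRIDE_STRIDE,
			      OVERRIDE_STRIDE));
	/* No override flag, firmware value nonzero: firmware wins. */
	printf("%u\n", choose(2048 * 4, 1440 * 4, OVERRIDE_STRIDE, 0));
	return 0;
}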
diff --git a/arch/x86/kernel/sysfb_simplefb.c b/arch/x86/kernel/sysfb_simplefb.c
new file mode 100644
index 000000000000..22513e96b012
--- /dev/null
+++ b/arch/x86/kernel/sysfb_simplefb.c
@@ -0,0 +1,95 @@
1/*
2 * Generic System Framebuffers on x86
3 * Copyright (c) 2012-2013 David Herrmann <dh.herrmann@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the Free
7 * Software Foundation; either version 2 of the License, or (at your option)
8 * any later version.
9 */
10
11/*
12 * simple-framebuffer probing
13 * Try to convert "screen_info" into a "simple-framebuffer" compatible mode.
14 * If the mode is incompatible, we return "false" and let the caller create
15 * legacy nodes instead.
16 */
17
18#include <linux/err.h>
19#include <linux/init.h>
20#include <linux/kernel.h>
21#include <linux/mm.h>
22#include <linux/platform_data/simplefb.h>
23#include <linux/platform_device.h>
24#include <linux/screen_info.h>
25#include <asm/sysfb.h>
26
27static const char simplefb_resname[] = "BOOTFB";
28static const struct simplefb_format formats[] = SIMPLEFB_FORMATS;
29
30/* try parsing x86 screen_info into a simple-framebuffer mode struct */
31__init bool parse_mode(const struct screen_info *si,
32 struct simplefb_platform_data *mode)
33{
34 const struct simplefb_format *f;
35 __u8 type;
36 unsigned int i;
37
38 type = si->orig_video_isVGA;
39 if (type != VIDEO_TYPE_VLFB && type != VIDEO_TYPE_EFI)
40 return false;
41
42 for (i = 0; i < ARRAY_SIZE(formats); ++i) {
43 f = &formats[i];
44 if (si->lfb_depth == f->bits_per_pixel &&
45 si->red_size == f->red.length &&
46 si->red_pos == f->red.offset &&
47 si->green_size == f->green.length &&
48 si->green_pos == f->green.offset &&
49 si->blue_size == f->blue.length &&
50 si->blue_pos == f->blue.offset &&
51 si->rsvd_size == f->transp.length &&
52 si->rsvd_pos == f->transp.offset) {
53 mode->format = f->name;
54 mode->width = si->lfb_width;
55 mode->height = si->lfb_height;
56 mode->stride = si->lfb_linelength;
57 return true;
58 }
59 }
60
61 return false;
62}
63
64__init int create_simplefb(const struct screen_info *si,
65 const struct simplefb_platform_data *mode)
66{
67 struct platform_device *pd;
68 struct resource res;
69 unsigned long len;
70
71 /* don't use lfb_size as it may contain the whole VMEM instead of only
72 * the part that is occupied by the framebuffer */
73 len = mode->height * mode->stride;
74 len = PAGE_ALIGN(len);
75 if (len > si->lfb_size << 16) {
76 printk(KERN_WARNING "sysfb: VRAM smaller than advertised\n");
77 return -EINVAL;
78 }
79
80 /* setup IORESOURCE_MEM as framebuffer memory */
81 memset(&res, 0, sizeof(res));
82 res.flags = IORESOURCE_MEM;
83 res.name = simplefb_resname;
84 res.start = si->lfb_base;
85 res.end = si->lfb_base + len - 1;
86 if (res.end <= res.start)
87 return -EINVAL;
88
89 pd = platform_device_register_resndata(NULL, "simple-framebuffer", 0,
90 &res, 1, mode, sizeof(*mode));
91 if (IS_ERR(pd))
92 return PTR_ERR(pd);
93
94 return 0;
95}
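The size check in create_simplefb() relies on screen_info.lfb_size being expressed in 64 KiB units, hence the << 16. A quick arithmetic example of the comparison (the real code additionally rounds the needed length up to a page boundary with PAGE_ALIGN):

#include <stdio.h>

int main(void)
{
	/* lfb_size counts 64 KiB units, so shift by 16 to get bytes.
	 * A 1280x800 32bpp mode needs height * stride bytes. */
	unsigned lfb_size_units = 64;            /* 64 * 64 KiB = 4 MiB */
	unsigned long vram = (unsigned long)lfb_size_units << 16;
	unsigned long need = 800UL * (1280 * 4); /* 4,096,000 bytes */

	printf("vram=%lu need=%lu ok=%d\n", vram, need, need <= vram);
	return 0;
}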
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index addf7b58f4e8..91a4496db434 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -301,6 +301,15 @@ static int tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
301 return 0; 301 return 0;
302} 302}
303 303
304static int tboot_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b)
305{
306 if (!tboot_enabled())
307 return 0;
308
309 pr_warning("tboot is not able to suspend on platforms with reduced hardware sleep (ACPIv5)\n");
310 return -ENODEV;
311}
312
304static atomic_t ap_wfs_count; 313static atomic_t ap_wfs_count;
305 314
306static int tboot_wait_for_aps(int num_aps) 315static int tboot_wait_for_aps(int num_aps)
@@ -422,6 +431,7 @@ static __init int tboot_late_init(void)
422#endif 431#endif
423 432
424 acpi_os_set_prepare_sleep(&tboot_sleep); 433 acpi_os_set_prepare_sleep(&tboot_sleep);
434 acpi_os_set_prepare_extended_sleep(&tboot_extended_sleep);
425 return 0; 435 return 0;
426} 436}
427 437
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 1b23a1c92746..8c8093b146ca 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -58,6 +58,7 @@
58#include <asm/mce.h> 58#include <asm/mce.h>
59#include <asm/fixmap.h> 59#include <asm/fixmap.h>
60#include <asm/mach_traps.h> 60#include <asm/mach_traps.h>
61#include <asm/alternative.h>
61 62
62#ifdef CONFIG_X86_64 63#ifdef CONFIG_X86_64
63#include <asm/x86_init.h> 64#include <asm/x86_init.h>
@@ -327,6 +328,9 @@ dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_co
327 ftrace_int3_handler(regs)) 328 ftrace_int3_handler(regs))
328 return; 329 return;
329#endif 330#endif
331 if (poke_int3_handler(regs))
332 return;
333
330 prev_state = exception_enter(); 334 prev_state = exception_enter();
331#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP 335#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
332 if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP, 336 if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
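do_int3() now offers the trap to poke_int3_handler() before the debug consumers see it: breakpoint-based cross-modifying patching (text_poke_bp) temporarily writes an int3 over the first byte of an instruction being rewritten, and CPUs that hit it must be redirected rather than reported. A standalone sketch of that dispatch order (stub names are illustrative):

#include <stdio.h>

struct pt_regs_demo { unsigned long ip; };

/* Stubs standing in for the real handlers; each returns nonzero if it
 * consumed the trap. */
static int ftrace_int3_demo(struct pt_regs_demo *r)
{
	return 0;
}

static int poke_int3_demo(struct pt_regs_demo *r)
{
	return r->ip == 0x1000; /* pretend 0x1000 is being live-patched */
}

static void notify_debugger_demo(struct pt_regs_demo *r)
{
	puts("delivered to debugger"); /* normal #BP path */
}

/* Patching fixups get first refusal, mirroring the patched do_int3(). */
static void do_int3_demo(struct pt_regs_demo *regs)
{
	if (ftrace_int3_demo(regs))
		return;
	if (poke_int3_demo(regs))   /* the new poke_int3_handler() hook */
		return;
	notify_debugger_demo(regs);
}

int main(void)
{
	struct pt_regs_demo patched = { 0x1000 }, other = { 0x2000 };

	do_int3_demo(&patched);  /* consumed by the patching fixup */
	do_int3_demo(&other);    /* falls through to the debugger path */
	return 0;
}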
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 6ff49247edf8..930e5d48f560 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -89,6 +89,12 @@ int check_tsc_unstable(void)
89} 89}
90EXPORT_SYMBOL_GPL(check_tsc_unstable); 90EXPORT_SYMBOL_GPL(check_tsc_unstable);
91 91
92int check_tsc_disabled(void)
93{
94 return tsc_disabled;
95}
96EXPORT_SYMBOL_GPL(check_tsc_disabled);
97
92#ifdef CONFIG_X86_TSC 98#ifdef CONFIG_X86_TSC
93int __init notsc_setup(char *str) 99int __init notsc_setup(char *str)
94{ 100{
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index a20ecb5b6cbf..b110fe6c03d4 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -413,7 +413,8 @@ static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
413 (1 << KVM_FEATURE_CLOCKSOURCE2) | 413 (1 << KVM_FEATURE_CLOCKSOURCE2) |
414 (1 << KVM_FEATURE_ASYNC_PF) | 414 (1 << KVM_FEATURE_ASYNC_PF) |
415 (1 << KVM_FEATURE_PV_EOI) | 415 (1 << KVM_FEATURE_PV_EOI) |
416 (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT); 416 (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) |
417 (1 << KVM_FEATURE_PV_UNHALT);
417 418
418 if (sched_info_on()) 419 if (sched_info_on())
419 entry->eax |= (1 << KVM_FEATURE_STEAL_TIME); 420 entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index afc11245827c..5439117d5c4c 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -79,16 +79,6 @@ static inline void apic_set_reg(struct kvm_lapic *apic, int reg_off, u32 val)
79 *((u32 *) (apic->regs + reg_off)) = val; 79 *((u32 *) (apic->regs + reg_off)) = val;
80} 80}
81 81
82static inline int apic_test_and_set_vector(int vec, void *bitmap)
83{
84 return test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
85}
86
87static inline int apic_test_and_clear_vector(int vec, void *bitmap)
88{
89 return test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
90}
91
92static inline int apic_test_vector(int vec, void *bitmap) 82static inline int apic_test_vector(int vec, void *bitmap)
93{ 83{
94 return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec)); 84 return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
@@ -331,10 +321,10 @@ void kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir)
331} 321}
332EXPORT_SYMBOL_GPL(kvm_apic_update_irr); 322EXPORT_SYMBOL_GPL(kvm_apic_update_irr);
333 323
334static inline int apic_test_and_set_irr(int vec, struct kvm_lapic *apic) 324static inline void apic_set_irr(int vec, struct kvm_lapic *apic)
335{ 325{
336 apic->irr_pending = true; 326 apic->irr_pending = true;
337 return apic_test_and_set_vector(vec, apic->regs + APIC_IRR); 327 apic_set_vector(vec, apic->regs + APIC_IRR);
338} 328}
339 329
340static inline int apic_search_irr(struct kvm_lapic *apic) 330static inline int apic_search_irr(struct kvm_lapic *apic)
@@ -681,32 +671,28 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
681 if (unlikely(!apic_enabled(apic))) 671 if (unlikely(!apic_enabled(apic)))
682 break; 672 break;
683 673
674 result = 1;
675
684 if (dest_map) 676 if (dest_map)
685 __set_bit(vcpu->vcpu_id, dest_map); 677 __set_bit(vcpu->vcpu_id, dest_map);
686 678
687 if (kvm_x86_ops->deliver_posted_interrupt) { 679 if (kvm_x86_ops->deliver_posted_interrupt)
688 result = 1;
689 kvm_x86_ops->deliver_posted_interrupt(vcpu, vector); 680 kvm_x86_ops->deliver_posted_interrupt(vcpu, vector);
690 } else { 681 else {
691 result = !apic_test_and_set_irr(vector, apic); 682 apic_set_irr(vector, apic);
692
693 if (!result) {
694 if (trig_mode)
695 apic_debug("level trig mode repeatedly "
696 "for vector %d", vector);
697 goto out;
698 }
699 683
700 kvm_make_request(KVM_REQ_EVENT, vcpu); 684 kvm_make_request(KVM_REQ_EVENT, vcpu);
701 kvm_vcpu_kick(vcpu); 685 kvm_vcpu_kick(vcpu);
702 } 686 }
703out:
704 trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode, 687 trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
705 trig_mode, vector, !result); 688 trig_mode, vector, false);
706 break; 689 break;
707 690
708 case APIC_DM_REMRD: 691 case APIC_DM_REMRD:
709 apic_debug("Ignoring delivery mode 3\n"); 692 result = 1;
693 vcpu->arch.pv.pv_unhalted = 1;
694 kvm_make_request(KVM_REQ_EVENT, vcpu);
695 kvm_vcpu_kick(vcpu);
710 break; 696 break;
711 697
712 case APIC_DM_SMI: 698 case APIC_DM_SMI:
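The APIC_DM_REMRD case, previously ignored, is repurposed as the paravirtual "kick" behind KVM_FEATURE_PV_UNHALT advertised in the cpuid.c hunk above: it sets pv_unhalted and wakes a vCPU blocked in HLT. A userspace analogue of that flag-plus-wakeup handshake (a sketch, not KVM's code; build with -lpthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int pv_unhalted;

/* The "vCPU" halts by waiting on a condition until kicked. */
static void *vcpu_halt(void *arg)
{
	pthread_mutex_lock(&lock);
	while (!pv_unhalted)           /* like a vCPU blocked in HLT */
		pthread_cond_wait(&cond, &lock);
	pv_unhalted = 0;               /* consumed on resume */
	pthread_mutex_unlock(&lock);
	puts("vcpu resumed");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, vcpu_halt, NULL);
	pthread_mutex_lock(&lock);
	pv_unhalted = 1;               /* the APIC_DM_REMRD "kick" */
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	return 0;
}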
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 9e9285ae9b94..6e2d2c8f230b 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -132,8 +132,8 @@ module_param(dbg, bool, 0644);
132 (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \ 132 (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
133 * PT32_LEVEL_BITS))) - 1)) 133 * PT32_LEVEL_BITS))) - 1))
134 134
135#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \ 135#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | shadow_user_mask \
136 | PT64_NX_MASK) 136 | shadow_x_mask | shadow_nx_mask)
137 137
138#define ACC_EXEC_MASK 1 138#define ACC_EXEC_MASK 1
139#define ACC_WRITE_MASK PT_WRITABLE_MASK 139#define ACC_WRITE_MASK PT_WRITABLE_MASK
@@ -331,11 +331,6 @@ static int is_large_pte(u64 pte)
331 return pte & PT_PAGE_SIZE_MASK; 331 return pte & PT_PAGE_SIZE_MASK;
332} 332}
333 333
334static int is_dirty_gpte(unsigned long pte)
335{
336 return pte & PT_DIRTY_MASK;
337}
338
339static int is_rmap_spte(u64 pte) 334static int is_rmap_spte(u64 pte)
340{ 335{
341 return is_shadow_present_pte(pte); 336 return is_shadow_present_pte(pte);
@@ -2052,12 +2047,18 @@ static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
2052 return __shadow_walk_next(iterator, *iterator->sptep); 2047 return __shadow_walk_next(iterator, *iterator->sptep);
2053} 2048}
2054 2049
2055static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp) 2050static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp, bool accessed)
2056{ 2051{
2057 u64 spte; 2052 u64 spte;
2058 2053
2054 BUILD_BUG_ON(VMX_EPT_READABLE_MASK != PT_PRESENT_MASK ||
2055 VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK);
2056
2059 spte = __pa(sp->spt) | PT_PRESENT_MASK | PT_WRITABLE_MASK | 2057 spte = __pa(sp->spt) | PT_PRESENT_MASK | PT_WRITABLE_MASK |
2060 shadow_user_mask | shadow_x_mask | shadow_accessed_mask; 2058 shadow_user_mask | shadow_x_mask;
2059
2060 if (accessed)
2061 spte |= shadow_accessed_mask;
2061 2062
2062 mmu_spte_set(sptep, spte); 2063 mmu_spte_set(sptep, spte);
2063} 2064}
@@ -2574,14 +2575,6 @@ static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
2574 mmu_free_roots(vcpu); 2575 mmu_free_roots(vcpu);
2575} 2576}
2576 2577
2577static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level)
2578{
2579 int bit7;
2580
2581 bit7 = (gpte >> 7) & 1;
2582 return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) != 0;
2583}
2584
2585static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, 2578static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
2586 bool no_dirty_log) 2579 bool no_dirty_log)
2587{ 2580{
@@ -2594,26 +2587,6 @@ static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
2594 return gfn_to_pfn_memslot_atomic(slot, gfn); 2587 return gfn_to_pfn_memslot_atomic(slot, gfn);
2595} 2588}
2596 2589
2597static bool prefetch_invalid_gpte(struct kvm_vcpu *vcpu,
2598 struct kvm_mmu_page *sp, u64 *spte,
2599 u64 gpte)
2600{
2601 if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
2602 goto no_present;
2603
2604 if (!is_present_gpte(gpte))
2605 goto no_present;
2606
2607 if (!(gpte & PT_ACCESSED_MASK))
2608 goto no_present;
2609
2610 return false;
2611
2612no_present:
2613 drop_spte(vcpu->kvm, spte);
2614 return true;
2615}
2616
2617static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu, 2590static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
2618 struct kvm_mmu_page *sp, 2591 struct kvm_mmu_page *sp,
2619 u64 *start, u64 *end) 2592 u64 *start, u64 *end)
@@ -2710,7 +2683,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
2710 iterator.level - 1, 2683 iterator.level - 1,
2711 1, ACC_ALL, iterator.sptep); 2684 1, ACC_ALL, iterator.sptep);
2712 2685
2713 link_shadow_page(iterator.sptep, sp); 2686 link_shadow_page(iterator.sptep, sp, true);
2714 } 2687 }
2715 } 2688 }
2716 return emulate; 2689 return emulate;
@@ -2808,7 +2781,7 @@ exit:
2808 return ret; 2781 return ret;
2809} 2782}
2810 2783
2811static bool page_fault_can_be_fast(struct kvm_vcpu *vcpu, u32 error_code) 2784static bool page_fault_can_be_fast(u32 error_code)
2812{ 2785{
2813 /* 2786 /*
2814 * Do not fix the mmio spte with invalid generation number which 2787 * Do not fix the mmio spte with invalid generation number which
@@ -2861,7 +2834,7 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
2861 bool ret = false; 2834 bool ret = false;
2862 u64 spte = 0ull; 2835 u64 spte = 0ull;
2863 2836
2864 if (!page_fault_can_be_fast(vcpu, error_code)) 2837 if (!page_fault_can_be_fast(error_code))
2865 return false; 2838 return false;
2866 2839
2867 walk_shadow_page_lockless_begin(vcpu); 2840 walk_shadow_page_lockless_begin(vcpu);
@@ -3209,6 +3182,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
3209 mmu_sync_roots(vcpu); 3182 mmu_sync_roots(vcpu);
3210 spin_unlock(&vcpu->kvm->mmu_lock); 3183 spin_unlock(&vcpu->kvm->mmu_lock);
3211} 3184}
3185EXPORT_SYMBOL_GPL(kvm_mmu_sync_roots);
3212 3186
3213static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr, 3187static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
3214 u32 access, struct x86_exception *exception) 3188 u32 access, struct x86_exception *exception)
@@ -3478,6 +3452,7 @@ void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
3478 ++vcpu->stat.tlb_flush; 3452 ++vcpu->stat.tlb_flush;
3479 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); 3453 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
3480} 3454}
3455EXPORT_SYMBOL_GPL(kvm_mmu_flush_tlb);
3481 3456
3482static void paging_new_cr3(struct kvm_vcpu *vcpu) 3457static void paging_new_cr3(struct kvm_vcpu *vcpu)
3483{ 3458{
@@ -3501,18 +3476,6 @@ static void paging_free(struct kvm_vcpu *vcpu)
3501 nonpaging_free(vcpu); 3476 nonpaging_free(vcpu);
3502} 3477}
3503 3478
3504static inline void protect_clean_gpte(unsigned *access, unsigned gpte)
3505{
3506 unsigned mask;
3507
3508 BUILD_BUG_ON(PT_WRITABLE_MASK != ACC_WRITE_MASK);
3509
3510 mask = (unsigned)~ACC_WRITE_MASK;
3511 /* Allow write access to dirty gptes */
3512 mask |= (gpte >> (PT_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) & PT_WRITABLE_MASK;
3513 *access &= mask;
3514}
3515
3516static bool sync_mmio_spte(struct kvm *kvm, u64 *sptep, gfn_t gfn, 3479static bool sync_mmio_spte(struct kvm *kvm, u64 *sptep, gfn_t gfn,
3517 unsigned access, int *nr_present) 3480 unsigned access, int *nr_present)
3518{ 3481{
@@ -3530,16 +3493,6 @@ static bool sync_mmio_spte(struct kvm *kvm, u64 *sptep, gfn_t gfn,
3530 return false; 3493 return false;
3531} 3494}
3532 3495
3533static inline unsigned gpte_access(struct kvm_vcpu *vcpu, u64 gpte)
3534{
3535 unsigned access;
3536
3537 access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
3538 access &= ~(gpte >> PT64_NX_SHIFT);
3539
3540 return access;
3541}
3542
3543static inline bool is_last_gpte(struct kvm_mmu *mmu, unsigned level, unsigned gpte) 3496static inline bool is_last_gpte(struct kvm_mmu *mmu, unsigned level, unsigned gpte)
3544{ 3497{
3545 unsigned index; 3498 unsigned index;
@@ -3549,6 +3502,11 @@ static inline bool is_last_gpte(struct kvm_mmu *mmu, unsigned level, unsigned gp
3549 return mmu->last_pte_bitmap & (1 << index); 3502 return mmu->last_pte_bitmap & (1 << index);
3550} 3503}
3551 3504
3505#define PTTYPE_EPT 18 /* arbitrary */
3506#define PTTYPE PTTYPE_EPT
3507#include "paging_tmpl.h"
3508#undef PTTYPE
3509
3552#define PTTYPE 64 3510#define PTTYPE 64
3553#include "paging_tmpl.h" 3511#include "paging_tmpl.h"
3554#undef PTTYPE 3512#undef PTTYPE
@@ -3563,6 +3521,8 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
3563 int maxphyaddr = cpuid_maxphyaddr(vcpu); 3521 int maxphyaddr = cpuid_maxphyaddr(vcpu);
3564 u64 exb_bit_rsvd = 0; 3522 u64 exb_bit_rsvd = 0;
3565 3523
3524 context->bad_mt_xwr = 0;
3525
3566 if (!context->nx) 3526 if (!context->nx)
3567 exb_bit_rsvd = rsvd_bits(63, 63); 3527 exb_bit_rsvd = rsvd_bits(63, 63);
3568 switch (context->root_level) { 3528 switch (context->root_level) {
@@ -3618,7 +3578,40 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
3618 } 3578 }
3619} 3579}
3620 3580
3621static void update_permission_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu) 3581static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
3582 struct kvm_mmu *context, bool execonly)
3583{
3584 int maxphyaddr = cpuid_maxphyaddr(vcpu);
3585 int pte;
3586
3587 context->rsvd_bits_mask[0][3] =
3588 rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 7);
3589 context->rsvd_bits_mask[0][2] =
3590 rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6);
3591 context->rsvd_bits_mask[0][1] =
3592 rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6);
3593 context->rsvd_bits_mask[0][0] = rsvd_bits(maxphyaddr, 51);
3594
3595 /* large page */
3596 context->rsvd_bits_mask[1][3] = context->rsvd_bits_mask[0][3];
3597 context->rsvd_bits_mask[1][2] =
3598 rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 29);
3599 context->rsvd_bits_mask[1][1] =
3600 rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 20);
3601 context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
3602
3603 for (pte = 0; pte < 64; pte++) {
3604 int rwx_bits = pte & 7;
3605 int mt = pte >> 3;
3606 if (mt == 0x2 || mt == 0x3 || mt == 0x7 ||
3607 rwx_bits == 0x2 || rwx_bits == 0x6 ||
3608 (rwx_bits == 0x4 && !execonly))
3609 context->bad_mt_xwr |= (1ull << pte);
3610 }
3611}
3612
3613static void update_permission_bitmask(struct kvm_vcpu *vcpu,
3614 struct kvm_mmu *mmu, bool ept)
3622{ 3615{
3623 unsigned bit, byte, pfec; 3616 unsigned bit, byte, pfec;
3624 u8 map; 3617 u8 map;
@@ -3636,12 +3629,16 @@ static void update_permission_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu
3636 w = bit & ACC_WRITE_MASK; 3629 w = bit & ACC_WRITE_MASK;
3637 u = bit & ACC_USER_MASK; 3630 u = bit & ACC_USER_MASK;
3638 3631
3639 /* Not really needed: !nx will cause pte.nx to fault */ 3632 if (!ept) {
3640 x |= !mmu->nx; 3633 /* Not really needed: !nx will cause pte.nx to fault */
3641 /* Allow supervisor writes if !cr0.wp */ 3634 x |= !mmu->nx;
3642 w |= !is_write_protection(vcpu) && !uf; 3635 /* Allow supervisor writes if !cr0.wp */
3643 /* Disallow supervisor fetches of user code if cr4.smep */ 3636 w |= !is_write_protection(vcpu) && !uf;
3644 x &= !(smep && u && !uf); 3637 /* Disallow supervisor fetches of user code if cr4.smep */
3638 x &= !(smep && u && !uf);
3639 } else
3640 /* Not really needed: no U/S accesses on ept */
3641 u = 1;
3645 3642
3646 fault = (ff && !x) || (uf && !u) || (wf && !w); 3643 fault = (ff && !x) || (uf && !u) || (wf && !w);
3647 map |= fault << bit; 3644 map |= fault << bit;
@@ -3676,7 +3673,7 @@ static int paging64_init_context_common(struct kvm_vcpu *vcpu,
3676 context->root_level = level; 3673 context->root_level = level;
3677 3674
3678 reset_rsvds_bits_mask(vcpu, context); 3675 reset_rsvds_bits_mask(vcpu, context);
3679 update_permission_bitmask(vcpu, context); 3676 update_permission_bitmask(vcpu, context, false);
3680 update_last_pte_bitmap(vcpu, context); 3677 update_last_pte_bitmap(vcpu, context);
3681 3678
3682 ASSERT(is_pae(vcpu)); 3679 ASSERT(is_pae(vcpu));
@@ -3706,7 +3703,7 @@ static int paging32_init_context(struct kvm_vcpu *vcpu,
3706 context->root_level = PT32_ROOT_LEVEL; 3703 context->root_level = PT32_ROOT_LEVEL;
3707 3704
3708 reset_rsvds_bits_mask(vcpu, context); 3705 reset_rsvds_bits_mask(vcpu, context);
3709 update_permission_bitmask(vcpu, context); 3706 update_permission_bitmask(vcpu, context, false);
3710 update_last_pte_bitmap(vcpu, context); 3707 update_last_pte_bitmap(vcpu, context);
3711 3708
3712 context->new_cr3 = paging_new_cr3; 3709 context->new_cr3 = paging_new_cr3;
@@ -3768,7 +3765,7 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
3768 context->gva_to_gpa = paging32_gva_to_gpa; 3765 context->gva_to_gpa = paging32_gva_to_gpa;
3769 } 3766 }
3770 3767
3771 update_permission_bitmask(vcpu, context); 3768 update_permission_bitmask(vcpu, context, false);
3772 update_last_pte_bitmap(vcpu, context); 3769 update_last_pte_bitmap(vcpu, context);
3773 3770
3774 return 0; 3771 return 0;
@@ -3800,6 +3797,33 @@ int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
3800} 3797}
3801EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu); 3798EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
3802 3799
3800int kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
3801 bool execonly)
3802{
3803 ASSERT(vcpu);
3804 ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
3805
3806 context->shadow_root_level = kvm_x86_ops->get_tdp_level();
3807
3808 context->nx = true;
3809 context->new_cr3 = paging_new_cr3;
3810 context->page_fault = ept_page_fault;
3811 context->gva_to_gpa = ept_gva_to_gpa;
3812 context->sync_page = ept_sync_page;
3813 context->invlpg = ept_invlpg;
3814 context->update_pte = ept_update_pte;
3815 context->free = paging_free;
3816 context->root_level = context->shadow_root_level;
3817 context->root_hpa = INVALID_PAGE;
3818 context->direct_map = false;
3819
3820 update_permission_bitmask(vcpu, context, true);
3821 reset_rsvds_bits_mask_ept(vcpu, context, execonly);
3822
3823 return 0;
3824}
3825EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
3826
3803static int init_kvm_softmmu(struct kvm_vcpu *vcpu) 3827static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
3804{ 3828{
3805 int r = kvm_init_shadow_mmu(vcpu, vcpu->arch.walk_mmu); 3829 int r = kvm_init_shadow_mmu(vcpu, vcpu->arch.walk_mmu);
@@ -3847,7 +3871,7 @@ static int init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
3847 g_context->gva_to_gpa = paging32_gva_to_gpa_nested; 3871 g_context->gva_to_gpa = paging32_gva_to_gpa_nested;
3848 } 3872 }
3849 3873
3850 update_permission_bitmask(vcpu, g_context); 3874 update_permission_bitmask(vcpu, g_context, false);
3851 update_last_pte_bitmap(vcpu, g_context); 3875 update_last_pte_bitmap(vcpu, g_context);
3852 3876
3853 return 0; 3877 return 0;
@@ -3923,8 +3947,8 @@ static bool need_remote_flush(u64 old, u64 new)
3923 return true; 3947 return true;
3924 if ((old ^ new) & PT64_BASE_ADDR_MASK) 3948 if ((old ^ new) & PT64_BASE_ADDR_MASK)
3925 return true; 3949 return true;
3926 old ^= PT64_NX_MASK; 3950 old ^= shadow_nx_mask;
3927 new ^= PT64_NX_MASK; 3951 new ^= shadow_nx_mask;
3928 return (old & ~new & PT64_PERM_MASK) != 0; 3952 return (old & ~new & PT64_PERM_MASK) != 0;
3929} 3953}
3930 3954
@@ -4182,7 +4206,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
4182 switch (er) { 4206 switch (er) {
4183 case EMULATE_DONE: 4207 case EMULATE_DONE:
4184 return 1; 4208 return 1;
4185 case EMULATE_DO_MMIO: 4209 case EMULATE_USER_EXIT:
4186 ++vcpu->stat.mmio_exits; 4210 ++vcpu->stat.mmio_exits;
4187 /* fall through */ 4211 /* fall through */
4188 case EMULATE_FAIL: 4212 case EMULATE_FAIL:
@@ -4390,11 +4414,8 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm)
4390 /* 4414 /*
4391 * The very rare case: if the generation-number is round, 4415 * The very rare case: if the generation-number is round,
4392 * zap all shadow pages. 4416 * zap all shadow pages.
4393 *
4394 * The max value is MMIO_MAX_GEN - 1 since it is not called
4395 * when mark memslot invalid.
4396 */ 4417 */
4397 if (unlikely(kvm_current_mmio_generation(kvm) >= (MMIO_MAX_GEN - 1))) { 4418 if (unlikely(kvm_current_mmio_generation(kvm) >= MMIO_MAX_GEN)) {
4398 printk_ratelimited(KERN_INFO "kvm: zapping shadow pages for mmio generation wraparound\n"); 4419 printk_ratelimited(KERN_INFO "kvm: zapping shadow pages for mmio generation wraparound\n");
4399 kvm_mmu_invalidate_zap_all_pages(kvm); 4420 kvm_mmu_invalidate_zap_all_pages(kvm);
4400 } 4421 }
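update_permission_bitmask() precomputes, for each of the 16 relevant error-code combinations, a byte whose bits record whether each of the 8 pte access combinations faults; the new ept argument skips the NX/CR0.WP/SMEP fixups that have no meaning for EPT and forces the user bit on. A standalone restatement of the table build and lookup (simplified: SMEP omitted, masks renamed):

#include <stdint.h>
#include <stdio.h>

/* bit0 = exec, bit1 = write, bit2 = user, for both access and fault
 * encodings below. */
enum { ACC_X = 1, ACC_W = 2, ACC_U = 4 };

static uint8_t permissions[16];

static void build(int nx, int wp, int ept)
{
	unsigned byte, bit;

	for (byte = 0; byte < 16; byte++) {
		unsigned pfec = byte << 1;  /* bit1=W, bit2=U, bit4=fetch */
		int wf = pfec & 2, uf = pfec & 4, ff = pfec & 16;
		uint8_t map = 0;

		for (bit = 0; bit < 8; bit++) {
			int x = bit & ACC_X, w = bit & ACC_W, u = bit & ACC_U;
			int fault;

			if (!ept) {
				x |= !nx;        /* no NX: fetches never fault on nx */
				w |= !wp && !uf; /* !CR0.WP: supervisor may write */
			} else {
				u = 1;           /* EPT has no U/S distinction */
			}
			fault = (ff && !x) || (uf && !u) || (wf && !w);
			map |= fault << bit;
		}
		permissions[byte] = map;
	}
}

int main(void)
{
	unsigned pfec = 2 | 4;  /* user write fault */
	unsigned acc = ACC_U;   /* pte: user-readable, not writable */

	build(1, 1, 0);
	printf("faults=%d\n", (permissions[pfec >> 1] >> acc) & 1);
	return 0;
}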
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 5b59c573aba7..77e044a0f5f7 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -71,6 +71,8 @@ enum {
71 71
72int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct); 72int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct);
73int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context); 73int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
74int kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
75 bool execonly);
74 76
75static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm) 77static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
76{ 78{
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 7769699d48a8..043330159179 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -23,6 +23,13 @@
23 * so the code in this file is compiled twice, once per pte size. 23 * so the code in this file is compiled twice, once per pte size.
24 */ 24 */
25 25
26/*
27 * This is used to catch non optimized PT_GUEST_(DIRTY|ACCESS)_SHIFT macro
28 * uses for EPT without A/D paging type.
29 */
30extern u64 __pure __using_nonexistent_pte_bit(void)
31 __compiletime_error("wrong use of PT_GUEST_(DIRTY|ACCESS)_SHIFT");
32
26#if PTTYPE == 64 33#if PTTYPE == 64
27 #define pt_element_t u64 34 #define pt_element_t u64
28 #define guest_walker guest_walker64 35 #define guest_walker guest_walker64
@@ -32,6 +39,10 @@
32 #define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl) 39 #define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
33 #define PT_INDEX(addr, level) PT64_INDEX(addr, level) 40 #define PT_INDEX(addr, level) PT64_INDEX(addr, level)
34 #define PT_LEVEL_BITS PT64_LEVEL_BITS 41 #define PT_LEVEL_BITS PT64_LEVEL_BITS
42 #define PT_GUEST_ACCESSED_MASK PT_ACCESSED_MASK
43 #define PT_GUEST_DIRTY_MASK PT_DIRTY_MASK
44 #define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT
45 #define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
35 #ifdef CONFIG_X86_64 46 #ifdef CONFIG_X86_64
36 #define PT_MAX_FULL_LEVELS 4 47 #define PT_MAX_FULL_LEVELS 4
37 #define CMPXCHG cmpxchg 48 #define CMPXCHG cmpxchg
@@ -49,7 +60,26 @@
49 #define PT_INDEX(addr, level) PT32_INDEX(addr, level) 60 #define PT_INDEX(addr, level) PT32_INDEX(addr, level)
50 #define PT_LEVEL_BITS PT32_LEVEL_BITS 61 #define PT_LEVEL_BITS PT32_LEVEL_BITS
51 #define PT_MAX_FULL_LEVELS 2 62 #define PT_MAX_FULL_LEVELS 2
63 #define PT_GUEST_ACCESSED_MASK PT_ACCESSED_MASK
64 #define PT_GUEST_DIRTY_MASK PT_DIRTY_MASK
65 #define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT
66 #define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
52 #define CMPXCHG cmpxchg 67 #define CMPXCHG cmpxchg
68#elif PTTYPE == PTTYPE_EPT
69 #define pt_element_t u64
70 #define guest_walker guest_walkerEPT
71 #define FNAME(name) ept_##name
72 #define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
73 #define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
74 #define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
75 #define PT_INDEX(addr, level) PT64_INDEX(addr, level)
76 #define PT_LEVEL_BITS PT64_LEVEL_BITS
77 #define PT_GUEST_ACCESSED_MASK 0
78 #define PT_GUEST_DIRTY_MASK 0
79 #define PT_GUEST_DIRTY_SHIFT __using_nonexistent_pte_bit()
80 #define PT_GUEST_ACCESSED_SHIFT __using_nonexistent_pte_bit()
81 #define CMPXCHG cmpxchg64
82 #define PT_MAX_FULL_LEVELS 4
53#else 83#else
54 #error Invalid PTTYPE value 84 #error Invalid PTTYPE value
55#endif 85#endif
@@ -80,6 +110,40 @@ static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
80 return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT; 110 return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;
81} 111}
82 112
113static inline void FNAME(protect_clean_gpte)(unsigned *access, unsigned gpte)
114{
115 unsigned mask;
116
117 /* dirty bit is not supported, so no need to track it */
118 if (!PT_GUEST_DIRTY_MASK)
119 return;
120
121 BUILD_BUG_ON(PT_WRITABLE_MASK != ACC_WRITE_MASK);
122
123 mask = (unsigned)~ACC_WRITE_MASK;
124 /* Allow write access to dirty gptes */
125 mask |= (gpte >> (PT_GUEST_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) &
126 PT_WRITABLE_MASK;
127 *access &= mask;
128}
129
130static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level)
131{
132 int bit7 = (gpte >> 7) & 1, low6 = gpte & 0x3f;
133
134 return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) |
135 ((mmu->bad_mt_xwr & (1ull << low6)) != 0);
136}
137
138static inline int FNAME(is_present_gpte)(unsigned long pte)
139{
140#if PTTYPE != PTTYPE_EPT
141 return is_present_gpte(pte);
142#else
143 return pte & 7;
144#endif
145}
146
83static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, 147static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
84 pt_element_t __user *ptep_user, unsigned index, 148 pt_element_t __user *ptep_user, unsigned index,
85 pt_element_t orig_pte, pt_element_t new_pte) 149 pt_element_t orig_pte, pt_element_t new_pte)
@@ -103,6 +167,42 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
103 return (ret != orig_pte); 167 return (ret != orig_pte);
104} 168}
105 169
170static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
171 struct kvm_mmu_page *sp, u64 *spte,
172 u64 gpte)
173{
174 if (FNAME(is_rsvd_bits_set)(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
175 goto no_present;
176
177 if (!FNAME(is_present_gpte)(gpte))
178 goto no_present;
179
180 /* if accessed bit is not supported prefetch non accessed gpte */
181 if (PT_GUEST_ACCESSED_MASK && !(gpte & PT_GUEST_ACCESSED_MASK))
182 goto no_present;
183
184 return false;
185
186no_present:
187 drop_spte(vcpu->kvm, spte);
188 return true;
189}
190
191static inline unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, u64 gpte)
192{
193 unsigned access;
194#if PTTYPE == PTTYPE_EPT
195 access = ((gpte & VMX_EPT_WRITABLE_MASK) ? ACC_WRITE_MASK : 0) |
196 ((gpte & VMX_EPT_EXECUTABLE_MASK) ? ACC_EXEC_MASK : 0) |
197 ACC_USER_MASK;
198#else
199 access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
200 access &= ~(gpte >> PT64_NX_SHIFT);
201#endif
202
203 return access;
204}
205
106static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu, 206static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
107 struct kvm_mmu *mmu, 207 struct kvm_mmu *mmu,
108 struct guest_walker *walker, 208 struct guest_walker *walker,
@@ -114,18 +214,23 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
114 gfn_t table_gfn; 214 gfn_t table_gfn;
115 int ret; 215 int ret;
116 216
217 /* dirty/accessed bits are not supported, so no need to update them */
218 if (!PT_GUEST_DIRTY_MASK)
219 return 0;
220
117 for (level = walker->max_level; level >= walker->level; --level) { 221 for (level = walker->max_level; level >= walker->level; --level) {
118 pte = orig_pte = walker->ptes[level - 1]; 222 pte = orig_pte = walker->ptes[level - 1];
119 table_gfn = walker->table_gfn[level - 1]; 223 table_gfn = walker->table_gfn[level - 1];
120 ptep_user = walker->ptep_user[level - 1]; 224 ptep_user = walker->ptep_user[level - 1];
121 index = offset_in_page(ptep_user) / sizeof(pt_element_t); 225 index = offset_in_page(ptep_user) / sizeof(pt_element_t);
122 if (!(pte & PT_ACCESSED_MASK)) { 226 if (!(pte & PT_GUEST_ACCESSED_MASK)) {
123 trace_kvm_mmu_set_accessed_bit(table_gfn, index, sizeof(pte)); 227 trace_kvm_mmu_set_accessed_bit(table_gfn, index, sizeof(pte));
124 pte |= PT_ACCESSED_MASK; 228 pte |= PT_GUEST_ACCESSED_MASK;
125 } 229 }
126 if (level == walker->level && write_fault && !is_dirty_gpte(pte)) { 230 if (level == walker->level && write_fault &&
231 !(pte & PT_GUEST_DIRTY_MASK)) {
127 trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte)); 232 trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
128 pte |= PT_DIRTY_MASK; 233 pte |= PT_GUEST_DIRTY_MASK;
129 } 234 }
130 if (pte == orig_pte) 235 if (pte == orig_pte)
131 continue; 236 continue;
@@ -170,7 +275,7 @@ retry_walk:
170 if (walker->level == PT32E_ROOT_LEVEL) { 275 if (walker->level == PT32E_ROOT_LEVEL) {
171 pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3); 276 pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3);
172 trace_kvm_mmu_paging_element(pte, walker->level); 277 trace_kvm_mmu_paging_element(pte, walker->level);
173 if (!is_present_gpte(pte)) 278 if (!FNAME(is_present_gpte)(pte))
174 goto error; 279 goto error;
175 --walker->level; 280 --walker->level;
176 } 281 }
@@ -179,7 +284,7 @@ retry_walk:
179 ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) || 284 ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
180 (mmu->get_cr3(vcpu) & CR3_NONPAE_RESERVED_BITS) == 0); 285 (mmu->get_cr3(vcpu) & CR3_NONPAE_RESERVED_BITS) == 0);
181 286
182 accessed_dirty = PT_ACCESSED_MASK; 287 accessed_dirty = PT_GUEST_ACCESSED_MASK;
183 pt_access = pte_access = ACC_ALL; 288 pt_access = pte_access = ACC_ALL;
184 ++walker->level; 289 ++walker->level;
185 290
@@ -215,17 +320,17 @@ retry_walk:
215 320
216 trace_kvm_mmu_paging_element(pte, walker->level); 321 trace_kvm_mmu_paging_element(pte, walker->level);
217 322
218 if (unlikely(!is_present_gpte(pte))) 323 if (unlikely(!FNAME(is_present_gpte)(pte)))
219 goto error; 324 goto error;
220 325
221 if (unlikely(is_rsvd_bits_set(&vcpu->arch.mmu, pte, 326 if (unlikely(FNAME(is_rsvd_bits_set)(mmu, pte,
222 walker->level))) { 327 walker->level))) {
223 errcode |= PFERR_RSVD_MASK | PFERR_PRESENT_MASK; 328 errcode |= PFERR_RSVD_MASK | PFERR_PRESENT_MASK;
224 goto error; 329 goto error;
225 } 330 }
226 331
227 accessed_dirty &= pte; 332 accessed_dirty &= pte;
228 pte_access = pt_access & gpte_access(vcpu, pte); 333 pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);
229 334
230 walker->ptes[walker->level - 1] = pte; 335 walker->ptes[walker->level - 1] = pte;
231 } while (!is_last_gpte(mmu, walker->level, pte)); 336 } while (!is_last_gpte(mmu, walker->level, pte));
@@ -248,13 +353,15 @@ retry_walk:
248 walker->gfn = real_gpa >> PAGE_SHIFT; 353 walker->gfn = real_gpa >> PAGE_SHIFT;
249 354
250 if (!write_fault) 355 if (!write_fault)
251 protect_clean_gpte(&pte_access, pte); 356 FNAME(protect_clean_gpte)(&pte_access, pte);
252 else 357 else
253 /* 358 /*
254 * On a write fault, fold the dirty bit into accessed_dirty by 359 * On a write fault, fold the dirty bit into accessed_dirty.
255 * shifting it one place right. 360 * For modes without A/D bits support accessed_dirty will be
361 * always clear.
256 */ 362 */
257 accessed_dirty &= pte >> (PT_DIRTY_SHIFT - PT_ACCESSED_SHIFT); 363 accessed_dirty &= pte >>
364 (PT_GUEST_DIRTY_SHIFT - PT_GUEST_ACCESSED_SHIFT);
258 365
259 if (unlikely(!accessed_dirty)) { 366 if (unlikely(!accessed_dirty)) {
260 ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, write_fault); 367 ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, write_fault);
@@ -279,6 +386,25 @@ error:
279 walker->fault.vector = PF_VECTOR; 386 walker->fault.vector = PF_VECTOR;
280 walker->fault.error_code_valid = true; 387 walker->fault.error_code_valid = true;
281 walker->fault.error_code = errcode; 388 walker->fault.error_code = errcode;
389
390#if PTTYPE == PTTYPE_EPT
391 /*
392 * Use PFERR_RSVD_MASK in error_code to tell if an EPT
393 * misconfiguration needs to be injected. The detection is
394 * done by is_rsvd_bits_set() above.
395 *
396 * We set up the value of exit_qualification to inject:
397 * [2:0] - Derive from [2:0] of real exit_qualification at EPT violation
398 * [5:3] - Calculated by the page walk of the guest EPT page tables
399 * [8:7] - Derived from [8:7] of real exit_qualification
400 *
401 * The other bits are set to 0.
402 */
403 if (!(errcode & PFERR_RSVD_MASK)) {
404 vcpu->arch.exit_qualification &= 0x187;
405 vcpu->arch.exit_qualification |= ((pt_access & pte) & 0x7) << 3;
406 }
407#endif
282 walker->fault.address = addr; 408 walker->fault.address = addr;
283 walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu; 409 walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;
284 410
@@ -293,6 +419,7 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
293 access); 419 access);
294} 420}
295 421
422#if PTTYPE != PTTYPE_EPT
296static int FNAME(walk_addr_nested)(struct guest_walker *walker, 423static int FNAME(walk_addr_nested)(struct guest_walker *walker,
297 struct kvm_vcpu *vcpu, gva_t addr, 424 struct kvm_vcpu *vcpu, gva_t addr,
298 u32 access) 425 u32 access)
@@ -300,6 +427,7 @@ static int FNAME(walk_addr_nested)(struct guest_walker *walker,
300 return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu, 427 return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu,
301 addr, access); 428 addr, access);
302} 429}
430#endif
303 431
304static bool 432static bool
305FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, 433FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
@@ -309,14 +437,14 @@ FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
309 gfn_t gfn; 437 gfn_t gfn;
310 pfn_t pfn; 438 pfn_t pfn;
311 439
312 if (prefetch_invalid_gpte(vcpu, sp, spte, gpte)) 440 if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
313 return false; 441 return false;
314 442
315 pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte); 443 pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
316 444
317 gfn = gpte_to_gfn(gpte); 445 gfn = gpte_to_gfn(gpte);
318 pte_access = sp->role.access & gpte_access(vcpu, gpte); 446 pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
319 protect_clean_gpte(&pte_access, gpte); 447 FNAME(protect_clean_gpte)(&pte_access, gpte);
320 pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn, 448 pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
321 no_dirty_log && (pte_access & ACC_WRITE_MASK)); 449 no_dirty_log && (pte_access & ACC_WRITE_MASK));
322 if (is_error_pfn(pfn)) 450 if (is_error_pfn(pfn))
@@ -446,7 +574,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
446 goto out_gpte_changed; 574 goto out_gpte_changed;
447 575
448 if (sp) 576 if (sp)
449 link_shadow_page(it.sptep, sp); 577 link_shadow_page(it.sptep, sp, PT_GUEST_ACCESSED_MASK);
450 } 578 }
451 579
452 for (; 580 for (;
@@ -466,7 +594,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
466 594
467 sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, it.level-1, 595 sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, it.level-1,
468 true, direct_access, it.sptep); 596 true, direct_access, it.sptep);
469 link_shadow_page(it.sptep, sp); 597 link_shadow_page(it.sptep, sp, PT_GUEST_ACCESSED_MASK);
470 } 598 }
471 599
472 clear_sp_write_flooding_count(it.sptep); 600 clear_sp_write_flooding_count(it.sptep);
@@ -727,6 +855,7 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
727 return gpa; 855 return gpa;
728} 856}
729 857
858#if PTTYPE != PTTYPE_EPT
730static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr, 859static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
731 u32 access, 860 u32 access,
732 struct x86_exception *exception) 861 struct x86_exception *exception)
@@ -745,6 +874,7 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
745 874
746 return gpa; 875 return gpa;
747} 876}
877#endif
748 878
749/* 879/*
750 * Using the cached information from sp->gfns is safe because: 880 * Using the cached information from sp->gfns is safe because:
@@ -785,15 +915,15 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
785 sizeof(pt_element_t))) 915 sizeof(pt_element_t)))
786 return -EINVAL; 916 return -EINVAL;
787 917
788 if (prefetch_invalid_gpte(vcpu, sp, &sp->spt[i], gpte)) { 918 if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
789 vcpu->kvm->tlbs_dirty++; 919 vcpu->kvm->tlbs_dirty++;
790 continue; 920 continue;
791 } 921 }
792 922
793 gfn = gpte_to_gfn(gpte); 923 gfn = gpte_to_gfn(gpte);
794 pte_access = sp->role.access; 924 pte_access = sp->role.access;
795 pte_access &= gpte_access(vcpu, gpte); 925 pte_access &= FNAME(gpte_access)(vcpu, gpte);
796 protect_clean_gpte(&pte_access, gpte); 926 FNAME(protect_clean_gpte)(&pte_access, gpte);
797 927
798 if (sync_mmio_spte(vcpu->kvm, &sp->spt[i], gfn, pte_access, 928 if (sync_mmio_spte(vcpu->kvm, &sp->spt[i], gfn, pte_access,
799 &nr_present)) 929 &nr_present))
@@ -830,3 +960,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
830#undef gpte_to_gfn 960#undef gpte_to_gfn
831#undef gpte_to_gfn_lvl 961#undef gpte_to_gfn_lvl
832#undef CMPXCHG 962#undef CMPXCHG
963#undef PT_GUEST_ACCESSED_MASK
964#undef PT_GUEST_DIRTY_MASK
965#undef PT_GUEST_DIRTY_SHIFT
966#undef PT_GUEST_ACCESSED_SHIFT
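
The four new #undefs above close out the template pattern that paging_tmpl.h relies on: the file is included once per PTTYPE, and each inclusion now defines its own guest accessed/dirty bit positions, which is what makes an EPT instantiation possible. A reduced, self-contained sketch of that pattern (simplified names, not the kernel code; bit positions follow the Intel SDM, bit 5 for legacy paging and bit 8 for EPT with A/D bits enabled):

	#include <stdio.h>
	#include <stdint.h>

	/*
	 * Each "instantiation" defines the guest accessed mask, generates a
	 * uniquely named function, then #undefs the macro for the next round.
	 */
	#define PT_GUEST_ACCESSED_MASK (1ull << 5)	/* legacy paging */
	static int pt64_is_accessed(uint64_t pte)
	{
		return !!(pte & PT_GUEST_ACCESSED_MASK);
	}
	#undef PT_GUEST_ACCESSED_MASK

	#define PT_GUEST_ACCESSED_MASK (1ull << 8)	/* EPT A/D format */
	static int ept_is_accessed(uint64_t pte)
	{
		return !!(pte & PT_GUEST_ACCESSED_MASK);
	}
	#undef PT_GUEST_ACCESSED_MASK

	int main(void)
	{
		printf("%d %d\n", pt64_is_accessed(1ull << 5), ept_is_accessed(1ull << 8));
		return 0;
	}
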
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index c53e797e7369..5c4f63151b4d 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -160,7 +160,7 @@ static void stop_counter(struct kvm_pmc *pmc)
160 160
161static void reprogram_counter(struct kvm_pmc *pmc, u32 type, 161static void reprogram_counter(struct kvm_pmc *pmc, u32 type,
162 unsigned config, bool exclude_user, bool exclude_kernel, 162 unsigned config, bool exclude_user, bool exclude_kernel,
163 bool intr) 163 bool intr, bool in_tx, bool in_tx_cp)
164{ 164{
165 struct perf_event *event; 165 struct perf_event *event;
166 struct perf_event_attr attr = { 166 struct perf_event_attr attr = {
@@ -173,6 +173,10 @@ static void reprogram_counter(struct kvm_pmc *pmc, u32 type,
173 .exclude_kernel = exclude_kernel, 173 .exclude_kernel = exclude_kernel,
174 .config = config, 174 .config = config,
175 }; 175 };
176 if (in_tx)
177 attr.config |= HSW_IN_TX;
178 if (in_tx_cp)
179 attr.config |= HSW_IN_TX_CHECKPOINTED;
176 180
177 attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc); 181 attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc);
178 182
@@ -226,7 +230,9 @@ static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
226 230
227 if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE | 231 if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
228 ARCH_PERFMON_EVENTSEL_INV | 232 ARCH_PERFMON_EVENTSEL_INV |
229 ARCH_PERFMON_EVENTSEL_CMASK))) { 233 ARCH_PERFMON_EVENTSEL_CMASK |
234 HSW_IN_TX |
235 HSW_IN_TX_CHECKPOINTED))) {
230 config = find_arch_event(&pmc->vcpu->arch.pmu, event_select, 236 config = find_arch_event(&pmc->vcpu->arch.pmu, event_select,
231 unit_mask); 237 unit_mask);
232 if (config != PERF_COUNT_HW_MAX) 238 if (config != PERF_COUNT_HW_MAX)
@@ -239,7 +245,9 @@ static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
239 reprogram_counter(pmc, type, config, 245 reprogram_counter(pmc, type, config,
240 !(eventsel & ARCH_PERFMON_EVENTSEL_USR), 246 !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
241 !(eventsel & ARCH_PERFMON_EVENTSEL_OS), 247 !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
242 eventsel & ARCH_PERFMON_EVENTSEL_INT); 248 eventsel & ARCH_PERFMON_EVENTSEL_INT,
249 (eventsel & HSW_IN_TX),
250 (eventsel & HSW_IN_TX_CHECKPOINTED));
243} 251}
244 252
245static void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 en_pmi, int idx) 253static void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 en_pmi, int idx)
@@ -256,7 +264,7 @@ static void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 en_pmi, int idx)
256 arch_events[fixed_pmc_events[idx]].event_type, 264 arch_events[fixed_pmc_events[idx]].event_type,
257 !(en & 0x2), /* exclude user */ 265 !(en & 0x2), /* exclude user */
258 !(en & 0x1), /* exclude kernel */ 266 !(en & 0x1), /* exclude kernel */
259 pmi); 267 pmi, false, false);
260} 268}
261 269
262static inline u8 fixed_en_pmi(u64 ctrl, int idx) 270static inline u8 fixed_en_pmi(u64 ctrl, int idx)
@@ -408,7 +416,7 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
408 } else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) { 416 } else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
409 if (data == pmc->eventsel) 417 if (data == pmc->eventsel)
410 return 0; 418 return 0;
411 if (!(data & 0xffffffff00200000ull)) { 419 if (!(data & pmu->reserved_bits)) {
412 reprogram_gp_counter(pmc, data); 420 reprogram_gp_counter(pmc, data);
413 return 0; 421 return 0;
414 } 422 }
@@ -450,6 +458,7 @@ void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu)
450 pmu->counter_bitmask[KVM_PMC_GP] = 0; 458 pmu->counter_bitmask[KVM_PMC_GP] = 0;
451 pmu->counter_bitmask[KVM_PMC_FIXED] = 0; 459 pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
452 pmu->version = 0; 460 pmu->version = 0;
461 pmu->reserved_bits = 0xffffffff00200000ull;
453 462
454 entry = kvm_find_cpuid_entry(vcpu, 0xa, 0); 463 entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
455 if (!entry) 464 if (!entry)
@@ -478,6 +487,12 @@ void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu)
478 pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) | 487 pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) |
479 (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED); 488 (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
480 pmu->global_ctrl_mask = ~pmu->global_ctrl; 489 pmu->global_ctrl_mask = ~pmu->global_ctrl;
490
491 entry = kvm_find_cpuid_entry(vcpu, 7, 0);
492 if (entry &&
493 (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
494 (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM)))
495 pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;
481} 496}
482 497
483void kvm_pmu_init(struct kvm_vcpu *vcpu) 498void kvm_pmu_init(struct kvm_vcpu *vcpu)
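
The reserved_bits change above is the key to the Haswell TSX bits: eventsel writes are rejected with !(data & pmu->reserved_bits), so un-reserving bits 32 and 33 is exactly what lets HSW_IN_TX/HSW_IN_TX_CHECKPOINTED through. A small demo of that check (the eventsel value is hypothetical):

	#include <stdio.h>
	#include <stdint.h>

	#define HSW_IN_TX		(1ULL << 32)
	#define HSW_IN_TX_CHECKPOINTED	(1ULL << 33)

	int main(void)
	{
		/* default from kvm_pmu_cpuid_update(); bits 32/33 start out reserved */
		uint64_t reserved_bits = 0xffffffff00200000ull;
		/* hypothetical eventsel asking for an IN_TX-qualified count */
		uint64_t eventsel = HSW_IN_TX | 0xc0;

		printf("rejected without TSX: %d\n", !!(eventsel & reserved_bits));

		/* when CPUID advertises HLE/RTM, the XOR un-reserves the two bits */
		reserved_bits ^= HSW_IN_TX | HSW_IN_TX_CHECKPOINTED;
		printf("rejected with TSX:    %d\n", !!(eventsel & reserved_bits));
		return 0;
	}
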
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 064d0be67ecc..1f1da43ff2a2 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -373,6 +373,7 @@ struct nested_vmx {
373 * we must keep them pinned while L2 runs. 373 * we must keep them pinned while L2 runs.
374 */ 374 */
375 struct page *apic_access_page; 375 struct page *apic_access_page;
376 u64 msr_ia32_feature_control;
376}; 377};
377 378
378#define POSTED_INTR_ON 0 379#define POSTED_INTR_ON 0
@@ -711,10 +712,10 @@ static void nested_release_page_clean(struct page *page)
711 kvm_release_page_clean(page); 712 kvm_release_page_clean(page);
712} 713}
713 714
715static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu);
714static u64 construct_eptp(unsigned long root_hpa); 716static u64 construct_eptp(unsigned long root_hpa);
715static void kvm_cpu_vmxon(u64 addr); 717static void kvm_cpu_vmxon(u64 addr);
716static void kvm_cpu_vmxoff(void); 718static void kvm_cpu_vmxoff(void);
717static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
718static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr); 719static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
719static void vmx_set_segment(struct kvm_vcpu *vcpu, 720static void vmx_set_segment(struct kvm_vcpu *vcpu,
720 struct kvm_segment *var, int seg); 721 struct kvm_segment *var, int seg);
@@ -1039,12 +1040,16 @@ static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
1039 (vmcs12->secondary_vm_exec_control & bit); 1040 (vmcs12->secondary_vm_exec_control & bit);
1040} 1041}
1041 1042
1042static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12, 1043static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12)
1043 struct kvm_vcpu *vcpu)
1044{ 1044{
1045 return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS; 1045 return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
1046} 1046}
1047 1047
1048static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12)
1049{
1050 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT);
1051}
1052
1048static inline bool is_exception(u32 intr_info) 1053static inline bool is_exception(u32 intr_info)
1049{ 1054{
1050 return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK)) 1055 return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
@@ -2155,6 +2160,7 @@ static u32 nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high;
2155static u32 nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high; 2160static u32 nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high;
2156static u32 nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high; 2161static u32 nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high;
2157static u32 nested_vmx_misc_low, nested_vmx_misc_high; 2162static u32 nested_vmx_misc_low, nested_vmx_misc_high;
2163static u32 nested_vmx_ept_caps;
2158static __init void nested_vmx_setup_ctls_msrs(void) 2164static __init void nested_vmx_setup_ctls_msrs(void)
2159{ 2165{
2160 /* 2166 /*
@@ -2190,14 +2196,17 @@ static __init void nested_vmx_setup_ctls_msrs(void)
2190 * If bit 55 of VMX_BASIC is off, bits 0-8 and 10, 11, 13, 14, 16 and 2196 * If bit 55 of VMX_BASIC is off, bits 0-8 and 10, 11, 13, 14, 16 and
2191 * 17 must be 1. 2197 * 17 must be 1.
2192 */ 2198 */
2199 rdmsr(MSR_IA32_VMX_EXIT_CTLS,
2200 nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high);
2193 nested_vmx_exit_ctls_low = VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR; 2201 nested_vmx_exit_ctls_low = VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
2194 /* Note that guest use of VM_EXIT_ACK_INTR_ON_EXIT is not supported. */ 2202 /* Note that guest use of VM_EXIT_ACK_INTR_ON_EXIT is not supported. */
2203 nested_vmx_exit_ctls_high &=
2195#ifdef CONFIG_X86_64 2204#ifdef CONFIG_X86_64
2196 nested_vmx_exit_ctls_high = VM_EXIT_HOST_ADDR_SPACE_SIZE; 2205 VM_EXIT_HOST_ADDR_SPACE_SIZE |
2197#else
2198 nested_vmx_exit_ctls_high = 0;
2199#endif 2206#endif
2200 nested_vmx_exit_ctls_high |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR; 2207 VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT;
2208 nested_vmx_exit_ctls_high |= (VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
2209 VM_EXIT_LOAD_IA32_EFER);
2201 2210
2202 /* entry controls */ 2211 /* entry controls */
2203 rdmsr(MSR_IA32_VMX_ENTRY_CTLS, 2212 rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
@@ -2205,8 +2214,12 @@ static __init void nested_vmx_setup_ctls_msrs(void)
2205 /* If bit 55 of VMX_BASIC is off, bits 0-8 and 12 must be 1. */ 2214 /* If bit 55 of VMX_BASIC is off, bits 0-8 and 12 must be 1. */
2206 nested_vmx_entry_ctls_low = VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR; 2215 nested_vmx_entry_ctls_low = VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
2207 nested_vmx_entry_ctls_high &= 2216 nested_vmx_entry_ctls_high &=
2208 VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_IA32E_MODE; 2217#ifdef CONFIG_X86_64
2209 nested_vmx_entry_ctls_high |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR; 2218 VM_ENTRY_IA32E_MODE |
2219#endif
2220 VM_ENTRY_LOAD_IA32_PAT;
2221 nested_vmx_entry_ctls_high |= (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR |
2222 VM_ENTRY_LOAD_IA32_EFER);
2210 2223
2211 /* cpu-based controls */ 2224 /* cpu-based controls */
2212 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, 2225 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
@@ -2241,6 +2254,22 @@ static __init void nested_vmx_setup_ctls_msrs(void)
2241 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | 2254 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2242 SECONDARY_EXEC_WBINVD_EXITING; 2255 SECONDARY_EXEC_WBINVD_EXITING;
2243 2256
2257 if (enable_ept) {
 2258 /* nested EPT: expose EPT to L1 as well */
2259 nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_ENABLE_EPT;
2260 nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
2261 VMX_EPTP_WB_BIT | VMX_EPT_INVEPT_BIT;
2262 nested_vmx_ept_caps &= vmx_capability.ept;
2263 /*
 2264	 * Since INVEPT is completely emulated, we support both global
 2265	 * and context invalidation, independent of what the host CPU
 2266	 * supports.
2267 */
2268 nested_vmx_ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
2269 VMX_EPT_EXTENT_CONTEXT_BIT;
2270 } else
2271 nested_vmx_ept_caps = 0;
2272
2244 /* miscellaneous data */ 2273 /* miscellaneous data */
2245 rdmsr(MSR_IA32_VMX_MISC, nested_vmx_misc_low, nested_vmx_misc_high); 2274 rdmsr(MSR_IA32_VMX_MISC, nested_vmx_misc_low, nested_vmx_misc_high);
2246 nested_vmx_misc_low &= VMX_MISC_PREEMPTION_TIMER_RATE_MASK | 2275 nested_vmx_misc_low &= VMX_MISC_PREEMPTION_TIMER_RATE_MASK |
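
The EPT capability setup above follows a common pattern for nested virtualization: start from the feature bits KVM can emulate, AND with what the hardware reports, then OR back in the bits that are emulated entirely in software (here, both INVEPT extents). A standalone sketch, with bit values as in asm/vmx.h and a made-up hardware MSR value:

	#include <stdio.h>
	#include <stdint.h>

	#define VMX_EPT_PAGE_WALK_4_BIT		(1ull << 6)
	#define VMX_EPTP_WB_BIT			(1ull << 14)
	#define VMX_EPT_INVEPT_BIT		(1ull << 20)
	#define VMX_EPT_EXTENT_CONTEXT_BIT	(1ull << 25)
	#define VMX_EPT_EXTENT_GLOBAL_BIT	(1ull << 26)

	int main(void)
	{
		uint64_t hw_ept_caps = 0x06734141ull;	/* hypothetical rdmsr result */
		uint64_t caps;

		/* advertise only what is both emulated and supported in hardware... */
		caps = VMX_EPT_PAGE_WALK_4_BIT | VMX_EPTP_WB_BIT | VMX_EPT_INVEPT_BIT;
		caps &= hw_ept_caps;
		/* ...except the INVEPT extents, which are emulated unconditionally */
		caps |= VMX_EPT_EXTENT_GLOBAL_BIT | VMX_EPT_EXTENT_CONTEXT_BIT;

		printf("nested EPT caps: %#llx\n", (unsigned long long)caps);
		return 0;
	}
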
@@ -2282,8 +2311,11 @@ static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
2282 2311
2283 switch (msr_index) { 2312 switch (msr_index) {
2284 case MSR_IA32_FEATURE_CONTROL: 2313 case MSR_IA32_FEATURE_CONTROL:
2285 *pdata = 0; 2314 if (nested_vmx_allowed(vcpu)) {
2286 break; 2315 *pdata = to_vmx(vcpu)->nested.msr_ia32_feature_control;
2316 break;
2317 }
2318 return 0;
2287 case MSR_IA32_VMX_BASIC: 2319 case MSR_IA32_VMX_BASIC:
2288 /* 2320 /*
2289 * This MSR reports some information about VMX support. We 2321 * This MSR reports some information about VMX support. We
@@ -2346,8 +2378,8 @@ static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
2346 nested_vmx_secondary_ctls_high); 2378 nested_vmx_secondary_ctls_high);
2347 break; 2379 break;
2348 case MSR_IA32_VMX_EPT_VPID_CAP: 2380 case MSR_IA32_VMX_EPT_VPID_CAP:
2349 /* Currently, no nested ept or nested vpid */ 2381 /* Currently, no nested vpid support */
2350 *pdata = 0; 2382 *pdata = nested_vmx_ept_caps;
2351 break; 2383 break;
2352 default: 2384 default:
2353 return 0; 2385 return 0;
@@ -2356,14 +2388,24 @@ static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
2356 return 1; 2388 return 1;
2357} 2389}
2358 2390
2359static int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) 2391static int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2360{ 2392{
2393 u32 msr_index = msr_info->index;
2394 u64 data = msr_info->data;
2395 bool host_initialized = msr_info->host_initiated;
2396
2361 if (!nested_vmx_allowed(vcpu)) 2397 if (!nested_vmx_allowed(vcpu))
2362 return 0; 2398 return 0;
2363 2399
2364 if (msr_index == MSR_IA32_FEATURE_CONTROL) 2400 if (msr_index == MSR_IA32_FEATURE_CONTROL) {
2365 /* TODO: the right thing. */ 2401 if (!host_initialized &&
2402 to_vmx(vcpu)->nested.msr_ia32_feature_control
2403 & FEATURE_CONTROL_LOCKED)
2404 return 0;
2405 to_vmx(vcpu)->nested.msr_ia32_feature_control = data;
2366 return 1; 2406 return 1;
2407 }
2408
2367 /* 2409 /*
2368 * No need to treat VMX capability MSRs specially: If we don't handle 2410 * No need to treat VMX capability MSRs specially: If we don't handle
2369 * them, handle_wrmsr will #GP(0), which is correct (they are readonly) 2411 * them, handle_wrmsr will #GP(0), which is correct (they are readonly)
@@ -2494,7 +2536,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2494 return 1; 2536 return 1;
2495 /* Otherwise falls through */ 2537 /* Otherwise falls through */
2496 default: 2538 default:
2497 if (vmx_set_vmx_msr(vcpu, msr_index, data)) 2539 if (vmx_set_vmx_msr(vcpu, msr_info))
2498 break; 2540 break;
2499 msr = find_msr_entry(vmx, msr_index); 2541 msr = find_msr_entry(vmx, msr_index);
2500 if (msr) { 2542 if (msr) {
@@ -5302,9 +5344,13 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
5302 5344
 5303 /* Is it a write fault? */ 5345 /* Is it a write fault? */
5304 error_code = exit_qualification & (1U << 1); 5346 error_code = exit_qualification & (1U << 1);
 5347 /* Is it a fetch fault? */
5348 error_code |= (exit_qualification & (1U << 2)) << 2;
 5305 /* Is the EPT page table present? */ 5349 /* Is the EPT page table present? */
5306 error_code |= (exit_qualification >> 3) & 0x1; 5350 error_code |= (exit_qualification >> 3) & 0x1;
5307 5351
5352 vcpu->arch.exit_qualification = exit_qualification;
5353
5308 return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0); 5354 return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
5309} 5355}
5310 5356
@@ -5438,7 +5484,8 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
5438 5484
5439 err = emulate_instruction(vcpu, EMULTYPE_NO_REEXECUTE); 5485 err = emulate_instruction(vcpu, EMULTYPE_NO_REEXECUTE);
5440 5486
5441 if (err == EMULATE_DO_MMIO) { 5487 if (err == EMULATE_USER_EXIT) {
5488 ++vcpu->stat.mmio_exits;
5442 ret = 0; 5489 ret = 0;
5443 goto out; 5490 goto out;
5444 } 5491 }
@@ -5567,8 +5614,47 @@ static void nested_free_all_saved_vmcss(struct vcpu_vmx *vmx)
5567 free_loaded_vmcs(&vmx->vmcs01); 5614 free_loaded_vmcs(&vmx->vmcs01);
5568} 5615}
5569 5616
5617/*
5618 * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
5619 * set the success or error code of an emulated VMX instruction, as specified
5620 * by Vol 2B, VMX Instruction Reference, "Conventions".
5621 */
5622static void nested_vmx_succeed(struct kvm_vcpu *vcpu)
5623{
5624 vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
5625 & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
5626 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
5627}
5628
5629static void nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
5630{
5631 vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
5632 & ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
5633 X86_EFLAGS_SF | X86_EFLAGS_OF))
5634 | X86_EFLAGS_CF);
5635}
5636
5570static void nested_vmx_failValid(struct kvm_vcpu *vcpu, 5637static void nested_vmx_failValid(struct kvm_vcpu *vcpu,
5571 u32 vm_instruction_error); 5638 u32 vm_instruction_error)
5639{
5640 if (to_vmx(vcpu)->nested.current_vmptr == -1ull) {
5641 /*
5642 * failValid writes the error number to the current VMCS, which
 5643 * can't be done if there isn't a current VMCS.
5644 */
5645 nested_vmx_failInvalid(vcpu);
5646 return;
5647 }
5648 vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
5649 & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
5650 X86_EFLAGS_SF | X86_EFLAGS_OF))
5651 | X86_EFLAGS_ZF);
5652 get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
5653 /*
5654 * We don't need to force a shadow sync because
5655 * VM_INSTRUCTION_ERROR is not shadowed
5656 */
5657}
5572 5658
5573/* 5659/*
5574 * Emulate the VMXON instruction. 5660 * Emulate the VMXON instruction.
@@ -5583,6 +5669,8 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
5583 struct kvm_segment cs; 5669 struct kvm_segment cs;
5584 struct vcpu_vmx *vmx = to_vmx(vcpu); 5670 struct vcpu_vmx *vmx = to_vmx(vcpu);
5585 struct vmcs *shadow_vmcs; 5671 struct vmcs *shadow_vmcs;
5672 const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
5673 | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
5586 5674
5587 /* The Intel VMX Instruction Reference lists a bunch of bits that 5675 /* The Intel VMX Instruction Reference lists a bunch of bits that
5588 * are prerequisite to running VMXON, most notably cr4.VMXE must be 5676 * are prerequisite to running VMXON, most notably cr4.VMXE must be
@@ -5611,6 +5699,13 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
5611 skip_emulated_instruction(vcpu); 5699 skip_emulated_instruction(vcpu);
5612 return 1; 5700 return 1;
5613 } 5701 }
5702
5703 if ((vmx->nested.msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
5704 != VMXON_NEEDED_FEATURES) {
5705 kvm_inject_gp(vcpu, 0);
5706 return 1;
5707 }
5708
5614 if (enable_shadow_vmcs) { 5709 if (enable_shadow_vmcs) {
5615 shadow_vmcs = alloc_vmcs(); 5710 shadow_vmcs = alloc_vmcs();
5616 if (!shadow_vmcs) 5711 if (!shadow_vmcs)
@@ -5628,6 +5723,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
5628 vmx->nested.vmxon = true; 5723 vmx->nested.vmxon = true;
5629 5724
5630 skip_emulated_instruction(vcpu); 5725 skip_emulated_instruction(vcpu);
5726 nested_vmx_succeed(vcpu);
5631 return 1; 5727 return 1;
5632} 5728}
5633 5729
@@ -5712,6 +5808,7 @@ static int handle_vmoff(struct kvm_vcpu *vcpu)
5712 return 1; 5808 return 1;
5713 free_nested(to_vmx(vcpu)); 5809 free_nested(to_vmx(vcpu));
5714 skip_emulated_instruction(vcpu); 5810 skip_emulated_instruction(vcpu);
5811 nested_vmx_succeed(vcpu);
5715 return 1; 5812 return 1;
5716} 5813}
5717 5814
@@ -5768,48 +5865,6 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
5768 return 0; 5865 return 0;
5769} 5866}
5770 5867
5771/*
5772 * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
5773 * set the success or error code of an emulated VMX instruction, as specified
5774 * by Vol 2B, VMX Instruction Reference, "Conventions".
5775 */
5776static void nested_vmx_succeed(struct kvm_vcpu *vcpu)
5777{
5778 vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
5779 & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
5780 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
5781}
5782
5783static void nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
5784{
5785 vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
5786 & ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
5787 X86_EFLAGS_SF | X86_EFLAGS_OF))
5788 | X86_EFLAGS_CF);
5789}
5790
5791static void nested_vmx_failValid(struct kvm_vcpu *vcpu,
5792 u32 vm_instruction_error)
5793{
5794 if (to_vmx(vcpu)->nested.current_vmptr == -1ull) {
5795 /*
5796 * failValid writes the error number to the current VMCS, which
5797 * can't be done there isn't a current VMCS.
5798 */
5799 nested_vmx_failInvalid(vcpu);
5800 return;
5801 }
5802 vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
5803 & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
5804 X86_EFLAGS_SF | X86_EFLAGS_OF))
5805 | X86_EFLAGS_ZF);
5806 get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
5807 /*
5808 * We don't need to force a shadow sync because
5809 * VM_INSTRUCTION_ERROR is not shadowed
5810 */
5811}
5812
5813/* Emulate the VMCLEAR instruction */ 5868/* Emulate the VMCLEAR instruction */
5814static int handle_vmclear(struct kvm_vcpu *vcpu) 5869static int handle_vmclear(struct kvm_vcpu *vcpu)
5815{ 5870{
@@ -5972,8 +6027,8 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
5972 unsigned long field; 6027 unsigned long field;
5973 u64 field_value; 6028 u64 field_value;
5974 struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs; 6029 struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs;
5975 unsigned long *fields = (unsigned long *)shadow_read_write_fields; 6030 const unsigned long *fields = shadow_read_write_fields;
5976 int num_fields = max_shadow_read_write_fields; 6031 const int num_fields = max_shadow_read_write_fields;
5977 6032
5978 vmcs_load(shadow_vmcs); 6033 vmcs_load(shadow_vmcs);
5979 6034
@@ -6002,12 +6057,11 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
6002 6057
6003static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx) 6058static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
6004{ 6059{
6005 unsigned long *fields[] = { 6060 const unsigned long *fields[] = {
6006 (unsigned long *)shadow_read_write_fields, 6061 shadow_read_write_fields,
6007 (unsigned long *)shadow_read_only_fields 6062 shadow_read_only_fields
6008 }; 6063 };
6009 int num_lists = ARRAY_SIZE(fields); 6064 const int max_fields[] = {
6010 int max_fields[] = {
6011 max_shadow_read_write_fields, 6065 max_shadow_read_write_fields,
6012 max_shadow_read_only_fields 6066 max_shadow_read_only_fields
6013 }; 6067 };
@@ -6018,7 +6072,7 @@ static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
6018 6072
6019 vmcs_load(shadow_vmcs); 6073 vmcs_load(shadow_vmcs);
6020 6074
6021 for (q = 0; q < num_lists; q++) { 6075 for (q = 0; q < ARRAY_SIZE(fields); q++) {
6022 for (i = 0; i < max_fields[q]; i++) { 6076 for (i = 0; i < max_fields[q]; i++) {
6023 field = fields[q][i]; 6077 field = fields[q][i];
6024 vmcs12_read_any(&vmx->vcpu, field, &field_value); 6078 vmcs12_read_any(&vmx->vcpu, field, &field_value);
@@ -6248,6 +6302,74 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
6248 return 1; 6302 return 1;
6249} 6303}
6250 6304
6305/* Emulate the INVEPT instruction */
6306static int handle_invept(struct kvm_vcpu *vcpu)
6307{
6308 u32 vmx_instruction_info, types;
6309 unsigned long type;
6310 gva_t gva;
6311 struct x86_exception e;
6312 struct {
6313 u64 eptp, gpa;
6314 } operand;
6315 u64 eptp_mask = ((1ull << 51) - 1) & PAGE_MASK;
6316
6317 if (!(nested_vmx_secondary_ctls_high & SECONDARY_EXEC_ENABLE_EPT) ||
6318 !(nested_vmx_ept_caps & VMX_EPT_INVEPT_BIT)) {
6319 kvm_queue_exception(vcpu, UD_VECTOR);
6320 return 1;
6321 }
6322
6323 if (!nested_vmx_check_permission(vcpu))
6324 return 1;
6325
6326 if (!kvm_read_cr0_bits(vcpu, X86_CR0_PE)) {
6327 kvm_queue_exception(vcpu, UD_VECTOR);
6328 return 1;
6329 }
6330
6331 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
6332 type = kvm_register_read(vcpu, (vmx_instruction_info >> 28) & 0xf);
6333
6334 types = (nested_vmx_ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
6335
6336 if (!(types & (1UL << type))) {
6337 nested_vmx_failValid(vcpu,
6338 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
6339 return 1;
6340 }
6341
6342 /* According to the Intel VMX instruction reference, the memory
6343 * operand is read even if it isn't needed (e.g., for type==global)
6344 */
6345 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
6346 vmx_instruction_info, &gva))
6347 return 1;
6348 if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand,
6349 sizeof(operand), &e)) {
6350 kvm_inject_page_fault(vcpu, &e);
6351 return 1;
6352 }
6353
6354 switch (type) {
6355 case VMX_EPT_EXTENT_CONTEXT:
6356 if ((operand.eptp & eptp_mask) !=
6357 (nested_ept_get_cr3(vcpu) & eptp_mask))
6358 break;
6359 case VMX_EPT_EXTENT_GLOBAL:
6360 kvm_mmu_sync_roots(vcpu);
6361 kvm_mmu_flush_tlb(vcpu);
6362 nested_vmx_succeed(vcpu);
6363 break;
6364 default:
6365 BUG_ON(1);
6366 break;
6367 }
6368
6369 skip_emulated_instruction(vcpu);
6370 return 1;
6371}
6372
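
Two details of handle_invept() above are easy to miss: bits [31:28] of VMX_INSTRUCTION_INFO name the register that holds the INVEPT type (they are not the type itself), and the VMX_EPT_EXTENT_CONTEXT case intentionally falls through to the global flush when the EPTP matches. The supported-type check can be exercised on its own; the "& 6" keeps exactly the bits for type 1 (single-context) and type 2 (global):

	#include <stdio.h>
	#include <stdint.h>

	#define VMX_EPT_EXTENT_SHIFT		24
	#define VMX_EPT_EXTENT_CONTEXT_BIT	(1u << 25)
	#define VMX_EPT_EXTENT_GLOBAL_BIT	(1u << 26)

	int main(void)
	{
		uint32_t ept_caps = VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_EXTENT_GLOBAL_BIT;
		/* "& 6" keeps only the bits for type 1 and type 2 */
		uint32_t types = (ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
		unsigned long type;

		for (type = 0; type < 4; type++)
			printf("INVEPT type %lu: %s\n", type,
			       (types & (1UL << type)) ? "supported" : "fails VMX check");
		return 0;
	}
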
6251/* 6373/*
6252 * The exit handlers return 1 if the exit was handled fully and guest execution 6374 * The exit handlers return 1 if the exit was handled fully and guest execution
6253 * may resume. Otherwise they set the kvm_run parameter to indicate what needs 6375 * may resume. Otherwise they set the kvm_run parameter to indicate what needs
@@ -6292,6 +6414,7 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
6292 [EXIT_REASON_PAUSE_INSTRUCTION] = handle_pause, 6414 [EXIT_REASON_PAUSE_INSTRUCTION] = handle_pause,
6293 [EXIT_REASON_MWAIT_INSTRUCTION] = handle_invalid_op, 6415 [EXIT_REASON_MWAIT_INSTRUCTION] = handle_invalid_op,
6294 [EXIT_REASON_MONITOR_INSTRUCTION] = handle_invalid_op, 6416 [EXIT_REASON_MONITOR_INSTRUCTION] = handle_invalid_op,
6417 [EXIT_REASON_INVEPT] = handle_invept,
6295}; 6418};
6296 6419
6297static const int kvm_vmx_max_exit_handlers = 6420static const int kvm_vmx_max_exit_handlers =
@@ -6518,6 +6641,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
6518 case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD: 6641 case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD:
6519 case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE: 6642 case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE:
6520 case EXIT_REASON_VMOFF: case EXIT_REASON_VMON: 6643 case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
6644 case EXIT_REASON_INVEPT:
6521 /* 6645 /*
6522 * VMX instructions trap unconditionally. This allows L1 to 6646 * VMX instructions trap unconditionally. This allows L1 to
6523 * emulate them for its L2 guest, i.e., allows 3-level nesting! 6647 * emulate them for its L2 guest, i.e., allows 3-level nesting!
@@ -6550,7 +6674,20 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
6550 return nested_cpu_has2(vmcs12, 6674 return nested_cpu_has2(vmcs12,
6551 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES); 6675 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
6552 case EXIT_REASON_EPT_VIOLATION: 6676 case EXIT_REASON_EPT_VIOLATION:
6677 /*
 6678	 * L0 always deals with the EPT violation. If nested EPT is
 6679	 * used, and the nested MMU code discovers that the address is
 6680	 * missing in the guest EPT table (EPT12), the EPT violation
 6681	 * will be injected with nested_ept_inject_page_fault().
6682 */
6683 return 0;
6553 case EXIT_REASON_EPT_MISCONFIG: 6684 case EXIT_REASON_EPT_MISCONFIG:
6685 /*
 6686	 * L2 never directly uses L1's EPT, but rather L0's own EPT
 6687	 * table (shadow on EPT) or a merged EPT table that L0 built
 6688	 * (EPT on EPT). So any problems with the structure of the
 6689	 * table are L0's fault.
6690 */
6554 return 0; 6691 return 0;
6555 case EXIT_REASON_PREEMPTION_TIMER: 6692 case EXIT_REASON_PREEMPTION_TIMER:
6556 return vmcs12->pin_based_vm_exec_control & 6693 return vmcs12->pin_based_vm_exec_control &
@@ -6638,7 +6775,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
6638 6775
6639 if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked && 6776 if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked &&
6640 !(is_guest_mode(vcpu) && nested_cpu_has_virtual_nmis( 6777 !(is_guest_mode(vcpu) && nested_cpu_has_virtual_nmis(
6641 get_vmcs12(vcpu), vcpu)))) { 6778 get_vmcs12(vcpu))))) {
6642 if (vmx_interrupt_allowed(vcpu)) { 6779 if (vmx_interrupt_allowed(vcpu)) {
6643 vmx->soft_vnmi_blocked = 0; 6780 vmx->soft_vnmi_blocked = 0;
6644 } else if (vmx->vnmi_blocked_time > 1000000000LL && 6781 } else if (vmx->vnmi_blocked_time > 1000000000LL &&
@@ -7326,6 +7463,48 @@ static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
7326 entry->ecx |= bit(X86_FEATURE_VMX); 7463 entry->ecx |= bit(X86_FEATURE_VMX);
7327} 7464}
7328 7465
7466static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
7467 struct x86_exception *fault)
7468{
7469 struct vmcs12 *vmcs12;
7470 nested_vmx_vmexit(vcpu);
7471 vmcs12 = get_vmcs12(vcpu);
7472
7473 if (fault->error_code & PFERR_RSVD_MASK)
7474 vmcs12->vm_exit_reason = EXIT_REASON_EPT_MISCONFIG;
7475 else
7476 vmcs12->vm_exit_reason = EXIT_REASON_EPT_VIOLATION;
7477 vmcs12->exit_qualification = vcpu->arch.exit_qualification;
7478 vmcs12->guest_physical_address = fault->address;
7479}
7480
7481/* Callbacks for nested_ept_init_mmu_context: */
7482
7483static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu)
7484{
7485 /* return the page table to be shadowed - in our case, EPT12 */
7486 return get_vmcs12(vcpu)->ept_pointer;
7487}
7488
7489static int nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
7490{
7491 int r = kvm_init_shadow_ept_mmu(vcpu, &vcpu->arch.mmu,
7492 nested_vmx_ept_caps & VMX_EPT_EXECUTE_ONLY_BIT);
7493
7494 vcpu->arch.mmu.set_cr3 = vmx_set_cr3;
7495 vcpu->arch.mmu.get_cr3 = nested_ept_get_cr3;
7496 vcpu->arch.mmu.inject_page_fault = nested_ept_inject_page_fault;
7497
7498 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
7499
7500 return r;
7501}
7502
7503static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
7504{
7505 vcpu->arch.walk_mmu = &vcpu->arch.mmu;
7506}
7507
7329/* 7508/*
7330 * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested 7509 * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
7331 * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it 7510 * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
@@ -7388,7 +7567,7 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
7388 vmcs12->guest_interruptibility_info); 7567 vmcs12->guest_interruptibility_info);
7389 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs); 7568 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
7390 kvm_set_dr(vcpu, 7, vmcs12->guest_dr7); 7569 kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
7391 vmcs_writel(GUEST_RFLAGS, vmcs12->guest_rflags); 7570 vmx_set_rflags(vcpu, vmcs12->guest_rflags);
7392 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, 7571 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
7393 vmcs12->guest_pending_dbg_exceptions); 7572 vmcs12->guest_pending_dbg_exceptions);
7394 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp); 7573 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
@@ -7508,15 +7687,24 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
7508 vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask; 7687 vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
7509 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits); 7688 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
7510 7689
7511 /* Note: IA32_MODE, LOAD_IA32_EFER are modified by vmx_set_efer below */ 7690 /* L2->L1 exit controls are emulated - the hardware exit is to L0 so
7512 vmcs_write32(VM_EXIT_CONTROLS, 7691 * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER
7513 vmcs12->vm_exit_controls | vmcs_config.vmexit_ctrl); 7692 * bits are further modified by vmx_set_efer() below.
7514 vmcs_write32(VM_ENTRY_CONTROLS, vmcs12->vm_entry_controls | 7693 */
7694 vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
7695
7696 /* vmcs12's VM_ENTRY_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE are
7697 * emulated by vmx_set_efer(), below.
7698 */
7699 vmcs_write32(VM_ENTRY_CONTROLS,
7700 (vmcs12->vm_entry_controls & ~VM_ENTRY_LOAD_IA32_EFER &
7701 ~VM_ENTRY_IA32E_MODE) |
7515 (vmcs_config.vmentry_ctrl & ~VM_ENTRY_IA32E_MODE)); 7702 (vmcs_config.vmentry_ctrl & ~VM_ENTRY_IA32E_MODE));
7516 7703
7517 if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) 7704 if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) {
7518 vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat); 7705 vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat);
7519 else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) 7706 vcpu->arch.pat = vmcs12->guest_ia32_pat;
7707 } else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
7520 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); 7708 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
7521 7709
7522 7710
@@ -7538,6 +7726,11 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
7538 vmx_flush_tlb(vcpu); 7726 vmx_flush_tlb(vcpu);
7539 } 7727 }
7540 7728
7729 if (nested_cpu_has_ept(vmcs12)) {
7730 kvm_mmu_unload(vcpu);
7731 nested_ept_init_mmu_context(vcpu);
7732 }
7733
7541 if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER) 7734 if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)
7542 vcpu->arch.efer = vmcs12->guest_ia32_efer; 7735 vcpu->arch.efer = vmcs12->guest_ia32_efer;
7543 else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) 7736 else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
@@ -7565,6 +7758,16 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
7565 kvm_set_cr3(vcpu, vmcs12->guest_cr3); 7758 kvm_set_cr3(vcpu, vmcs12->guest_cr3);
7566 kvm_mmu_reset_context(vcpu); 7759 kvm_mmu_reset_context(vcpu);
7567 7760
7761 /*
 7762	 * L1 may access L2's PDPTRs, so save them to construct vmcs12
7763 */
7764 if (enable_ept) {
7765 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
7766 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
7767 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
7768 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
7769 }
7770
7568 kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp); 7771 kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp);
7569 kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip); 7772 kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip);
7570} 7773}
@@ -7887,6 +8090,22 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
7887 vmcs12->guest_pending_dbg_exceptions = 8090 vmcs12->guest_pending_dbg_exceptions =
7888 vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS); 8091 vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS);
7889 8092
8093 /*
8094 * In some cases (usually, nested EPT), L2 is allowed to change its
8095 * own CR3 without exiting. If it has changed it, we must keep it.
8096 * Of course, if L0 is using shadow page tables, GUEST_CR3 was defined
8097 * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12.
8098 *
8099 * Additionally, restore L2's PDPTR to vmcs12.
8100 */
8101 if (enable_ept) {
8102 vmcs12->guest_cr3 = vmcs_read64(GUEST_CR3);
8103 vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0);
8104 vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1);
8105 vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2);
8106 vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3);
8107 }
8108
7890 vmcs12->vm_entry_controls = 8109 vmcs12->vm_entry_controls =
7891 (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) | 8110 (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
7892 (vmcs_read32(VM_ENTRY_CONTROLS) & VM_ENTRY_IA32E_MODE); 8111 (vmcs_read32(VM_ENTRY_CONTROLS) & VM_ENTRY_IA32E_MODE);
@@ -7948,6 +8167,8 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
7948static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, 8167static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
7949 struct vmcs12 *vmcs12) 8168 struct vmcs12 *vmcs12)
7950{ 8169{
8170 struct kvm_segment seg;
8171
7951 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) 8172 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
7952 vcpu->arch.efer = vmcs12->host_ia32_efer; 8173 vcpu->arch.efer = vmcs12->host_ia32_efer;
7953 else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) 8174 else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
@@ -7982,7 +8203,9 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
7982 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); 8203 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
7983 kvm_set_cr4(vcpu, vmcs12->host_cr4); 8204 kvm_set_cr4(vcpu, vmcs12->host_cr4);
7984 8205
7985 /* shadow page tables on either EPT or shadow page tables */ 8206 if (nested_cpu_has_ept(vmcs12))
8207 nested_ept_uninit_mmu_context(vcpu);
8208
7986 kvm_set_cr3(vcpu, vmcs12->host_cr3); 8209 kvm_set_cr3(vcpu, vmcs12->host_cr3);
7987 kvm_mmu_reset_context(vcpu); 8210 kvm_mmu_reset_context(vcpu);
7988 8211
@@ -8001,23 +8224,61 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
8001 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip); 8224 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
8002 vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base); 8225 vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
8003 vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base); 8226 vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
8004 vmcs_writel(GUEST_TR_BASE, vmcs12->host_tr_base); 8227
8005 vmcs_writel(GUEST_GS_BASE, vmcs12->host_gs_base); 8228 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) {
8006 vmcs_writel(GUEST_FS_BASE, vmcs12->host_fs_base);
8007 vmcs_write16(GUEST_ES_SELECTOR, vmcs12->host_es_selector);
8008 vmcs_write16(GUEST_CS_SELECTOR, vmcs12->host_cs_selector);
8009 vmcs_write16(GUEST_SS_SELECTOR, vmcs12->host_ss_selector);
8010 vmcs_write16(GUEST_DS_SELECTOR, vmcs12->host_ds_selector);
8011 vmcs_write16(GUEST_FS_SELECTOR, vmcs12->host_fs_selector);
8012 vmcs_write16(GUEST_GS_SELECTOR, vmcs12->host_gs_selector);
8013 vmcs_write16(GUEST_TR_SELECTOR, vmcs12->host_tr_selector);
8014
8015 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT)
8016 vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat); 8229 vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat);
8230 vcpu->arch.pat = vmcs12->host_ia32_pat;
8231 }
8017 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) 8232 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
8018 vmcs_write64(GUEST_IA32_PERF_GLOBAL_CTRL, 8233 vmcs_write64(GUEST_IA32_PERF_GLOBAL_CTRL,
8019 vmcs12->host_ia32_perf_global_ctrl); 8234 vmcs12->host_ia32_perf_global_ctrl);
8020 8235
8236 /* Set L1 segment info according to Intel SDM
 8237	 * 27.5.2 "Loading Host Segment and Descriptor-Table Registers" */
8238 seg = (struct kvm_segment) {
8239 .base = 0,
8240 .limit = 0xFFFFFFFF,
8241 .selector = vmcs12->host_cs_selector,
8242 .type = 11,
8243 .present = 1,
8244 .s = 1,
8245 .g = 1
8246 };
8247 if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
8248 seg.l = 1;
8249 else
8250 seg.db = 1;
8251 vmx_set_segment(vcpu, &seg, VCPU_SREG_CS);
8252 seg = (struct kvm_segment) {
8253 .base = 0,
8254 .limit = 0xFFFFFFFF,
8255 .type = 3,
8256 .present = 1,
8257 .s = 1,
8258 .db = 1,
8259 .g = 1
8260 };
8261 seg.selector = vmcs12->host_ds_selector;
8262 vmx_set_segment(vcpu, &seg, VCPU_SREG_DS);
8263 seg.selector = vmcs12->host_es_selector;
8264 vmx_set_segment(vcpu, &seg, VCPU_SREG_ES);
8265 seg.selector = vmcs12->host_ss_selector;
8266 vmx_set_segment(vcpu, &seg, VCPU_SREG_SS);
8267 seg.selector = vmcs12->host_fs_selector;
8268 seg.base = vmcs12->host_fs_base;
8269 vmx_set_segment(vcpu, &seg, VCPU_SREG_FS);
8270 seg.selector = vmcs12->host_gs_selector;
8271 seg.base = vmcs12->host_gs_base;
8272 vmx_set_segment(vcpu, &seg, VCPU_SREG_GS);
8273 seg = (struct kvm_segment) {
8274 .base = vmcs12->host_tr_base,
8275 .limit = 0x67,
8276 .selector = vmcs12->host_tr_selector,
8277 .type = 11,
8278 .present = 1
8279 };
8280 vmx_set_segment(vcpu, &seg, VCPU_SREG_TR);
8281
8021 kvm_set_dr(vcpu, 7, 0x400); 8282 kvm_set_dr(vcpu, 7, 0x400);
8022 vmcs_write64(GUEST_IA32_DEBUGCTL, 0); 8283 vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
8023} 8284}
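
The exit-reason dispatch in nested_ept_inject_page_fault() above hinges on a single bit: PFERR_RSVD_MASK set by the guest EPT walk means reserved bits were found, i.e. an EPT misconfiguration rather than a violation. A minimal sketch of that dispatch (constants copied from the kernel headers):

	#include <stdio.h>
	#include <stdint.h>

	#define PFERR_RSVD_MASK			(1u << 3)
	#define EXIT_REASON_EPT_VIOLATION	48
	#define EXIT_REASON_EPT_MISCONFIG	49

	/* Mirrors the dispatch in nested_ept_inject_page_fault(). */
	static int nested_ept_exit_reason(uint32_t error_code)
	{
		return (error_code & PFERR_RSVD_MASK) ?
			EXIT_REASON_EPT_MISCONFIG : EXIT_REASON_EPT_VIOLATION;
	}

	int main(void)
	{
		printf("%d %d\n",
		       nested_ept_exit_reason(PFERR_RSVD_MASK),	/* 49: misconfig */
		       nested_ept_exit_reason(0x3));		/* 48: violation */
		return 0;
	}
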
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index d21bce505315..e5ca72a5cdb6 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -682,17 +682,6 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
682 */ 682 */
683 } 683 }
684 684
685 /*
686 * Does the new cr3 value map to physical memory? (Note, we
687 * catch an invalid cr3 even in real-mode, because it would
688 * cause trouble later on when we turn on paging anyway.)
689 *
690 * A real CPU would silently accept an invalid cr3 and would
691 * attempt to use it - with largely undefined (and often hard
692 * to debug) behavior on the guest side.
693 */
694 if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
695 return 1;
696 vcpu->arch.cr3 = cr3; 685 vcpu->arch.cr3 = cr3;
697 __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); 686 __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
698 vcpu->arch.mmu.new_cr3(vcpu); 687 vcpu->arch.mmu.new_cr3(vcpu);
@@ -850,7 +839,8 @@ static u32 msrs_to_save[] = {
850#ifdef CONFIG_X86_64 839#ifdef CONFIG_X86_64
851 MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR, 840 MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
852#endif 841#endif
853 MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA 842 MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
843 MSR_IA32_FEATURE_CONTROL
854}; 844};
855 845
856static unsigned num_msrs_to_save; 846static unsigned num_msrs_to_save;
@@ -1457,6 +1447,29 @@ static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
1457#endif 1447#endif
1458} 1448}
1459 1449
1450static void kvm_gen_update_masterclock(struct kvm *kvm)
1451{
1452#ifdef CONFIG_X86_64
1453 int i;
1454 struct kvm_vcpu *vcpu;
1455 struct kvm_arch *ka = &kvm->arch;
1456
1457 spin_lock(&ka->pvclock_gtod_sync_lock);
1458 kvm_make_mclock_inprogress_request(kvm);
1459 /* no guest entries from this point */
1460 pvclock_update_vm_gtod_copy(kvm);
1461
1462 kvm_for_each_vcpu(i, vcpu, kvm)
1463 set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests);
1464
1465 /* guest entries allowed */
1466 kvm_for_each_vcpu(i, vcpu, kvm)
1467 clear_bit(KVM_REQ_MCLOCK_INPROGRESS, &vcpu->requests);
1468
1469 spin_unlock(&ka->pvclock_gtod_sync_lock);
1470#endif
1471}
1472
1460static int kvm_guest_time_update(struct kvm_vcpu *v) 1473static int kvm_guest_time_update(struct kvm_vcpu *v)
1461{ 1474{
1462 unsigned long flags, this_tsc_khz; 1475 unsigned long flags, this_tsc_khz;
@@ -3806,6 +3819,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
3806 delta = user_ns.clock - now_ns; 3819 delta = user_ns.clock - now_ns;
3807 local_irq_enable(); 3820 local_irq_enable();
3808 kvm->arch.kvmclock_offset = delta; 3821 kvm->arch.kvmclock_offset = delta;
3822 kvm_gen_update_masterclock(kvm);
3809 break; 3823 break;
3810 } 3824 }
3811 case KVM_GET_CLOCK: { 3825 case KVM_GET_CLOCK: {
@@ -4955,6 +4969,97 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
4955static int complete_emulated_mmio(struct kvm_vcpu *vcpu); 4969static int complete_emulated_mmio(struct kvm_vcpu *vcpu);
4956static int complete_emulated_pio(struct kvm_vcpu *vcpu); 4970static int complete_emulated_pio(struct kvm_vcpu *vcpu);
4957 4971
4972static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
4973 unsigned long *db)
4974{
4975 u32 dr6 = 0;
4976 int i;
4977 u32 enable, rwlen;
4978
4979 enable = dr7;
4980 rwlen = dr7 >> 16;
4981 for (i = 0; i < 4; i++, enable >>= 2, rwlen >>= 4)
4982 if ((enable & 3) && (rwlen & 15) == type && db[i] == addr)
4983 dr6 |= (1 << i);
4984 return dr6;
4985}
4986
4987static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, int *r)
4988{
4989 struct kvm_run *kvm_run = vcpu->run;
4990
4991 /*
4992 * Use the "raw" value to see if TF was passed to the processor.
4993 * Note that the new value of the flags has not been saved yet.
4994 *
4995 * This is correct even for TF set by the guest, because "the
4996 * processor will not generate this exception after the instruction
4997 * that sets the TF flag".
4998 */
4999 unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
5000
5001 if (unlikely(rflags & X86_EFLAGS_TF)) {
5002 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
5003 kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1;
5004 kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
5005 kvm_run->debug.arch.exception = DB_VECTOR;
5006 kvm_run->exit_reason = KVM_EXIT_DEBUG;
5007 *r = EMULATE_USER_EXIT;
5008 } else {
5009 vcpu->arch.emulate_ctxt.eflags &= ~X86_EFLAGS_TF;
5010 /*
 5011	 * "Certain debug exceptions may clear bits 0-3. The
5012 * remaining contents of the DR6 register are never
5013 * cleared by the processor".
5014 */
5015 vcpu->arch.dr6 &= ~15;
5016 vcpu->arch.dr6 |= DR6_BS;
5017 kvm_queue_exception(vcpu, DB_VECTOR);
5018 }
5019 }
5020}
5021
5022static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r)
5023{
5024 struct kvm_run *kvm_run = vcpu->run;
5025 unsigned long eip = vcpu->arch.emulate_ctxt.eip;
5026 u32 dr6 = 0;
5027
5028 if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) &&
5029 (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) {
5030 dr6 = kvm_vcpu_check_hw_bp(eip, 0,
5031 vcpu->arch.guest_debug_dr7,
5032 vcpu->arch.eff_db);
5033
5034 if (dr6 != 0) {
5035 kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1;
5036 kvm_run->debug.arch.pc = kvm_rip_read(vcpu) +
5037 get_segment_base(vcpu, VCPU_SREG_CS);
5038
5039 kvm_run->debug.arch.exception = DB_VECTOR;
5040 kvm_run->exit_reason = KVM_EXIT_DEBUG;
5041 *r = EMULATE_USER_EXIT;
5042 return true;
5043 }
5044 }
5045
5046 if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK)) {
5047 dr6 = kvm_vcpu_check_hw_bp(eip, 0,
5048 vcpu->arch.dr7,
5049 vcpu->arch.db);
5050
5051 if (dr6 != 0) {
5052 vcpu->arch.dr6 &= ~15;
5053 vcpu->arch.dr6 |= dr6;
5054 kvm_queue_exception(vcpu, DB_VECTOR);
5055 *r = EMULATE_DONE;
5056 return true;
5057 }
5058 }
5059
5060 return false;
5061}
5062
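
kvm_vcpu_check_hw_bp() above packs a lot into one loop: DR7 holds a 2-bit enable field per breakpoint in its low byte and a 4-bit R/W+LEN field per breakpoint from bit 16 up, and each address match sets the corresponding DR6 status bit. The same walk, runnable stand-alone (sample DR7/DR0 values are made up):

	#include <stdio.h>
	#include <stdint.h>

	/* Standalone version of the DR7 walk in kvm_vcpu_check_hw_bp(). */
	static uint32_t check_hw_bp(unsigned long addr, uint32_t type, uint32_t dr7,
				    const unsigned long *db)
	{
		uint32_t dr6 = 0, enable = dr7, rwlen = dr7 >> 16;
		int i;

		for (i = 0; i < 4; i++, enable >>= 2, rwlen >>= 4)
			if ((enable & 3) && (rwlen & 15) == type && db[i] == addr)
				dr6 |= (1 << i);
		return dr6;
	}

	int main(void)
	{
		/* DR0 = 0x1000, local-enable bit set, R/W+LEN zero (execution bp) */
		unsigned long db[4] = { 0x1000, 0, 0, 0 };

		printf("dr6 = %#x\n", check_hw_bp(0x1000, 0, 0x1, db));
		return 0;
	}
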
4958int x86_emulate_instruction(struct kvm_vcpu *vcpu, 5063int x86_emulate_instruction(struct kvm_vcpu *vcpu,
4959 unsigned long cr2, 5064 unsigned long cr2,
4960 int emulation_type, 5065 int emulation_type,
@@ -4975,6 +5080,16 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
4975 5080
4976 if (!(emulation_type & EMULTYPE_NO_DECODE)) { 5081 if (!(emulation_type & EMULTYPE_NO_DECODE)) {
4977 init_emulate_ctxt(vcpu); 5082 init_emulate_ctxt(vcpu);
5083
5084 /*
5085 * We will reenter on the same instruction since
5086 * we do not set complete_userspace_io. This does not
5087 * handle watchpoints yet, those would be handled in
5088 * the emulate_ops.
5089 */
5090 if (kvm_vcpu_check_breakpoint(vcpu, &r))
5091 return r;
5092
4978 ctxt->interruptibility = 0; 5093 ctxt->interruptibility = 0;
4979 ctxt->have_exception = false; 5094 ctxt->have_exception = false;
4980 ctxt->perm_ok = false; 5095 ctxt->perm_ok = false;
@@ -5031,17 +5146,18 @@ restart:
5031 inject_emulated_exception(vcpu); 5146 inject_emulated_exception(vcpu);
5032 r = EMULATE_DONE; 5147 r = EMULATE_DONE;
5033 } else if (vcpu->arch.pio.count) { 5148 } else if (vcpu->arch.pio.count) {
5034 if (!vcpu->arch.pio.in) 5149 if (!vcpu->arch.pio.in) {
5150 /* FIXME: return into emulator if single-stepping. */
5035 vcpu->arch.pio.count = 0; 5151 vcpu->arch.pio.count = 0;
5036 else { 5152 } else {
5037 writeback = false; 5153 writeback = false;
5038 vcpu->arch.complete_userspace_io = complete_emulated_pio; 5154 vcpu->arch.complete_userspace_io = complete_emulated_pio;
5039 } 5155 }
5040 r = EMULATE_DO_MMIO; 5156 r = EMULATE_USER_EXIT;
5041 } else if (vcpu->mmio_needed) { 5157 } else if (vcpu->mmio_needed) {
5042 if (!vcpu->mmio_is_write) 5158 if (!vcpu->mmio_is_write)
5043 writeback = false; 5159 writeback = false;
5044 r = EMULATE_DO_MMIO; 5160 r = EMULATE_USER_EXIT;
5045 vcpu->arch.complete_userspace_io = complete_emulated_mmio; 5161 vcpu->arch.complete_userspace_io = complete_emulated_mmio;
5046 } else if (r == EMULATION_RESTART) 5162 } else if (r == EMULATION_RESTART)
5047 goto restart; 5163 goto restart;
@@ -5050,10 +5166,12 @@ restart:
5050 5166
5051 if (writeback) { 5167 if (writeback) {
5052 toggle_interruptibility(vcpu, ctxt->interruptibility); 5168 toggle_interruptibility(vcpu, ctxt->interruptibility);
5053 kvm_set_rflags(vcpu, ctxt->eflags);
5054 kvm_make_request(KVM_REQ_EVENT, vcpu); 5169 kvm_make_request(KVM_REQ_EVENT, vcpu);
5055 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; 5170 vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
5056 kvm_rip_write(vcpu, ctxt->eip); 5171 kvm_rip_write(vcpu, ctxt->eip);
5172 if (r == EMULATE_DONE)
5173 kvm_vcpu_check_singlestep(vcpu, &r);
5174 kvm_set_rflags(vcpu, ctxt->eflags);
5057 } else 5175 } else
5058 vcpu->arch.emulate_regs_need_sync_to_vcpu = true; 5176 vcpu->arch.emulate_regs_need_sync_to_vcpu = true;
5059 5177
@@ -5347,7 +5465,7 @@ static struct notifier_block pvclock_gtod_notifier = {
5347int kvm_arch_init(void *opaque) 5465int kvm_arch_init(void *opaque)
5348{ 5466{
5349 int r; 5467 int r;
5350 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque; 5468 struct kvm_x86_ops *ops = opaque;
5351 5469
5352 if (kvm_x86_ops) { 5470 if (kvm_x86_ops) {
5353 printk(KERN_ERR "kvm: already loaded the other module\n"); 5471 printk(KERN_ERR "kvm: already loaded the other module\n");
@@ -5495,6 +5613,23 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
5495 return 1; 5613 return 1;
5496} 5614}
5497 5615
5616/*
5617 * kvm_pv_kick_cpu_op: Kick a vcpu.
5618 *
5619 * @apicid - apicid of vcpu to be kicked.
5620 */
5621static void kvm_pv_kick_cpu_op(struct kvm *kvm, unsigned long flags, int apicid)
5622{
5623 struct kvm_lapic_irq lapic_irq;
5624
5625 lapic_irq.shorthand = 0;
5626 lapic_irq.dest_mode = 0;
5627 lapic_irq.dest_id = apicid;
5628
5629 lapic_irq.delivery_mode = APIC_DM_REMRD;
5630 kvm_irq_delivery_to_apic(kvm, 0, &lapic_irq, NULL);
5631}
5632
5498int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) 5633int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
5499{ 5634{
5500 unsigned long nr, a0, a1, a2, a3, ret; 5635 unsigned long nr, a0, a1, a2, a3, ret;
@@ -5528,6 +5663,10 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
5528 case KVM_HC_VAPIC_POLL_IRQ: 5663 case KVM_HC_VAPIC_POLL_IRQ:
5529 ret = 0; 5664 ret = 0;
5530 break; 5665 break;
5666 case KVM_HC_KICK_CPU:
5667 kvm_pv_kick_cpu_op(vcpu->kvm, a0, a1);
5668 ret = 0;
5669 break;
5531 default: 5670 default:
5532 ret = -KVM_ENOSYS; 5671 ret = -KVM_ENOSYS;
5533 break; 5672 break;
@@ -5689,29 +5828,6 @@ static void process_nmi(struct kvm_vcpu *vcpu)
5689 kvm_make_request(KVM_REQ_EVENT, vcpu); 5828 kvm_make_request(KVM_REQ_EVENT, vcpu);
5690} 5829}
5691 5830
5692static void kvm_gen_update_masterclock(struct kvm *kvm)
5693{
5694#ifdef CONFIG_X86_64
5695 int i;
5696 struct kvm_vcpu *vcpu;
5697 struct kvm_arch *ka = &kvm->arch;
5698
5699 spin_lock(&ka->pvclock_gtod_sync_lock);
5700 kvm_make_mclock_inprogress_request(kvm);
5701 /* no guest entries from this point */
5702 pvclock_update_vm_gtod_copy(kvm);
5703
5704 kvm_for_each_vcpu(i, vcpu, kvm)
5705 set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests);
5706
5707 /* guest entries allowed */
5708 kvm_for_each_vcpu(i, vcpu, kvm)
5709 clear_bit(KVM_REQ_MCLOCK_INPROGRESS, &vcpu->requests);
5710
5711 spin_unlock(&ka->pvclock_gtod_sync_lock);
5712#endif
5713}
5714
5715static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu) 5831static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
5716{ 5832{
5717 u64 eoi_exit_bitmap[4]; 5833 u64 eoi_exit_bitmap[4];
@@ -5950,6 +6066,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
5950 kvm_apic_accept_events(vcpu); 6066 kvm_apic_accept_events(vcpu);
5951 switch(vcpu->arch.mp_state) { 6067 switch(vcpu->arch.mp_state) {
5952 case KVM_MP_STATE_HALTED: 6068 case KVM_MP_STATE_HALTED:
6069 vcpu->arch.pv.pv_unhalted = false;
5953 vcpu->arch.mp_state = 6070 vcpu->arch.mp_state =
5954 KVM_MP_STATE_RUNNABLE; 6071 KVM_MP_STATE_RUNNABLE;
5955 case KVM_MP_STATE_RUNNABLE: 6072 case KVM_MP_STATE_RUNNABLE:
@@ -6061,6 +6178,8 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
6061 6178
6062 if (vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments) { 6179 if (vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments) {
6063 vcpu->mmio_needed = 0; 6180 vcpu->mmio_needed = 0;
6181
6182 /* FIXME: return into emulator if single-stepping. */
6064 if (vcpu->mmio_is_write) 6183 if (vcpu->mmio_is_write)
6065 return 1; 6184 return 1;
6066 vcpu->mmio_read_completed = 1; 6185 vcpu->mmio_read_completed = 1;
@@ -6249,7 +6368,12 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
6249 struct kvm_mp_state *mp_state) 6368 struct kvm_mp_state *mp_state)
6250{ 6369{
6251 kvm_apic_accept_events(vcpu); 6370 kvm_apic_accept_events(vcpu);
6252 mp_state->mp_state = vcpu->arch.mp_state; 6371 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED &&
6372 vcpu->arch.pv.pv_unhalted)
6373 mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
6374 else
6375 mp_state->mp_state = vcpu->arch.mp_state;
6376
6253 return 0; 6377 return 0;
6254} 6378}
6255 6379
@@ -6770,6 +6894,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
6770 BUG_ON(vcpu->kvm == NULL); 6894 BUG_ON(vcpu->kvm == NULL);
6771 kvm = vcpu->kvm; 6895 kvm = vcpu->kvm;
6772 6896
6897 vcpu->arch.pv.pv_unhalted = false;
6773 vcpu->arch.emulate_ctxt.ops = &emulate_ops; 6898 vcpu->arch.emulate_ctxt.ops = &emulate_ops;
6774 if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu)) 6899 if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
6775 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; 6900 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
@@ -7019,6 +7144,15 @@ out_free:
7019 return -ENOMEM; 7144 return -ENOMEM;
7020} 7145}
7021 7146
7147void kvm_arch_memslots_updated(struct kvm *kvm)
7148{
7149 /*
7150 * memslots->generation has been incremented.
7151 * mmio generation may have reached its maximum value.
7152 */
7153 kvm_mmu_invalidate_mmio_sptes(kvm);
7154}
7155
7022int kvm_arch_prepare_memory_region(struct kvm *kvm, 7156int kvm_arch_prepare_memory_region(struct kvm *kvm,
7023 struct kvm_memory_slot *memslot, 7157 struct kvm_memory_slot *memslot,
7024 struct kvm_userspace_memory_region *mem, 7158 struct kvm_userspace_memory_region *mem,
@@ -7079,11 +7213,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
7079 */ 7213 */
7080 if ((change != KVM_MR_DELETE) && (mem->flags & KVM_MEM_LOG_DIRTY_PAGES)) 7214 if ((change != KVM_MR_DELETE) && (mem->flags & KVM_MEM_LOG_DIRTY_PAGES))
7081 kvm_mmu_slot_remove_write_access(kvm, mem->slot); 7215 kvm_mmu_slot_remove_write_access(kvm, mem->slot);
7082 /*
7083 * If memory slot is created, or moved, we need to clear all
7084 * mmio sptes.
7085 */
7086 kvm_mmu_invalidate_mmio_sptes(kvm);
7087} 7216}
7088 7217
7089void kvm_arch_flush_shadow_all(struct kvm *kvm) 7218void kvm_arch_flush_shadow_all(struct kvm *kvm)
@@ -7103,6 +7232,7 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
7103 !vcpu->arch.apf.halted) 7232 !vcpu->arch.apf.halted)
7104 || !list_empty_careful(&vcpu->async_pf.done) 7233 || !list_empty_careful(&vcpu->async_pf.done)
7105 || kvm_apic_has_events(vcpu) 7234 || kvm_apic_has_events(vcpu)
7235 || vcpu->arch.pv.pv_unhalted
7106 || atomic_read(&vcpu->arch.nmi_queued) || 7236 || atomic_read(&vcpu->arch.nmi_queued) ||
7107 (kvm_arch_interrupt_allowed(vcpu) && 7237 (kvm_arch_interrupt_allowed(vcpu) &&
7108 kvm_cpu_has_interrupt(vcpu)); 7238 kvm_cpu_has_interrupt(vcpu));
diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
index 25b7ae8d058a..7609e0e421ec 100644
--- a/arch/x86/lib/csum-wrappers_64.c
+++ b/arch/x86/lib/csum-wrappers_64.c
@@ -6,6 +6,7 @@
6 */ 6 */
7#include <asm/checksum.h> 7#include <asm/checksum.h>
8#include <linux/module.h> 8#include <linux/module.h>
9#include <asm/smap.h>
9 10
10/** 11/**
11 * csum_partial_copy_from_user - Copy and checksum from user space. 12 * csum_partial_copy_from_user - Copy and checksum from user space.
@@ -52,8 +53,10 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
52 len -= 2; 53 len -= 2;
53 } 54 }
54 } 55 }
56 stac();
55 isum = csum_partial_copy_generic((__force const void *)src, 57 isum = csum_partial_copy_generic((__force const void *)src,
56 dst, len, isum, errp, NULL); 58 dst, len, isum, errp, NULL);
59 clac();
57 if (unlikely(*errp)) 60 if (unlikely(*errp))
58 goto out_err; 61 goto out_err;
59 62
@@ -82,6 +85,8 @@ __wsum
82csum_partial_copy_to_user(const void *src, void __user *dst, 85csum_partial_copy_to_user(const void *src, void __user *dst,
83 int len, __wsum isum, int *errp) 86 int len, __wsum isum, int *errp)
84{ 87{
88 __wsum ret;
89
85 might_sleep(); 90 might_sleep();
86 91
87 if (unlikely(!access_ok(VERIFY_WRITE, dst, len))) { 92 if (unlikely(!access_ok(VERIFY_WRITE, dst, len))) {
@@ -105,8 +110,11 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
105 } 110 }
106 111
107 *errp = 0; 112 *errp = 0;
108 return csum_partial_copy_generic(src, (void __force *)dst, 113 stac();
109 len, isum, NULL, errp); 114 ret = csum_partial_copy_generic(src, (void __force *)dst,
115 len, isum, NULL, errp);
116 clac();
117 return ret;
110} 118}
111EXPORT_SYMBOL(csum_partial_copy_to_user); 119EXPORT_SYMBOL(csum_partial_copy_to_user);
112 120
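The stac()/clac() pairs above open and close the SMAP user-access window around csum_partial_copy_generic(), which dereferences the user pointer directly. A short kernel-context sketch of the pattern, assuming <asm/smap.h> and an illustrative copy helper:

/* With SMAP enabled, supervisor code faults on user pages unless
 * EFLAGS.AC is set; stac()/clac() toggle that bit, and the window
 * is kept as narrow as possible. */
#include <asm/smap.h>

static unsigned long copy_and_sum_sketch(const void __user *src,
					 void *dst, int len, int *errp)
{
	unsigned long sum;

	stac();				/* open the user-access window */
	sum = raw_copy_and_sum(src, dst, len, errp);	/* illustrative */
	clac();				/* close it again immediately */
	return sum;
}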
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index 906fea315791..c905e89e19fe 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -68,7 +68,7 @@ EXPORT_SYMBOL(copy_in_user);
68 * Since protection fault in copy_from/to_user is not a normal situation, 68 * Since protection fault in copy_from/to_user is not a normal situation,
69 * it is not necessary to optimize tail handling. 69 * it is not necessary to optimize tail handling.
70 */ 70 */
71unsigned long 71__visible unsigned long
72copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest) 72copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
73{ 73{
74 char c; 74 char c;
diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
index 5d7e51f3fd28..533a85e3a07e 100644
--- a/arch/x86/lib/x86-opcode-map.txt
+++ b/arch/x86/lib/x86-opcode-map.txt
@@ -1,10 +1,8 @@
1# x86 Opcode Maps 1# x86 Opcode Maps
2# 2#
3# This is (mostly) based on following documentations. 3# This is (mostly) based on following documentations.
4# - Intel(R) 64 and IA-32 Architectures Software Developer's Manual Vol.2 4# - Intel(R) 64 and IA-32 Architectures Software Developer's Manual Vol.2C
5# (#325383-040US, October 2011) 5# (#326018-047US, June 2013)
6# - Intel(R) Advanced Vector Extensions Programming Reference
7# (#319433-011,JUNE 2011).
8# 6#
9#<Opcode maps> 7#<Opcode maps>
10# Table: table-name 8# Table: table-name
@@ -29,6 +27,7 @@
29# - (F3): the last prefix is 0xF3 27# - (F3): the last prefix is 0xF3
30# - (F2): the last prefix is 0xF2 28# - (F2): the last prefix is 0xF2
31# - (!F3) : the last prefix is not 0xF3 (including non-last prefix case) 29# - (!F3) : the last prefix is not 0xF3 (including non-last prefix case)
30# - (66&F2): Both 0x66 and 0xF2 prefixes are specified.
32 31
33Table: one byte opcode 32Table: one byte opcode
34Referrer: 33Referrer:
@@ -246,8 +245,8 @@ c2: RETN Iw (f64)
246c3: RETN 245c3: RETN
247c4: LES Gz,Mp (i64) | VEX+2byte (Prefix) 246c4: LES Gz,Mp (i64) | VEX+2byte (Prefix)
248c5: LDS Gz,Mp (i64) | VEX+1byte (Prefix) 247c5: LDS Gz,Mp (i64) | VEX+1byte (Prefix)
249c6: Grp11 Eb,Ib (1A) 248c6: Grp11A Eb,Ib (1A)
250c7: Grp11 Ev,Iz (1A) 249c7: Grp11B Ev,Iz (1A)
251c8: ENTER Iw,Ib 250c8: ENTER Iw,Ib
252c9: LEAVE (d64) 251c9: LEAVE (d64)
253ca: RETF Iw 252ca: RETF Iw
@@ -293,8 +292,8 @@ ef: OUT DX,eAX
293# 0xf0 - 0xff 292# 0xf0 - 0xff
294f0: LOCK (Prefix) 293f0: LOCK (Prefix)
295f1: 294f1:
296f2: REPNE (Prefix) 295f2: REPNE (Prefix) | XACQUIRE (Prefix)
297f3: REP/REPE (Prefix) 296f3: REP/REPE (Prefix) | XRELEASE (Prefix)
298f4: HLT 297f4: HLT
299f5: CMC 298f5: CMC
300f6: Grp3_1 Eb (1A) 299f6: Grp3_1 Eb (1A)
@@ -326,7 +325,8 @@ AVXcode: 1
3260a: 3250a:
3270b: UD2 (1B) 3260b: UD2 (1B)
3280c: 3270c:
3290d: NOP Ev | GrpP 328# AMD's prefetch group. Intel supports prefetchw(/1) only.
3290d: GrpP
3300e: FEMMS 3300e: FEMMS
331# 3DNow! uses the last imm byte as opcode extension. 331# 3DNow! uses the last imm byte as opcode extension.
3320f: 3DNow! Pq,Qq,Ib 3320f: 3DNow! Pq,Qq,Ib
@@ -729,12 +729,12 @@ dc: VAESENC Vdq,Hdq,Wdq (66),(v1)
729dd: VAESENCLAST Vdq,Hdq,Wdq (66),(v1) 729dd: VAESENCLAST Vdq,Hdq,Wdq (66),(v1)
730de: VAESDEC Vdq,Hdq,Wdq (66),(v1) 730de: VAESDEC Vdq,Hdq,Wdq (66),(v1)
731df: VAESDECLAST Vdq,Hdq,Wdq (66),(v1) 731df: VAESDECLAST Vdq,Hdq,Wdq (66),(v1)
732f0: MOVBE Gy,My | MOVBE Gw,Mw (66) | CRC32 Gd,Eb (F2) 732f0: MOVBE Gy,My | MOVBE Gw,Mw (66) | CRC32 Gd,Eb (F2) | CRC32 Gd,Eb (66&F2)
733f1: MOVBE My,Gy | MOVBE Mw,Gw (66) | CRC32 Gd,Ey (F2) 733f1: MOVBE My,Gy | MOVBE Mw,Gw (66) | CRC32 Gd,Ey (F2) | CRC32 Gd,Ew (66&F2)
734f2: ANDN Gy,By,Ey (v) 734f2: ANDN Gy,By,Ey (v)
735f3: Grp17 (1A) 735f3: Grp17 (1A)
736f5: BZHI Gy,Ey,By (v) | PEXT Gy,By,Ey (F3),(v) | PDEP Gy,By,Ey (F2),(v) 736f5: BZHI Gy,Ey,By (v) | PEXT Gy,By,Ey (F3),(v) | PDEP Gy,By,Ey (F2),(v)
737f6: MULX By,Gy,rDX,Ey (F2),(v) 737f6: ADCX Gy,Ey (66) | ADOX Gy,Ey (F3) | MULX By,Gy,rDX,Ey (F2),(v)
738f7: BEXTR Gy,Ey,By (v) | SHLX Gy,Ey,By (66),(v) | SARX Gy,Ey,By (F3),(v) | SHRX Gy,Ey,By (F2),(v) 738f7: BEXTR Gy,Ey,By (v) | SHLX Gy,Ey,By (66),(v) | SARX Gy,Ey,By (F3),(v) | SHRX Gy,Ey,By (F2),(v)
739EndTable 739EndTable
740 740
@@ -861,8 +861,8 @@ EndTable
861 861
862GrpTable: Grp7 862GrpTable: Grp7
8630: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B) 8630: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B)
8641: SIDT Ms | MONITOR (000),(11B) | MWAIT (001) 8641: SIDT Ms | MONITOR (000),(11B) | MWAIT (001),(11B) | CLAC (010),(11B) | STAC (011),(11B)
8652: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) 8652: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B)
8663: LIDT Ms 8663: LIDT Ms
8674: SMSW Mw/Rv 8674: SMSW Mw/Rv
8685: 8685:
@@ -880,15 +880,21 @@ EndTable
880GrpTable: Grp9 880GrpTable: Grp9
8811: CMPXCHG8B/16B Mq/Mdq 8811: CMPXCHG8B/16B Mq/Mdq
8826: VMPTRLD Mq | VMCLEAR Mq (66) | VMXON Mq (F3) | RDRAND Rv (11B) 8826: VMPTRLD Mq | VMCLEAR Mq (66) | VMXON Mq (F3) | RDRAND Rv (11B)
8837: VMPTRST Mq | VMPTRST Mq (F3) 8837: VMPTRST Mq | VMPTRST Mq (F3) | RDSEED Rv (11B)
884EndTable 884EndTable
885 885
886GrpTable: Grp10 886GrpTable: Grp10
887EndTable 887EndTable
888 888
889GrpTable: Grp11 889# Grp11A and Grp11B are expressed as Grp11 in Intel SDM
890# Note: the operands are given by group opcode 890GrpTable: Grp11A
8910: MOV 8910: MOV Eb,Ib
8927: XABORT Ib (000),(11B)
893EndTable
894
895GrpTable: Grp11B
8960: MOV Eb,Iz
8977: XBEGIN Jz (000),(11B)
892EndTable 898EndTable
893 899
894GrpTable: Grp12 900GrpTable: Grp12
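The f2/f3 rows and the Grp11A/Grp11B split above come from TSX: XACQUIRE/XRELEASE reuse the REPNE/REP prefix bytes on locked operations, and XABORT/XBEGIN share the c6/c7 MOV group encodings. A hedged sketch of what an HLE-elided byte lock looks like from C, assuming GCC inline asm (raw 0xf2/0xf3 bytes are used in case the assembler lacks the xacquire/xrelease mnemonics; CPUs without HLE simply ignore the prefix):

static inline int hle_trylock(volatile unsigned char *lock)
{
	unsigned char v = 1;

	/* xacquire xchg: starts hardware lock elision on HLE parts */
	asm volatile(".byte 0xf2\n\txchgb %0, %1"
		     : "+q" (v), "+m" (*lock) : : "memory");
	return v == 0;			/* 0 means the lock was free */
}

static inline void hle_unlock(volatile unsigned char *lock)
{
	/* xrelease mov: the one non-locked store HLE permits */
	asm volatile(".byte 0xf3\n\tmovb $0, %0"
		     : "=m" (*lock) : : "memory");
}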
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 2ec29ac78ae6..04664cdb7fda 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -78,8 +78,8 @@ __ref void *alloc_low_pages(unsigned int num)
78 return __va(pfn << PAGE_SHIFT); 78 return __va(pfn << PAGE_SHIFT);
79} 79}
80 80
81/* need 4 4k for initial PMD_SIZE, 4k for 0-ISA_END_ADDRESS */ 81/* need 3 4k for initial PMD_SIZE, 3 4k for 0-ISA_END_ADDRESS */
82#define INIT_PGT_BUF_SIZE (5 * PAGE_SIZE) 82#define INIT_PGT_BUF_SIZE (6 * PAGE_SIZE)
83RESERVE_BRK(early_pgt_alloc, INIT_PGT_BUF_SIZE); 83RESERVE_BRK(early_pgt_alloc, INIT_PGT_BUF_SIZE);
84void __init early_alloc_pgt_buf(void) 84void __init early_alloc_pgt_buf(void)
85{ 85{
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 0215e2c563ef..799580cabc78 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -487,7 +487,7 @@ __early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
487 unsigned long offset; 487 unsigned long offset;
488 resource_size_t last_addr; 488 resource_size_t last_addr;
489 unsigned int nrpages; 489 unsigned int nrpages;
490 enum fixed_addresses idx0, idx; 490 enum fixed_addresses idx;
491 int i, slot; 491 int i, slot;
492 492
493 WARN_ON(system_state != SYSTEM_BOOTING); 493 WARN_ON(system_state != SYSTEM_BOOTING);
@@ -540,8 +540,7 @@ __early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
540 /* 540 /*
541 * Ok, go for it.. 541 * Ok, go for it..
542 */ 542 */
543 idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot; 543 idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
544 idx = idx0;
545 while (nrpages > 0) { 544 while (nrpages > 0) {
546 early_set_fixmap(idx, phys_addr, prot); 545 early_set_fixmap(idx, phys_addr, prot);
547 phys_addr += PAGE_SIZE; 546 phys_addr += PAGE_SIZE;
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 62c29a5bfe26..25e7e1372bb2 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -112,11 +112,13 @@ static unsigned long mmap_legacy_base(void)
112 */ 112 */
113void arch_pick_mmap_layout(struct mm_struct *mm) 113void arch_pick_mmap_layout(struct mm_struct *mm)
114{ 114{
115 mm->mmap_legacy_base = mmap_legacy_base();
116 mm->mmap_base = mmap_base();
117
115 if (mmap_is_legacy()) { 118 if (mmap_is_legacy()) {
116 mm->mmap_base = mmap_legacy_base(); 119 mm->mmap_base = mm->mmap_legacy_base;
117 mm->get_unmapped_area = arch_get_unmapped_area; 120 mm->get_unmapped_area = arch_get_unmapped_area;
118 } else { 121 } else {
119 mm->mmap_base = mmap_base();
120 mm->get_unmapped_area = arch_get_unmapped_area_topdown; 122 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
121 } 123 }
122} 124}
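The reordering above computes and stores both bases unconditionally, so mm->mmap_legacy_base stays available later (e.g. to code that must fall back to bottom-up allocation) regardless of which layout was picked. A sketch of the resulting shape, with illustrative helper names:

/* Sketch: derive both candidate bases once, cache them in the mm,
 * then select the search direction. */
static void pick_mmap_layout_sketch(struct mm_sketch *mm)
{
	mm->mmap_legacy_base = legacy_base();	/* bottom-up base */
	mm->mmap_base = topdown_base();		/* top-down base */

	if (layout_is_legacy()) {
		mm->mmap_base = mm->mmap_legacy_base;
		mm->get_unmapped_area = search_bottom_up;
	} else {
		mm->get_unmapped_area = search_top_down;
	}
}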
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
index cdd0da9dd530..266ca912f62e 100644
--- a/arch/x86/mm/srat.c
+++ b/arch/x86/mm/srat.c
@@ -146,6 +146,7 @@ int __init
146acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) 146acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
147{ 147{
148 u64 start, end; 148 u64 start, end;
149 u32 hotpluggable;
149 int node, pxm; 150 int node, pxm;
150 151
151 if (srat_disabled()) 152 if (srat_disabled())
@@ -154,7 +155,8 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
154 goto out_err_bad_srat; 155 goto out_err_bad_srat;
155 if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0) 156 if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
156 goto out_err; 157 goto out_err;
157 if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info()) 158 hotpluggable = ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE;
159 if (hotpluggable && !save_add_info())
158 goto out_err; 160 goto out_err;
159 161
160 start = ma->base_address; 162 start = ma->base_address;
@@ -174,9 +176,10 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
174 176
175 node_set(node, numa_nodes_parsed); 177 node_set(node, numa_nodes_parsed);
176 178
177 printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n", 179 pr_info("SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]%s\n",
178 node, pxm, 180 node, pxm,
179 (unsigned long long) start, (unsigned long long) end - 1); 181 (unsigned long long) start, (unsigned long long) end - 1,
182 hotpluggable ? " hotplug" : "");
180 183
181 return 0; 184 return 0;
182out_err_bad_srat: 185out_err_bad_srat:
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 48768df2471a..6890d8498e0b 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -403,7 +403,7 @@ static void nmi_cpu_down(void *dummy)
403 nmi_cpu_shutdown(dummy); 403 nmi_cpu_shutdown(dummy);
404} 404}
405 405
406static int nmi_create_files(struct super_block *sb, struct dentry *root) 406static int nmi_create_files(struct dentry *root)
407{ 407{
408 unsigned int i; 408 unsigned int i;
409 409
@@ -420,14 +420,14 @@ static int nmi_create_files(struct super_block *sb, struct dentry *root)
420 continue; 420 continue;
421 421
422 snprintf(buf, sizeof(buf), "%d", i); 422 snprintf(buf, sizeof(buf), "%d", i);
423 dir = oprofilefs_mkdir(sb, root, buf); 423 dir = oprofilefs_mkdir(root, buf);
424 oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled); 424 oprofilefs_create_ulong(dir, "enabled", &counter_config[i].enabled);
425 oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event); 425 oprofilefs_create_ulong(dir, "event", &counter_config[i].event);
426 oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count); 426 oprofilefs_create_ulong(dir, "count", &counter_config[i].count);
427 oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask); 427 oprofilefs_create_ulong(dir, "unit_mask", &counter_config[i].unit_mask);
428 oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel); 428 oprofilefs_create_ulong(dir, "kernel", &counter_config[i].kernel);
429 oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user); 429 oprofilefs_create_ulong(dir, "user", &counter_config[i].user);
430 oprofilefs_create_ulong(sb, dir, "extra", &counter_config[i].extra); 430 oprofilefs_create_ulong(dir, "extra", &counter_config[i].extra);
431 } 431 }
432 432
433 return 0; 433 return 0;
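This is a mechanical conversion to the slimmed-down oprofilefs API: the struct super_block argument is gone and only the parent dentry remains. A short sketch of the post-change calling convention, mirroring the calls in the hunk (counter_config as in this file):

static int counter_files_sketch(struct dentry *root)
{
	struct dentry *dir;
	char buf[4];

	snprintf(buf, sizeof(buf), "%d", 0);
	dir = oprofilefs_mkdir(root, buf);	/* no super_block */
	oprofilefs_create_ulong(dir, "enabled", &counter_config[0].enabled);
	oprofilefs_create_ulong(dir, "event", &counter_config[0].event);
	return 0;
}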
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index b2b94438ff05..50d86c0e9ba4 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -454,16 +454,16 @@ static void init_ibs(void)
454 printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n", ibs_caps); 454 printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n", ibs_caps);
455} 455}
456 456
457static int (*create_arch_files)(struct super_block *sb, struct dentry *root); 457static int (*create_arch_files)(struct dentry *root);
458 458
459static int setup_ibs_files(struct super_block *sb, struct dentry *root) 459static int setup_ibs_files(struct dentry *root)
460{ 460{
461 struct dentry *dir; 461 struct dentry *dir;
462 int ret = 0; 462 int ret = 0;
463 463
464 /* architecture specific files */ 464 /* architecture specific files */
465 if (create_arch_files) 465 if (create_arch_files)
466 ret = create_arch_files(sb, root); 466 ret = create_arch_files(root);
467 467
468 if (ret) 468 if (ret)
469 return ret; 469 return ret;
@@ -479,26 +479,26 @@ static int setup_ibs_files(struct super_block *sb, struct dentry *root)
479 ibs_config.max_cnt_op = 250000; 479 ibs_config.max_cnt_op = 250000;
480 480
481 if (ibs_caps & IBS_CAPS_FETCHSAM) { 481 if (ibs_caps & IBS_CAPS_FETCHSAM) {
482 dir = oprofilefs_mkdir(sb, root, "ibs_fetch"); 482 dir = oprofilefs_mkdir(root, "ibs_fetch");
483 oprofilefs_create_ulong(sb, dir, "enable", 483 oprofilefs_create_ulong(dir, "enable",
484 &ibs_config.fetch_enabled); 484 &ibs_config.fetch_enabled);
485 oprofilefs_create_ulong(sb, dir, "max_count", 485 oprofilefs_create_ulong(dir, "max_count",
486 &ibs_config.max_cnt_fetch); 486 &ibs_config.max_cnt_fetch);
487 oprofilefs_create_ulong(sb, dir, "rand_enable", 487 oprofilefs_create_ulong(dir, "rand_enable",
488 &ibs_config.rand_en); 488 &ibs_config.rand_en);
489 } 489 }
490 490
491 if (ibs_caps & IBS_CAPS_OPSAM) { 491 if (ibs_caps & IBS_CAPS_OPSAM) {
492 dir = oprofilefs_mkdir(sb, root, "ibs_op"); 492 dir = oprofilefs_mkdir(root, "ibs_op");
493 oprofilefs_create_ulong(sb, dir, "enable", 493 oprofilefs_create_ulong(dir, "enable",
494 &ibs_config.op_enabled); 494 &ibs_config.op_enabled);
495 oprofilefs_create_ulong(sb, dir, "max_count", 495 oprofilefs_create_ulong(dir, "max_count",
496 &ibs_config.max_cnt_op); 496 &ibs_config.max_cnt_op);
497 if (ibs_caps & IBS_CAPS_OPCNT) 497 if (ibs_caps & IBS_CAPS_OPCNT)
498 oprofilefs_create_ulong(sb, dir, "dispatched_ops", 498 oprofilefs_create_ulong(dir, "dispatched_ops",
499 &ibs_config.dispatched_ops); 499 &ibs_config.dispatched_ops);
500 if (ibs_caps & IBS_CAPS_BRNTRGT) 500 if (ibs_caps & IBS_CAPS_BRNTRGT)
501 oprofilefs_create_ulong(sb, dir, "branch_target", 501 oprofilefs_create_ulong(dir, "branch_target",
502 &ibs_config.branch_target); 502 &ibs_config.branch_target);
503 } 503 }
504 504
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index d641897a1f4e..b30e937689d6 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -568,13 +568,8 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
568 */ 568 */
569 if (bus) { 569 if (bus) {
570 struct pci_bus *child; 570 struct pci_bus *child;
571 list_for_each_entry(child, &bus->children, node) { 571 list_for_each_entry(child, &bus->children, node)
572 struct pci_dev *self = child->self; 572 pcie_bus_configure_settings(child);
573 if (!self)
574 continue;
575
576 pcie_bus_configure_settings(child, self->pcie_mpss);
577 }
578 } 573 }
579 574
580 if (bus && node != -1) { 575 if (bus && node != -1) {
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index 94919e307f8e..db6b1ab43255 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -210,6 +210,8 @@ static void pcibios_allocate_bridge_resources(struct pci_dev *dev)
210 r = &dev->resource[idx]; 210 r = &dev->resource[idx];
211 if (!r->flags) 211 if (!r->flags)
212 continue; 212 continue;
213 if (r->parent) /* Already allocated */
214 continue;
213 if (!r->start || pci_claim_resource(dev, idx) < 0) { 215 if (!r->start || pci_claim_resource(dev, idx) < 0) {
214 /* 216 /*
215 * Something is wrong with the region. 217 * Something is wrong with the region.
@@ -318,6 +320,8 @@ static void pcibios_allocate_dev_rom_resource(struct pci_dev *dev)
318 r = &dev->resource[PCI_ROM_RESOURCE]; 320 r = &dev->resource[PCI_ROM_RESOURCE];
319 if (!r->flags || !r->start) 321 if (!r->flags || !r->start)
320 return; 322 return;
323 if (r->parent) /* Already allocated */
324 return;
321 325
322 if (pci_claim_resource(dev, PCI_ROM_RESOURCE) < 0) { 326 if (pci_claim_resource(dev, PCI_ROM_RESOURCE) < 0) {
323 r->end -= r->start; 327 r->end -= r->start;
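The new r->parent checks above skip resources that already sit in the resource tree; claiming them a second time would fail, and the error path would wrongly clobber a perfectly good region. A sketch of the guard, with an illustrative fallback helper:

/* Sketch of the claim-once pattern added above. */
static void claim_if_needed_sketch(struct pci_dev *dev, int idx)
{
	struct resource *r = &dev->resource[idx];

	if (!r->flags)
		return;			/* nothing here */
	if (r->parent)
		return;			/* already allocated */
	if (!r->start || pci_claim_resource(dev, idx) < 0)
		handle_bad_region(dev, r);	/* illustrative fallback */
}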
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index 082e88129712..5596c7bdd327 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -700,7 +700,7 @@ int pci_mmconfig_insert(struct device *dev, u16 seg, u8 start, u8 end,
700 if (!(pci_probe & PCI_PROBE_MMCONF) || pci_mmcfg_arch_init_failed) 700 if (!(pci_probe & PCI_PROBE_MMCONF) || pci_mmcfg_arch_init_failed)
701 return -ENODEV; 701 return -ENODEV;
702 702
703 if (start > end) 703 if (start > end || !addr)
704 return -EINVAL; 704 return -EINVAL;
705 705
706 mutex_lock(&pci_mmcfg_lock); 706 mutex_lock(&pci_mmcfg_lock);
@@ -716,11 +716,6 @@ int pci_mmconfig_insert(struct device *dev, u16 seg, u8 start, u8 end,
716 return -EEXIST; 716 return -EEXIST;
717 } 717 }
718 718
719 if (!addr) {
720 mutex_unlock(&pci_mmcfg_lock);
721 return -EINVAL;
722 }
723
724 rc = -EBUSY; 719 rc = -EBUSY;
725 cfg = pci_mmconfig_alloc(seg, start, end, addr); 720 cfg = pci_mmconfig_alloc(seg, start, end, addr);
726 if (cfg == NULL) { 721 if (cfg == NULL) {
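Hoisting the !addr test up to join start > end turns both into cheap argument checks done before mutex_lock(), so invalid input never touches the lock or needs an unlock-and-bail path. A tiny sketch of the fail-fast shape, under illustrative names:

static int mmconfig_insert_sketch(u16 seg, u8 start, u8 end, u64 addr)
{
	if (start > end || !addr)
		return -EINVAL;		/* reject before locking */

	mutex_lock(&cfg_lock_sketch);
	/* ... look up, allocate and map under the lock ... */
	mutex_unlock(&cfg_lock_sketch);
	return 0;
}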
diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
index 6eb18c42a28a..903fded50786 100644
--- a/arch/x86/pci/mrst.c
+++ b/arch/x86/pci/mrst.c
@@ -23,11 +23,11 @@
23#include <linux/ioport.h> 23#include <linux/ioport.h>
24#include <linux/init.h> 24#include <linux/init.h>
25#include <linux/dmi.h> 25#include <linux/dmi.h>
26#include <linux/acpi.h>
27#include <linux/io.h>
28#include <linux/smp.h>
26 29
27#include <asm/acpi.h>
28#include <asm/segment.h> 30#include <asm/segment.h>
29#include <asm/io.h>
30#include <asm/smp.h>
31#include <asm/pci_x86.h> 31#include <asm/pci_x86.h>
32#include <asm/hw_irq.h> 32#include <asm/hw_irq.h>
33#include <asm/io_apic.h> 33#include <asm/io_apic.h>
@@ -43,7 +43,7 @@
43#define PCI_FIXED_BAR_4_SIZE 0x14 43#define PCI_FIXED_BAR_4_SIZE 0x14
44#define PCI_FIXED_BAR_5_SIZE 0x1c 44#define PCI_FIXED_BAR_5_SIZE 0x1c
45 45
46static int pci_soc_mode = 0; 46static int pci_soc_mode;
47 47
48/** 48/**
49 * fixed_bar_cap - return the offset of the fixed BAR cap if found 49 * fixed_bar_cap - return the offset of the fixed BAR cap if found
@@ -141,7 +141,8 @@ static int pci_device_update_fixed(struct pci_bus *bus, unsigned int devfn,
141 */ 141 */
142static bool type1_access_ok(unsigned int bus, unsigned int devfn, int reg) 142static bool type1_access_ok(unsigned int bus, unsigned int devfn, int reg)
143{ 143{
144 /* This is a workaround for A0 LNC bug where PCI status register does 144 /*
145 * This is a workaround for A0 LNC bug where PCI status register does
145 * not have new CAP bit set. can not be written by SW either. 146 * not have the new CAP bit set; it cannot be written by SW either.
146 * 147 *
147 * PCI header type in real LNC indicates a single function device, this 148 * PCI header type in real LNC indicates a single function device, this
@@ -154,7 +155,7 @@ static bool type1_access_ok(unsigned int bus, unsigned int devfn, int reg)
154 || devfn == PCI_DEVFN(0, 0) 155 || devfn == PCI_DEVFN(0, 0)
155 || devfn == PCI_DEVFN(3, 0))) 156 || devfn == PCI_DEVFN(3, 0)))
156 return 1; 157 return 1;
157 return 0; /* langwell on others */ 158 return 0; /* Langwell on others */
158} 159}
159 160
160static int pci_read(struct pci_bus *bus, unsigned int devfn, int where, 161static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
@@ -172,7 +173,8 @@ static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
172{ 173{
173 int offset; 174 int offset;
174 175
175 /* On MRST, there is no PCI ROM BAR, this will cause a subsequent read 176 /*
177 * On MRST, there is no PCI ROM BAR, this will cause a subsequent read
176 * to ROM BAR return 0 then being ignored. 178 * to ROM BAR return 0 then being ignored.
177 */ 179 */
178 if (where == PCI_ROM_ADDRESS) 180 if (where == PCI_ROM_ADDRESS)
@@ -210,7 +212,8 @@ static int mrst_pci_irq_enable(struct pci_dev *dev)
210 212
211 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); 213 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
212 214
213 /* MRST only have IOAPIC, the PCI irq lines are 1:1 mapped to 215 /*
216 * MRST only has an IOAPIC; the PCI irq lines are 1:1 mapped to
214 * IOAPIC RTE entries, so we just enable RTE for the device. 217 * IOAPIC RTE entries, so we just enable RTE for the device.
215 */ 218 */
216 irq_attr.ioapic = mp_find_ioapic(dev->irq); 219 irq_attr.ioapic = mp_find_ioapic(dev->irq);
@@ -235,7 +238,7 @@ struct pci_ops pci_mrst_ops = {
235 */ 238 */
236int __init pci_mrst_init(void) 239int __init pci_mrst_init(void)
237{ 240{
238 printk(KERN_INFO "Intel MID platform detected, using MID PCI ops\n"); 241 pr_info("Intel MID platform detected, using MID PCI ops\n");
239 pci_mmcfg_late_init(); 242 pci_mmcfg_late_init();
240 pcibios_enable_irq = mrst_pci_irq_enable; 243 pcibios_enable_irq = mrst_pci_irq_enable;
241 pci_root_ops = pci_mrst_ops; 244 pci_root_ops = pci_mrst_ops;
@@ -244,17 +247,21 @@ int __init pci_mrst_init(void)
244 return 1; 247 return 1;
245} 248}
246 249
247/* Langwell devices are not true pci devices, they are not subject to 10 ms 250/*
248 * d3 to d0 delay required by pci spec. 251 * Langwell devices are not true PCI devices; they are not subject to 10 ms
252 * d3 to d0 delay required by PCI spec.
249 */ 253 */
250static void pci_d3delay_fixup(struct pci_dev *dev) 254static void pci_d3delay_fixup(struct pci_dev *dev)
251{ 255{
252 /* PCI fixups are effectively decided compile time. If we have a dual 256 /*
253 SoC/non-SoC kernel we don't want to mangle d3 on non SoC devices */ 257 * PCI fixups are effectively decided compile time. If we have a dual
254 if (!pci_soc_mode) 258 * SoC/non-SoC kernel we don't want to mangle d3 on non-SoC devices.
255 return; 259 */
256 /* true pci devices in lincroft should allow type 1 access, the rest 260 if (!pci_soc_mode)
257 * are langwell fake pci devices. 261 return;
262 /*
263 * True PCI devices in Lincroft should allow type 1 access, the rest
264 * are Langwell fake PCI devices.
258 */ 265 */
259 if (type1_access_ok(dev->bus->number, dev->devfn, PCI_DEVICE_ID)) 266 if (type1_access_ok(dev->bus->number, dev->devfn, PCI_DEVICE_ID))
260 return; 267 return;
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 1cf5b300305e..424f4c97a44d 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -25,10 +25,10 @@
25#include <asm/cpu.h> 25#include <asm/cpu.h>
26 26
27#ifdef CONFIG_X86_32 27#ifdef CONFIG_X86_32
28unsigned long saved_context_ebx; 28__visible unsigned long saved_context_ebx;
29unsigned long saved_context_esp, saved_context_ebp; 29__visible unsigned long saved_context_esp, saved_context_ebp;
30unsigned long saved_context_esi, saved_context_edi; 30__visible unsigned long saved_context_esi, saved_context_edi;
31unsigned long saved_context_eflags; 31__visible unsigned long saved_context_eflags;
32#endif 32#endif
33struct saved_context saved_context; 33struct saved_context saved_context;
34 34
diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
index a0fde91c16cf..304fca20d96e 100644
--- a/arch/x86/power/hibernate_64.c
+++ b/arch/x86/power/hibernate_64.c
@@ -20,26 +20,26 @@
20#include <asm/suspend.h> 20#include <asm/suspend.h>
21 21
22/* References to section boundaries */ 22/* References to section boundaries */
23extern const void __nosave_begin, __nosave_end; 23extern __visible const void __nosave_begin, __nosave_end;
24 24
25/* Defined in hibernate_asm_64.S */ 25/* Defined in hibernate_asm_64.S */
26extern int restore_image(void); 26extern asmlinkage int restore_image(void);
27 27
28/* 28/*
29 * Address to jump to in the last phase of restore in order to get to the image 29 * Address to jump to in the last phase of restore in order to get to the image
30 * kernel's text (this value is passed in the image header). 30 * kernel's text (this value is passed in the image header).
31 */ 31 */
32unsigned long restore_jump_address; 32unsigned long restore_jump_address __visible;
33 33
34/* 34/*
35 * Value of the cr3 register from before the hibernation (this value is passed 35 * Value of the cr3 register from before the hibernation (this value is passed
36 * in the image header). 36 * in the image header).
37 */ 37 */
38unsigned long restore_cr3; 38unsigned long restore_cr3 __visible;
39 39
40pgd_t *temp_level4_pgt; 40pgd_t *temp_level4_pgt __visible;
41 41
42void *relocated_restore_code; 42void *relocated_restore_code __visible;
43 43
44static void *alloc_pgt_page(void *context) 44static void *alloc_pgt_page(void *context)
45{ 45{
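The sprinkling of __visible and asmlinkage above (and in arch/x86/power/cpu.c and lib/usercopy_64.c earlier) exists for link-time optimization: a symbol referenced only from assembly looks unused to the compiler, and a function called from assembly must keep a fixed calling convention. A kernel-context sketch, assuming <linux/linkage.h>:

/* Roughly, __visible expands to __attribute__((externally_visible)),
 * which pins the symbol even under whole-program optimization. */
__visible unsigned long restore_jump_sketch;	/* stored from .S code */

extern asmlinkage int restore_image_sketch(void);	/* defined in .S */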
diff --git a/arch/x86/tools/gen-insn-attr-x86.awk b/arch/x86/tools/gen-insn-attr-x86.awk
index e6773dc8ac41..093a892026f9 100644
--- a/arch/x86/tools/gen-insn-attr-x86.awk
+++ b/arch/x86/tools/gen-insn-attr-x86.awk
@@ -68,7 +68,7 @@ BEGIN {
68 68
69 lprefix1_expr = "\\((66|!F3)\\)" 69 lprefix1_expr = "\\((66|!F3)\\)"
70 lprefix2_expr = "\\(F3\\)" 70 lprefix2_expr = "\\(F3\\)"
71 lprefix3_expr = "\\((F2|!F3)\\)" 71 lprefix3_expr = "\\((F2|!F3|66\\&F2)\\)"
72 lprefix_expr = "\\((66|F2|F3)\\)" 72 lprefix_expr = "\\((66|F2|F3)\\)"
73 max_lprefix = 4 73 max_lprefix = 4
74 74
@@ -83,6 +83,8 @@ BEGIN {
83 prefix_num["Operand-Size"] = "INAT_PFX_OPNDSZ" 83 prefix_num["Operand-Size"] = "INAT_PFX_OPNDSZ"
84 prefix_num["REPNE"] = "INAT_PFX_REPNE" 84 prefix_num["REPNE"] = "INAT_PFX_REPNE"
85 prefix_num["REP/REPE"] = "INAT_PFX_REPE" 85 prefix_num["REP/REPE"] = "INAT_PFX_REPE"
86 prefix_num["XACQUIRE"] = "INAT_PFX_REPNE"
87 prefix_num["XRELEASE"] = "INAT_PFX_REPE"
86 prefix_num["LOCK"] = "INAT_PFX_LOCK" 88 prefix_num["LOCK"] = "INAT_PFX_LOCK"
87 prefix_num["SEG=CS"] = "INAT_PFX_CS" 89 prefix_num["SEG=CS"] = "INAT_PFX_CS"
88 prefix_num["SEG=DS"] = "INAT_PFX_DS" 90 prefix_num["SEG=DS"] = "INAT_PFX_DS"
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index c74436e687bf..72074d528400 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -85,15 +85,18 @@ static notrace cycle_t vread_pvclock(int *mode)
85 cycle_t ret; 85 cycle_t ret;
86 u64 last; 86 u64 last;
87 u32 version; 87 u32 version;
88 u32 migrate_count;
89 u8 flags; 88 u8 flags;
90 unsigned cpu, cpu1; 89 unsigned cpu, cpu1;
91 90
92 91
93 /* 92 /*
94 * When looping to get a consistent (time-info, tsc) pair, we 93 * Note: hypervisor must guarantee that:
95 * also need to deal with the possibility we can switch vcpus, 94 * 1. cpu ID number maps 1:1 to per-CPU pvclock time info.
96 * so make sure we always re-fetch time-info for the current vcpu. 95 * 2. that per-CPU pvclock time info is updated if the
96 * underlying CPU changes.
97 * 3. that version is increased whenever underlying CPU
98 * changes.
99 *
97 */ 100 */
98 do { 101 do {
99 cpu = __getcpu() & VGETCPU_CPU_MASK; 102 cpu = __getcpu() & VGETCPU_CPU_MASK;
@@ -104,8 +107,6 @@ static notrace cycle_t vread_pvclock(int *mode)
104 107
105 pvti = get_pvti(cpu); 108 pvti = get_pvti(cpu);
106 109
107 migrate_count = pvti->migrate_count;
108
109 version = __pvclock_read_cycles(&pvti->pvti, &ret, &flags); 110 version = __pvclock_read_cycles(&pvti->pvti, &ret, &flags);
110 111
111 /* 112 /*
@@ -117,8 +118,7 @@ static notrace cycle_t vread_pvclock(int *mode)
117 cpu1 = __getcpu() & VGETCPU_CPU_MASK; 118 cpu1 = __getcpu() & VGETCPU_CPU_MASK;
118 } while (unlikely(cpu != cpu1 || 119 } while (unlikely(cpu != cpu1 ||
119 (pvti->pvti.version & 1) || 120 (pvti->pvti.version & 1) ||
120 pvti->pvti.version != version || 121 pvti->pvti.version != version));
121 pvti->migrate_count != migrate_count));
122 122
123 if (unlikely(!(flags & PVCLOCK_TSC_STABLE_BIT))) 123 if (unlikely(!(flags & PVCLOCK_TSC_STABLE_BIT)))
124 *mode = VCLOCK_NONE; 124 *mode = VCLOCK_NONE;
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 193097ef3d7d..2fc216dfbd9c 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -427,8 +427,7 @@ static void __init xen_init_cpuid_mask(void)
427 427
428 if (!xen_initial_domain()) 428 if (!xen_initial_domain())
429 cpuid_leaf1_edx_mask &= 429 cpuid_leaf1_edx_mask &=
430 ~((1 << X86_FEATURE_APIC) | /* disable local APIC */ 430 ~((1 << X86_FEATURE_ACPI)); /* disable ACPI */
431 (1 << X86_FEATURE_ACPI)); /* disable ACPI */
432 431
433 cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_X2APIC % 32)); 432 cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_X2APIC % 32));
434 433
@@ -735,8 +734,7 @@ static int cvt_gate_to_trap(int vector, const gate_desc *val,
735 addr = (unsigned long)xen_int3; 734 addr = (unsigned long)xen_int3;
736 else if (addr == (unsigned long)stack_segment) 735 else if (addr == (unsigned long)stack_segment)
737 addr = (unsigned long)xen_stack_segment; 736 addr = (unsigned long)xen_stack_segment;
738 else if (addr == (unsigned long)double_fault || 737 else if (addr == (unsigned long)double_fault) {
739 addr == (unsigned long)nmi) {
740 /* Don't need to handle these */ 738 /* Don't need to handle these */
741 return 0; 739 return 0;
742#ifdef CONFIG_X86_MCE 740#ifdef CONFIG_X86_MCE
@@ -747,7 +745,12 @@ static int cvt_gate_to_trap(int vector, const gate_desc *val,
747 */ 745 */
748 ; 746 ;
749#endif 747#endif
750 } else { 748 } else if (addr == (unsigned long)nmi)
749 /*
750 * Use the native version as well.
751 */
752 ;
753 else {
751 /* Some other trap using IST? */ 754 /* Some other trap using IST? */
752 if (WARN_ON(val->ist != 0)) 755 if (WARN_ON(val->ist != 0))
753 return 0; 756 return 0;
@@ -1710,6 +1713,8 @@ static void __init xen_hvm_guest_init(void)
1710 1713
1711 xen_hvm_init_shared_info(); 1714 xen_hvm_init_shared_info();
1712 1715
1716 xen_panic_handler_init();
1717
1713 if (xen_feature(XENFEAT_hvm_callback_vector)) 1718 if (xen_feature(XENFEAT_hvm_callback_vector))
1714 xen_have_vector_callback = 1; 1719 xen_have_vector_callback = 1;
1715 xen_hvm_smp_init(); 1720 xen_hvm_smp_init();
@@ -1720,15 +1725,12 @@ static void __init xen_hvm_guest_init(void)
1720 xen_hvm_init_mmu_ops(); 1725 xen_hvm_init_mmu_ops();
1721} 1726}
1722 1727
1723static bool __init xen_hvm_platform(void) 1728static uint32_t __init xen_hvm_platform(void)
1724{ 1729{
1725 if (xen_pv_domain()) 1730 if (xen_pv_domain())
1726 return false; 1731 return 0;
1727
1728 if (!xen_cpuid_base())
1729 return false;
1730 1732
1731 return true; 1733 return xen_cpuid_base();
1732} 1734}
1733 1735
1734bool xen_hvm_need_lapic(void) 1736bool xen_hvm_need_lapic(void)
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c
index 01a4dc015ae1..0da7f863056f 100644
--- a/arch/x86/xen/irq.c
+++ b/arch/x86/xen/irq.c
@@ -47,23 +47,18 @@ static void xen_restore_fl(unsigned long flags)
47 /* convert from IF type flag */ 47 /* convert from IF type flag */
48 flags = !(flags & X86_EFLAGS_IF); 48 flags = !(flags & X86_EFLAGS_IF);
49 49
50 /* There's a one instruction preempt window here. We need to 50 /* See xen_irq_enable() for why preemption must be disabled. */
51 make sure we're don't switch CPUs between getting the vcpu
52 pointer and updating the mask. */
53 preempt_disable(); 51 preempt_disable();
54 vcpu = this_cpu_read(xen_vcpu); 52 vcpu = this_cpu_read(xen_vcpu);
55 vcpu->evtchn_upcall_mask = flags; 53 vcpu->evtchn_upcall_mask = flags;
56 preempt_enable_no_resched();
57
58 /* Doesn't matter if we get preempted here, because any
59 pending event will get dealt with anyway. */
60 54
61 if (flags == 0) { 55 if (flags == 0) {
62 preempt_check_resched();
63 barrier(); /* unmask then check (avoid races) */ 56 barrier(); /* unmask then check (avoid races) */
64 if (unlikely(vcpu->evtchn_upcall_pending)) 57 if (unlikely(vcpu->evtchn_upcall_pending))
65 xen_force_evtchn_callback(); 58 xen_force_evtchn_callback();
66 } 59 preempt_enable();
60 } else
61 preempt_enable_no_resched();
67} 62}
68PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl); 63PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl);
69 64
@@ -82,10 +77,12 @@ static void xen_irq_enable(void)
82{ 77{
83 struct vcpu_info *vcpu; 78 struct vcpu_info *vcpu;
84 79
85 /* We don't need to worry about being preempted here, since 80 /*
86 either a) interrupts are disabled, so no preemption, or b) 81 * We may be preempted as soon as vcpu->evtchn_upcall_mask is
87 the caller is confused and is trying to re-enable interrupts 82 * cleared, so disable preemption to ensure we check for
88 on an indeterminate processor. */ 83 * events on the VCPU we are still running on.
84 */
85 preempt_disable();
89 86
90 vcpu = this_cpu_read(xen_vcpu); 87 vcpu = this_cpu_read(xen_vcpu);
91 vcpu->evtchn_upcall_mask = 0; 88 vcpu->evtchn_upcall_mask = 0;
@@ -96,6 +93,8 @@ static void xen_irq_enable(void)
96 barrier(); /* unmask then check (avoid races) */ 93 barrier(); /* unmask then check (avoid races) */
97 if (unlikely(vcpu->evtchn_upcall_pending)) 94 if (unlikely(vcpu->evtchn_upcall_pending))
98 xen_force_evtchn_callback(); 95 xen_force_evtchn_callback();
96
97 preempt_enable();
99} 98}
100PV_CALLEE_SAVE_REGS_THUNK(xen_irq_enable); 99PV_CALLEE_SAVE_REGS_THUNK(xen_irq_enable);
101 100
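Both fixes above close the same hole: reading the xen_vcpu pointer, writing evtchn_upcall_mask and testing evtchn_upcall_pending must all happen on one CPU, so preemption stays disabled across the whole sequence (with a plain preempt_enable() so a pending reschedule is honoured). The resulting shape, mirroring xen_irq_enable():

static void irq_enable_sketch(void)
{
	struct vcpu_info *vcpu;

	preempt_disable();		/* stay on this vCPU throughout */
	vcpu = this_cpu_read(xen_vcpu);
	vcpu->evtchn_upcall_mask = 0;	/* unmask */
	barrier();			/* unmask before checking */
	if (unlikely(vcpu->evtchn_upcall_pending))
		xen_force_evtchn_callback();
	preempt_enable();
}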
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 95fb2aa5927e..0d4ec35895d4 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -161,6 +161,7 @@
161#include <asm/xen/page.h> 161#include <asm/xen/page.h>
162#include <asm/xen/hypercall.h> 162#include <asm/xen/hypercall.h>
163#include <asm/xen/hypervisor.h> 163#include <asm/xen/hypervisor.h>
164#include <xen/balloon.h>
164#include <xen/grant_table.h> 165#include <xen/grant_table.h>
165 166
166#include "multicalls.h" 167#include "multicalls.h"
@@ -967,7 +968,10 @@ int m2p_remove_override(struct page *page,
967 if (kmap_op != NULL) { 968 if (kmap_op != NULL) {
968 if (!PageHighMem(page)) { 969 if (!PageHighMem(page)) {
969 struct multicall_space mcs; 970 struct multicall_space mcs;
970 struct gnttab_unmap_grant_ref *unmap_op; 971 struct gnttab_unmap_and_replace *unmap_op;
972 struct page *scratch_page = get_balloon_scratch_page();
973 unsigned long scratch_page_address = (unsigned long)
974 __va(page_to_pfn(scratch_page) << PAGE_SHIFT);
971 975
972 /* 976 /*
973 * It might be that we queued all the m2p grant table 977 * It might be that we queued all the m2p grant table
@@ -990,21 +994,25 @@ int m2p_remove_override(struct page *page,
990 } 994 }
991 995
992 mcs = xen_mc_entry( 996 mcs = xen_mc_entry(
993 sizeof(struct gnttab_unmap_grant_ref)); 997 sizeof(struct gnttab_unmap_and_replace));
994 unmap_op = mcs.args; 998 unmap_op = mcs.args;
995 unmap_op->host_addr = kmap_op->host_addr; 999 unmap_op->host_addr = kmap_op->host_addr;
1000 unmap_op->new_addr = scratch_page_address;
996 unmap_op->handle = kmap_op->handle; 1001 unmap_op->handle = kmap_op->handle;
997 unmap_op->dev_bus_addr = 0;
998 1002
999 MULTI_grant_table_op(mcs.mc, 1003 MULTI_grant_table_op(mcs.mc,
1000 GNTTABOP_unmap_grant_ref, unmap_op, 1); 1004 GNTTABOP_unmap_and_replace, unmap_op, 1);
1001 1005
1002 xen_mc_issue(PARAVIRT_LAZY_MMU); 1006 xen_mc_issue(PARAVIRT_LAZY_MMU);
1003 1007
1004 set_pte_at(&init_mm, address, ptep, 1008 mcs = __xen_mc_entry(0);
1005 pfn_pte(pfn, PAGE_KERNEL)); 1009 MULTI_update_va_mapping(mcs.mc, scratch_page_address,
1006 __flush_tlb_single(address); 1010 pfn_pte(page_to_pfn(get_balloon_scratch_page()),
1011 PAGE_KERNEL_RO), 0);
1012 xen_mc_issue(PARAVIRT_LAZY_MMU);
1013
1007 kmap_op->host_addr = 0; 1014 kmap_op->host_addr = 0;
1015 put_balloon_scratch_page();
1008 } 1016 }
1009 } 1017 }
1010 1018
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 056d11faef21..09f3059cb00b 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -33,6 +33,9 @@
33/* These are code, but not functions. Defined in entry.S */ 33/* These are code, but not functions. Defined in entry.S */
34extern const char xen_hypervisor_callback[]; 34extern const char xen_hypervisor_callback[];
35extern const char xen_failsafe_callback[]; 35extern const char xen_failsafe_callback[];
36#ifdef CONFIG_X86_64
37extern const char nmi[];
38#endif
36extern void xen_sysenter_target(void); 39extern void xen_sysenter_target(void);
37extern void xen_syscall_target(void); 40extern void xen_syscall_target(void);
38extern void xen_syscall32_target(void); 41extern void xen_syscall32_target(void);
@@ -215,13 +218,19 @@ static void __init xen_set_identity_and_release_chunk(
215 unsigned long pfn; 218 unsigned long pfn;
216 219
217 /* 220 /*
218 * If the PFNs are currently mapped, the VA mapping also needs 221 * If the PFNs are currently mapped, clear the mappings
219 * to be updated to be 1:1. 222 * (except for the ISA region which must be 1:1 mapped) to
223 * release the refcounts (in Xen) on the original frames.
220 */ 224 */
221 for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++) 225 for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++) {
226 pte_t pte = __pte_ma(0);
227
228 if (pfn < PFN_UP(ISA_END_ADDRESS))
229 pte = mfn_pte(pfn, PAGE_KERNEL_IO);
230
222 (void)HYPERVISOR_update_va_mapping( 231 (void)HYPERVISOR_update_va_mapping(
223 (unsigned long)__va(pfn << PAGE_SHIFT), 232 (unsigned long)__va(pfn << PAGE_SHIFT), pte, 0);
224 mfn_pte(pfn, PAGE_KERNEL_IO), 0); 233 }
225 234
226 if (start_pfn < nr_pages) 235 if (start_pfn < nr_pages)
227 *released += xen_release_chunk( 236 *released += xen_release_chunk(
@@ -313,6 +322,17 @@ static void xen_align_and_add_e820_region(u64 start, u64 size, int type)
313 e820_add_region(start, end - start, type); 322 e820_add_region(start, end - start, type);
314} 323}
315 324
325void xen_ignore_unusable(struct e820entry *list, size_t map_size)
326{
327 struct e820entry *entry;
328 unsigned int i;
329
330 for (i = 0, entry = list; i < map_size; i++, entry++) {
331 if (entry->type == E820_UNUSABLE)
332 entry->type = E820_RAM;
333 }
334}
335
316/** 336/**
317 * machine_specific_memory_setup - Hook for machine specific memory setup. 337 * machine_specific_memory_setup - Hook for machine specific memory setup.
318 **/ 338 **/
@@ -353,6 +373,17 @@ char * __init xen_memory_setup(void)
353 } 373 }
354 BUG_ON(rc); 374 BUG_ON(rc);
355 375
376 /*
377 * Xen won't allow a 1:1 mapping to be created to UNUSABLE
378 * regions, so if we're using the machine memory map leave the
379 * region as RAM as it is in the pseudo-physical map.
380 *
381 * UNUSABLE regions in domUs are not handled and will need
382 * a patch in the future.
383 */
384 if (xen_initial_domain())
385 xen_ignore_unusable(map, memmap.nr_entries);
386
356 /* Make sure the Xen-supplied memory map is well-ordered. */ 387 /* Make sure the Xen-supplied memory map is well-ordered. */
357 sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries); 388 sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);
358 389
@@ -525,7 +556,13 @@ void xen_enable_syscall(void)
525 } 556 }
526#endif /* CONFIG_X86_64 */ 557#endif /* CONFIG_X86_64 */
527} 558}
528 559void __cpuinit xen_enable_nmi(void)
560{
561#ifdef CONFIG_X86_64
562 if (register_callback(CALLBACKTYPE_nmi, nmi))
563 BUG();
564#endif
565}
529void __init xen_arch_setup(void) 566void __init xen_arch_setup(void)
530{ 567{
531 xen_panic_handler_init(); 568 xen_panic_handler_init();
@@ -543,7 +580,7 @@ void __init xen_arch_setup(void)
543 580
544 xen_enable_sysenter(); 581 xen_enable_sysenter();
545 xen_enable_syscall(); 582 xen_enable_syscall();
546 583 xen_enable_nmi();
547#ifdef CONFIG_ACPI 584#ifdef CONFIG_ACPI
548 if (!(xen_start_info->flags & SIF_INITDOMAIN)) { 585 if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
549 printk(KERN_INFO "ACPI in unprivileged domain disabled\n"); 586 printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
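Two independent additions above: dom0 re-types E820_UNUSABLE regions as RAM (Xen refuses 1:1 mappings to UNUSABLE regions, and the pseudo-physical map already treats them as RAM), and 64-bit PV guests now register an NMI entry point. The callback half, mirroring the hunk:

/* Sketch: wiring the NMI callback like the other PV callbacks;
 * 'nmi' is the asm entry declared at the top of this file's diff. */
void enable_nmi_sketch(void)
{
#ifdef CONFIG_X86_64
	if (register_callback(CALLBACKTYPE_nmi, nmi))
		BUG();
#endif
}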
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index ca92754eb846..9235842cd76a 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -279,6 +279,7 @@ static void __init xen_smp_prepare_boot_cpu(void)
279 279
280 xen_filter_cpu_maps(); 280 xen_filter_cpu_maps();
281 xen_setup_vcpu_info_placement(); 281 xen_setup_vcpu_info_placement();
282 xen_init_spinlocks();
282} 283}
283 284
284static void __init xen_smp_prepare_cpus(unsigned int max_cpus) 285static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
@@ -572,6 +573,12 @@ static inline int xen_map_vector(int vector)
572 case IRQ_WORK_VECTOR: 573 case IRQ_WORK_VECTOR:
573 xen_vector = XEN_IRQ_WORK_VECTOR; 574 xen_vector = XEN_IRQ_WORK_VECTOR;
574 break; 575 break;
576#ifdef CONFIG_X86_64
577 case NMI_VECTOR:
578 case APIC_DM_NMI: /* Some use that instead of NMI_VECTOR */
579 xen_vector = XEN_NMI_VECTOR;
580 break;
581#endif
575 default: 582 default:
576 xen_vector = -1; 583 xen_vector = -1;
577 printk(KERN_ERR "xen: vector 0x%x is not implemented\n", 584 printk(KERN_ERR "xen: vector 0x%x is not implemented\n",
@@ -680,7 +687,6 @@ void __init xen_smp_init(void)
680{ 687{
681 smp_ops = xen_smp_ops; 688 smp_ops = xen_smp_ops;
682 xen_fill_possible_map(); 689 xen_fill_possible_map();
683 xen_init_spinlocks();
684} 690}
685 691
686static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus) 692static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
@@ -694,8 +700,15 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
694static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle) 700static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
695{ 701{
696 int rc; 702 int rc;
697 rc = native_cpu_up(cpu, tidle); 703 /*
698 WARN_ON (xen_smp_intr_init(cpu)); 704 * xen_smp_intr_init() needs to run before native_cpu_up()
705 * so that IPI vectors are set up on the booting CPU before
706 * it is marked online in native_cpu_up().
707 */
708 rc = xen_smp_intr_init(cpu);
709 WARN_ON(rc);
710 if (!rc)
711 rc = native_cpu_up(cpu, tidle);
699 return rc; 712 return rc;
700} 713}
701 714
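The HVM bring-up fix above is pure ordering: the IPI vectors must exist before native_cpu_up() marks the CPU online, and a failure now aborts the bring-up instead of being warned about after the fact. The resulting shape:

static int hvm_cpu_up_sketch(unsigned int cpu, struct task_struct *tidle)
{
	int rc = xen_smp_intr_init(cpu);	/* 1: IPI plumbing first */

	WARN_ON(rc);
	if (rc)
		return rc;		/* don't online a half-wired CPU */
	return native_cpu_up(cpu, tidle);	/* 2: then mark online */
}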
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index cf3caee356b3..0438b9324a72 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -17,45 +17,44 @@
17#include "xen-ops.h" 17#include "xen-ops.h"
18#include "debugfs.h" 18#include "debugfs.h"
19 19
20#ifdef CONFIG_XEN_DEBUG_FS 20enum xen_contention_stat {
21static struct xen_spinlock_stats 21 TAKEN_SLOW,
22{ 22 TAKEN_SLOW_PICKUP,
23 u64 taken; 23 TAKEN_SLOW_SPURIOUS,
24 u32 taken_slow; 24 RELEASED_SLOW,
25 u32 taken_slow_nested; 25 RELEASED_SLOW_KICKED,
26 u32 taken_slow_pickup; 26 NR_CONTENTION_STATS
27 u32 taken_slow_spurious; 27};
28 u32 taken_slow_irqenable;
29 28
30 u64 released;
31 u32 released_slow;
32 u32 released_slow_kicked;
33 29
30#ifdef CONFIG_XEN_DEBUG_FS
34#define HISTO_BUCKETS 30 31#define HISTO_BUCKETS 30
35 u32 histo_spin_total[HISTO_BUCKETS+1]; 32static struct xen_spinlock_stats
36 u32 histo_spin_spinning[HISTO_BUCKETS+1]; 33{
34 u32 contention_stats[NR_CONTENTION_STATS];
37 u32 histo_spin_blocked[HISTO_BUCKETS+1]; 35 u32 histo_spin_blocked[HISTO_BUCKETS+1];
38
39 u64 time_total;
40 u64 time_spinning;
41 u64 time_blocked; 36 u64 time_blocked;
42} spinlock_stats; 37} spinlock_stats;
43 38
44static u8 zero_stats; 39static u8 zero_stats;
45 40
46static unsigned lock_timeout = 1 << 10;
47#define TIMEOUT lock_timeout
48
49static inline void check_zero(void) 41static inline void check_zero(void)
50{ 42{
51 if (unlikely(zero_stats)) { 43 u8 ret;
52 memset(&spinlock_stats, 0, sizeof(spinlock_stats)); 44 u8 old = ACCESS_ONCE(zero_stats);
53 zero_stats = 0; 45 if (unlikely(old)) {
46 ret = cmpxchg(&zero_stats, old, 0);
47 /* This ensures only one fellow resets the stat */
48 if (ret == old)
49 memset(&spinlock_stats, 0, sizeof(spinlock_stats));
54 } 50 }
55} 51}
56 52
57#define ADD_STATS(elem, val) \ 53static inline void add_stats(enum xen_contention_stat var, u32 val)
58 do { check_zero(); spinlock_stats.elem += (val); } while(0) 54{
55 check_zero();
56 spinlock_stats.contention_stats[var] += val;
57}
59 58
60static inline u64 spin_time_start(void) 59static inline u64 spin_time_start(void)
61{ 60{
@@ -74,22 +73,6 @@ static void __spin_time_accum(u64 delta, u32 *array)
74 array[HISTO_BUCKETS]++; 73 array[HISTO_BUCKETS]++;
75} 74}
76 75
77static inline void spin_time_accum_spinning(u64 start)
78{
79 u32 delta = xen_clocksource_read() - start;
80
81 __spin_time_accum(delta, spinlock_stats.histo_spin_spinning);
82 spinlock_stats.time_spinning += delta;
83}
84
85static inline void spin_time_accum_total(u64 start)
86{
87 u32 delta = xen_clocksource_read() - start;
88
89 __spin_time_accum(delta, spinlock_stats.histo_spin_total);
90 spinlock_stats.time_total += delta;
91}
92
93static inline void spin_time_accum_blocked(u64 start) 76static inline void spin_time_accum_blocked(u64 start)
94{ 77{
95 u32 delta = xen_clocksource_read() - start; 78 u32 delta = xen_clocksource_read() - start;
@@ -99,19 +82,15 @@ static inline void spin_time_accum_blocked(u64 start)
99} 82}
100#else /* !CONFIG_XEN_DEBUG_FS */ 83#else /* !CONFIG_XEN_DEBUG_FS */
101#define TIMEOUT (1 << 10) 84#define TIMEOUT (1 << 10)
102#define ADD_STATS(elem, val) do { (void)(val); } while(0) 85static inline void add_stats(enum xen_contention_stat var, u32 val)
86{
87}
103 88
104static inline u64 spin_time_start(void) 89static inline u64 spin_time_start(void)
105{ 90{
106 return 0; 91 return 0;
107} 92}
108 93
109static inline void spin_time_accum_total(u64 start)
110{
111}
112static inline void spin_time_accum_spinning(u64 start)
113{
114}
115static inline void spin_time_accum_blocked(u64 start) 94static inline void spin_time_accum_blocked(u64 start)
116{ 95{
117} 96}
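The cmpxchg in check_zero() above guarantees exactly one resetter: every CPU may observe zero_stats set, but only the one whose compare-and-swap succeeds performs the memset. A standalone sketch of the same idea using GCC atomics:

#include <string.h>

static unsigned char zero_flag_sketch;
static unsigned int stats_sketch[8];

static void check_zero_sketch(void)
{
	unsigned char old = __atomic_load_n(&zero_flag_sketch,
					    __ATOMIC_RELAXED);

	if (old && __atomic_compare_exchange_n(&zero_flag_sketch, &old, 0,
					       0, __ATOMIC_ACQ_REL,
					       __ATOMIC_RELAXED))
		memset(stats_sketch, 0, sizeof(stats_sketch)); /* one winner */
}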
@@ -134,227 +113,123 @@ typedef u16 xen_spinners_t;
134 asm(LOCK_PREFIX " decw %0" : "+m" ((xl)->spinners) : : "memory"); 113 asm(LOCK_PREFIX " decw %0" : "+m" ((xl)->spinners) : : "memory");
135#endif 114#endif
136 115
137struct xen_spinlock { 116struct xen_lock_waiting {
138 unsigned char lock; /* 0 -> free; 1 -> locked */ 117 struct arch_spinlock *lock;
139 xen_spinners_t spinners; /* count of waiting cpus */ 118 __ticket_t want;
140}; 119};
141 120
142static int xen_spin_is_locked(struct arch_spinlock *lock)
143{
144 struct xen_spinlock *xl = (struct xen_spinlock *)lock;
145
146 return xl->lock != 0;
147}
148
149static int xen_spin_is_contended(struct arch_spinlock *lock)
150{
151 struct xen_spinlock *xl = (struct xen_spinlock *)lock;
152
153 /* Not strictly true; this is only the count of contended
154 lock-takers entering the slow path. */
155 return xl->spinners != 0;
156}
157
158static int xen_spin_trylock(struct arch_spinlock *lock)
159{
160 struct xen_spinlock *xl = (struct xen_spinlock *)lock;
161 u8 old = 1;
162
163 asm("xchgb %b0,%1"
164 : "+q" (old), "+m" (xl->lock) : : "memory");
165
166 return old == 0;
167}
168
169static DEFINE_PER_CPU(char *, irq_name);
170static DEFINE_PER_CPU(int, lock_kicker_irq) = -1; 121static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
171static DEFINE_PER_CPU(struct xen_spinlock *, lock_spinners); 122static DEFINE_PER_CPU(char *, irq_name);
172 123static DEFINE_PER_CPU(struct xen_lock_waiting, lock_waiting);
173/* 124static cpumask_t waiting_cpus;
174 * Mark a cpu as interested in a lock. Returns the CPU's previous
175 * lock of interest, in case we got preempted by an interrupt.
176 */
177static inline struct xen_spinlock *spinning_lock(struct xen_spinlock *xl)
178{
179 struct xen_spinlock *prev;
180
181 prev = __this_cpu_read(lock_spinners);
182 __this_cpu_write(lock_spinners, xl);
183
184 wmb(); /* set lock of interest before count */
185
186 inc_spinners(xl);
187
188 return prev;
189}
190
191/*
192 * Mark a cpu as no longer interested in a lock. Restores previous
193 * lock of interest (NULL for none).
194 */
195static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock *prev)
196{
197 dec_spinners(xl);
198 wmb(); /* decrement count before restoring lock */
199 __this_cpu_write(lock_spinners, prev);
200}
201 125
202static noinline int xen_spin_lock_slow(struct arch_spinlock *lock, bool irq_enable) 126static void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
203{ 127{
204 struct xen_spinlock *xl = (struct xen_spinlock *)lock;
205 struct xen_spinlock *prev;
206 int irq = __this_cpu_read(lock_kicker_irq); 128 int irq = __this_cpu_read(lock_kicker_irq);
207 int ret; 129 struct xen_lock_waiting *w = &__get_cpu_var(lock_waiting);
130 int cpu = smp_processor_id();
208 u64 start; 131 u64 start;
132 unsigned long flags;
209 133
210 /* If kicker interrupts not initialized yet, just spin */ 134 /* If kicker interrupts not initialized yet, just spin */
211 if (irq == -1) 135 if (irq == -1)
212 return 0; 136 return;
213 137
214 start = spin_time_start(); 138 start = spin_time_start();
215 139
216 /* announce we're spinning */ 140 /*
217 prev = spinning_lock(xl); 141 * Make sure an interrupt handler can't upset things in a
142 * partially setup state.
143 */
144 local_irq_save(flags);
145 /*
146 * We don't really care if we're overwriting some other
147 * (lock,want) pair, as that would mean that we're currently
148 * in an interrupt context, and the outer context had
149 * interrupts enabled. That has already kicked the VCPU out
150 * of xen_poll_irq(), so it will just return spuriously and
151 * retry with newly setup (lock,want).
152 *
153 * The ordering protocol on this is that the "lock" pointer
154 * may only be set non-NULL if the "want" ticket is correct.
155 * If we're updating "want", we must first clear "lock".
156 */
157 w->lock = NULL;
158 smp_wmb();
159 w->want = want;
160 smp_wmb();
161 w->lock = lock;
218 162
219 ADD_STATS(taken_slow, 1); 163 /* This uses set_bit, which is atomic and therefore a barrier */
220 ADD_STATS(taken_slow_nested, prev != NULL); 164 cpumask_set_cpu(cpu, &waiting_cpus);
165 add_stats(TAKEN_SLOW, 1);
221 166
222 do { 167 /* clear pending */
223 unsigned long flags; 168 xen_clear_irq_pending(irq);
224 169
225 /* clear pending */ 170 /* Only check lock once pending cleared */
226 xen_clear_irq_pending(irq); 171 barrier();
227 172
228 /* check again make sure it didn't become free while 173 /*
229 we weren't looking */ 174 * Mark entry to slowpath before doing the pickup test to make
230 ret = xen_spin_trylock(lock); 175 * sure we don't deadlock with an unlocker.
231 if (ret) { 176 */
232 ADD_STATS(taken_slow_pickup, 1); 177 __ticket_enter_slowpath(lock);
233 178
234 /* 179 /*
235 * If we interrupted another spinlock while it 180 * check again make sure it didn't become free while
236 * was blocking, make sure it doesn't block 181 * we weren't looking
237 * without rechecking the lock. 182 */
238 */ 183 if (ACCESS_ONCE(lock->tickets.head) == want) {
239 if (prev != NULL) 184 add_stats(TAKEN_SLOW_PICKUP, 1);
240 xen_set_irq_pending(irq); 185 goto out;
241 goto out; 186 }
242 }
243 187
244 flags = arch_local_save_flags(); 188 /* Allow interrupts while blocked */
245 if (irq_enable) { 189 local_irq_restore(flags);
246 ADD_STATS(taken_slow_irqenable, 1);
247 raw_local_irq_enable();
248 }
249 190
250 /* 191 /*
251 * Block until irq becomes pending. If we're 192 * If an interrupt happens here, it will leave the wakeup irq
-		 * interrupted at this point (after the trylock but
-		 * before entering the block), then the nested lock
-		 * handler guarantees that the irq will be left
-		 * pending if there's any chance the lock became free;
-		 * xen_poll_irq() returns immediately if the irq is
-		 * pending.
-		 */
-		xen_poll_irq(irq);
+	 * pending, which will cause xen_poll_irq() to return
+	 * immediately.
+	 */
 
-		raw_local_irq_restore(flags);
+	/* Block until irq becomes pending (or perhaps a spurious wakeup) */
+	xen_poll_irq(irq);
+	add_stats(TAKEN_SLOW_SPURIOUS, !xen_test_irq_pending(irq));
 
-		ADD_STATS(taken_slow_spurious, !xen_test_irq_pending(irq));
-	} while (!xen_test_irq_pending(irq)); /* check for spurious wakeups */
+	local_irq_save(flags);
 
 	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
-
 out:
-	unspinning_lock(xl, prev);
-	spin_time_accum_blocked(start);
-
-	return ret;
-}
-
-static inline void __xen_spin_lock(struct arch_spinlock *lock, bool irq_enable)
-{
-	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
-	unsigned timeout;
-	u8 oldval;
-	u64 start_spin;
-
-	ADD_STATS(taken, 1);
-
-	start_spin = spin_time_start();
-
-	do {
-		u64 start_spin_fast = spin_time_start();
-
-		timeout = TIMEOUT;
-
-		asm("1: xchgb %1,%0\n"
-		    "   testb %1,%1\n"
-		    "   jz 3f\n"
-		    "2: rep;nop\n"
-		    "   cmpb $0,%0\n"
-		    "   je 1b\n"
-		    "   dec %2\n"
-		    "   jnz 2b\n"
-		    "3:\n"
-		    : "+m" (xl->lock), "=q" (oldval), "+r" (timeout)
-		    : "1" (1)
-		    : "memory");
+	cpumask_clear_cpu(cpu, &waiting_cpus);
+	w->lock = NULL;
 
-		spin_time_accum_spinning(start_spin_fast);
+	local_irq_restore(flags);
 
-	} while (unlikely(oldval != 0 &&
-			  (TIMEOUT == ~0 || !xen_spin_lock_slow(lock, irq_enable))));
-
-	spin_time_accum_total(start_spin);
-}
-
-static void xen_spin_lock(struct arch_spinlock *lock)
-{
-	__xen_spin_lock(lock, false);
-}
-
-static void xen_spin_lock_flags(struct arch_spinlock *lock, unsigned long flags)
-{
-	__xen_spin_lock(lock, !raw_irqs_disabled_flags(flags));
+	spin_time_accum_blocked(start);
 }
+PV_CALLEE_SAVE_REGS_THUNK(xen_lock_spinning);
 
-static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
+static void xen_unlock_kick(struct arch_spinlock *lock, __ticket_t next)
 {
 	int cpu;
 
-	ADD_STATS(released_slow, 1);
+	add_stats(RELEASED_SLOW, 1);
+
+	for_each_cpu(cpu, &waiting_cpus) {
+		const struct xen_lock_waiting *w = &per_cpu(lock_waiting, cpu);
 
-	for_each_online_cpu(cpu) {
-		/* XXX should mix up next cpu selection */
-		if (per_cpu(lock_spinners, cpu) == xl) {
-			ADD_STATS(released_slow_kicked, 1);
+		/* Make sure we read lock before want */
+		if (ACCESS_ONCE(w->lock) == lock &&
+		    ACCESS_ONCE(w->want) == next) {
+			add_stats(RELEASED_SLOW_KICKED, 1);
 			xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
+			break;
 		}
 	}
 }
 
-static void xen_spin_unlock(struct arch_spinlock *lock)
-{
-	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
-
-	ADD_STATS(released, 1);
-
-	smp_wmb();		/* make sure no writes get moved after unlock */
-	xl->lock = 0;		/* release lock */
-
-	/*
-	 * Make sure unlock happens before checking for waiting
-	 * spinners.  We need a strong barrier to enforce the
-	 * write-read ordering to different memory locations, as the
-	 * CPU makes no implied guarantees about their ordering.
-	 */
-	mb();
-
-	if (unlikely(xl->spinners))
-		xen_spin_unlock_slow(xl);
-}
-
 static irqreturn_t dummy_handler(int irq, void *dev_id)
 {
 	BUG();
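The ordering contract between xen_lock_spinning() and xen_unlock_kick() is the crux of the new slowpath: the waiter publishes its (lock, want) pair with want visible before lock, and the unlocker reads them back in the opposite order, so a non-NULL lock always implies a valid ticket. A stand-alone sketch of that handshake, using C11 atomics in place of smp_wmb()/ACCESS_ONCE() and a stub kick() in place of xen_send_IPI_one(); the names spinlock_sketch, publish_wait, kick and the NR_CPUS value are illustrative, not the kernel's API:

	#include <stdatomic.h>
	#include <stdio.h>

	#define NR_CPUS 4			/* illustrative; not the kernel's NR_CPUS */

	typedef unsigned short ticket_t;

	struct spinlock_sketch {
		_Atomic ticket_t head, tail;
	};

	struct lock_waiting {
		struct spinlock_sketch *_Atomic lock;	/* non-NULL only while want is valid */
		_Atomic ticket_t want;
	};

	static struct lock_waiting waiting[NR_CPUS];	/* stand-in for the per-cpu data */

	static void kick(int cpu)			/* stand-in for xen_send_IPI_one() */
	{
		printf("kick cpu %d\n", cpu);
	}

	/* Waiter side: store want before lock, as the patch's smp_wmb() enforces. */
	static void publish_wait(int cpu, struct spinlock_sketch *lock, ticket_t want)
	{
		atomic_store_explicit(&waiting[cpu].want, want, memory_order_relaxed);
		atomic_store_explicit(&waiting[cpu].lock, lock, memory_order_release);
	}

	/* Unlocker side: read lock before want, as the ACCESS_ONCE() pair intends. */
	static void unlock_kick(struct spinlock_sketch *lock, ticket_t next)
	{
		for (int cpu = 0; cpu < NR_CPUS; cpu++) {
			if (atomic_load_explicit(&waiting[cpu].lock, memory_order_acquire) == lock &&
			    atomic_load_explicit(&waiting[cpu].want, memory_order_relaxed) == next) {
				kick(cpu);
				break;	/* only one waiter can hold ticket "next" */
			}
		}
	}

The release/acquire pairing is what makes the early break safe: if the unlocker observes the waiter's lock pointer, it is guaranteed to observe the matching ticket as well.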
@@ -408,6 +283,8 @@ void xen_uninit_lock_cpu(int cpu)
 	per_cpu(irq_name, cpu) = NULL;
 }
 
+static bool xen_pvspin __initdata = true;
+
 void __init xen_init_spinlocks(void)
 {
 	/*
@@ -417,15 +294,23 @@ void __init xen_init_spinlocks(void)
 	if (xen_hvm_domain())
 		return;
 
-	BUILD_BUG_ON(sizeof(struct xen_spinlock) > sizeof(arch_spinlock_t));
+	if (!xen_pvspin) {
+		printk(KERN_DEBUG "xen: PV spinlocks disabled\n");
+		return;
+	}
 
-	pv_lock_ops.spin_is_locked = xen_spin_is_locked;
-	pv_lock_ops.spin_is_contended = xen_spin_is_contended;
-	pv_lock_ops.spin_lock = xen_spin_lock;
-	pv_lock_ops.spin_lock_flags = xen_spin_lock_flags;
-	pv_lock_ops.spin_trylock = xen_spin_trylock;
-	pv_lock_ops.spin_unlock = xen_spin_unlock;
+	static_key_slow_inc(&paravirt_ticketlocks_enabled);
+
+	pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(xen_lock_spinning);
+	pv_lock_ops.unlock_kick = xen_unlock_kick;
+}
+
+static __init int xen_parse_nopvspin(char *arg)
+{
+	xen_pvspin = false;
+	return 0;
 }
+early_param("xen_nopvspin", xen_parse_nopvspin);
 
 #ifdef CONFIG_XEN_DEBUG_FS
 
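The xen_nopvspin handling above follows the kernel's usual early_param pattern: the parser runs while the command line is scanned, long before xen_init_spinlocks(), so the flag is settled by the time the pv ops would be installed. A minimal stand-alone sketch of that shape; parse_nopvspin, init_spinlocks and the printf are illustrative stand-ins, and the macro plumbing behind early_param() is omitted:

	#include <stdbool.h>
	#include <stdio.h>

	static bool pvspin = true;		/* default: paravirt spinlocks enabled */

	/* Parser registered via early_param("xen_nopvspin", ...) in the patch;
	 * a bare flag ignores its argument and returns 0 for success. */
	static int parse_nopvspin(char *arg)
	{
		(void)arg;
		pvspin = false;
		return 0;
	}

	static void init_spinlocks(void)
	{
		if (!pvspin) {
			printf("PV spinlocks disabled\n");
			return;			/* keep native ticketlocks */
		}
		/* ...install lock_spinning/unlock_kick ops here... */
	}

In practice, booting with xen_nopvspin on the kernel command line flips the flag before xen_init_spinlocks() runs, so the paravirt lock ops are never hooked in.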
@@ -442,37 +327,21 @@ static int __init xen_spinlock_debugfs(void)
 
 	debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats);
 
-	debugfs_create_u32("timeout", 0644, d_spin_debug, &lock_timeout);
-
-	debugfs_create_u64("taken", 0444, d_spin_debug, &spinlock_stats.taken);
 	debugfs_create_u32("taken_slow", 0444, d_spin_debug,
-			   &spinlock_stats.taken_slow);
-	debugfs_create_u32("taken_slow_nested", 0444, d_spin_debug,
-			   &spinlock_stats.taken_slow_nested);
+			   &spinlock_stats.contention_stats[TAKEN_SLOW]);
 	debugfs_create_u32("taken_slow_pickup", 0444, d_spin_debug,
-			   &spinlock_stats.taken_slow_pickup);
+			   &spinlock_stats.contention_stats[TAKEN_SLOW_PICKUP]);
 	debugfs_create_u32("taken_slow_spurious", 0444, d_spin_debug,
-			   &spinlock_stats.taken_slow_spurious);
-	debugfs_create_u32("taken_slow_irqenable", 0444, d_spin_debug,
-			   &spinlock_stats.taken_slow_irqenable);
+			   &spinlock_stats.contention_stats[TAKEN_SLOW_SPURIOUS]);
 
-	debugfs_create_u64("released", 0444, d_spin_debug, &spinlock_stats.released);
 	debugfs_create_u32("released_slow", 0444, d_spin_debug,
-			   &spinlock_stats.released_slow);
+			   &spinlock_stats.contention_stats[RELEASED_SLOW]);
 	debugfs_create_u32("released_slow_kicked", 0444, d_spin_debug,
-			   &spinlock_stats.released_slow_kicked);
+			   &spinlock_stats.contention_stats[RELEASED_SLOW_KICKED]);
 
-	debugfs_create_u64("time_spinning", 0444, d_spin_debug,
-			   &spinlock_stats.time_spinning);
 	debugfs_create_u64("time_blocked", 0444, d_spin_debug,
 			   &spinlock_stats.time_blocked);
-	debugfs_create_u64("time_total", 0444, d_spin_debug,
-			   &spinlock_stats.time_total);
 
-	debugfs_create_u32_array("histo_total", 0444, d_spin_debug,
-				 spinlock_stats.histo_spin_total, HISTO_BUCKETS + 1);
-	debugfs_create_u32_array("histo_spinning", 0444, d_spin_debug,
-				 spinlock_stats.histo_spin_spinning, HISTO_BUCKETS + 1);
 	debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug,
 				 spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1);
 
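The debugfs hunk above reflects the stats rework: the individually named u32 counters are folded into a single contention_stats[] array indexed by an enum, so one add_stats() helper can take the event as an argument. A reduced sketch of that pattern; the enum and field names follow the patch, the struct layout is illustrative, and the kernel version also handles the zero_stats reset, omitted here:

	enum xen_contention_stat {
		TAKEN_SLOW,
		TAKEN_SLOW_PICKUP,
		TAKEN_SLOW_SPURIOUS,
		RELEASED_SLOW,
		RELEASED_SLOW_KICKED,
		NR_CONTENTION_STATS
	};

	struct xen_spinlock_stats {
		unsigned int contention_stats[NR_CONTENTION_STATS];
		unsigned long long time_blocked;	/* u64 in the kernel */
	};

	static struct xen_spinlock_stats spinlock_stats;

	/* One helper replaces the per-event ADD_STATS() macros. */
	static inline void add_stats(enum xen_contention_stat var, unsigned int val)
	{
		spinlock_stats.contention_stats[var] += val;
	}

Exporting each counter is then a matter of pointing debugfs_create_u32() at the right array slot, as the hunk does.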
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 86782c5d7e2a..95f8c6142328 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -105,9 +105,9 @@ static inline void __init xen_init_apic(void)
 /* Declare an asm function, along with symbols needed to make it
    inlineable */
 #define DECL_ASM(ret, name, ...)		\
-	ret name(__VA_ARGS__);			\
-	extern char name##_end[];		\
-	extern char name##_reloc[]		\
+	__visible ret name(__VA_ARGS__);	\
+	extern char name##_end[] __visible;	\
+	extern char name##_reloc[] __visible
 
 DECL_ASM(void, xen_irq_enable_direct, void);
 DECL_ASM(void, xen_irq_disable_direct, void);
@@ -115,11 +115,11 @@ DECL_ASM(unsigned long, xen_save_fl_direct, void);
 DECL_ASM(void, xen_restore_fl_direct, unsigned long);
 
 /* These are not functions, and cannot be called normally */
-void xen_iret(void);
-void xen_sysexit(void);
-void xen_sysret32(void);
-void xen_sysret64(void);
-void xen_adjust_exception_frame(void);
+__visible void xen_iret(void);
+__visible void xen_sysexit(void);
+__visible void xen_sysret32(void);
+__visible void xen_sysret64(void);
+__visible void xen_adjust_exception_frame(void);
 
 extern int xen_panic_handler_init(void);
 
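The __visible annotations in the two xen-ops.h hunks matter because these symbols are defined in or referenced only from assembly: the compiler sees no C-level use, so under whole-program optimization it may localize or discard them. As a sketch of what the attribute does, __visible expands to gcc's externally_visible attribute (per the kernel's compiler-gcc4.h of this era); the declaration shown is one of the patch's own symbols:

	/* Tell gcc the symbol is referenced outside what it can see,
	 * so it must keep it externally visible even under -flto and
	 * similar whole-program assumptions. */
	#ifndef __visible
	#define __visible __attribute__((externally_visible))
	#endif

	__visible void xen_iret(void);	/* implemented in assembly, not C */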