Diffstat (limited to 'arch')
-rw-r--r--arch/alpha/Kconfig1
-rw-r--r--arch/alpha/include/asm/atomic.h88
-rw-r--r--arch/alpha/include/asm/param.h8
-rw-r--r--arch/alpha/include/asm/spinlock.h4
-rw-r--r--arch/alpha/include/asm/unistd.h3
-rw-r--r--arch/alpha/include/uapi/asm/param.h7
-rw-r--r--arch/alpha/include/uapi/asm/unistd.h2
-rw-r--r--arch/alpha/kernel/entry.S399
-rw-r--r--arch/alpha/kernel/irq_alpha.c2
-rw-r--r--arch/alpha/kernel/smp.c15
-rw-r--r--arch/alpha/kernel/sys_dp264.c8
-rw-r--r--arch/alpha/kernel/sys_marvel.c3
-rw-r--r--arch/alpha/kernel/systbls.S2
-rw-r--r--arch/alpha/kernel/time.c4
-rw-r--r--arch/alpha/kernel/traps.c12
-rw-r--r--arch/arm/Kconfig3
-rw-r--r--arch/arm/boot/dts/atlas6.dtsi22
-rw-r--r--arch/arm/boot/dts/imx28-apx4devkit.dts2
-rw-r--r--arch/arm/boot/dts/imx28-evk.dts2
-rw-r--r--arch/arm/boot/dts/imx28-m28evk.dts2
-rw-r--r--arch/arm/boot/dts/imx28.dtsi1
-rw-r--r--arch/arm/boot/dts/imx51-babbage.dts13
-rw-r--r--arch/arm/boot/dts/imx53-mba53.dts2
-rw-r--r--arch/arm/boot/dts/imx53.dtsi32
-rw-r--r--arch/arm/boot/dts/prima2.dtsi16
-rw-r--r--arch/arm/boot/dts/stih416-pinctrl.dtsi10
-rw-r--r--arch/arm/boot/dts/stih416.dtsi2
-rw-r--r--arch/arm/boot/dts/twl4030.dtsi6
-rw-r--r--arch/arm/boot/dts/vf610.dtsi8
-rw-r--r--arch/arm/common/edma.c1
-rw-r--r--arch/arm/common/mcpm_platsmp.c4
-rw-r--r--arch/arm/configs/da8xx_omapl_defconfig2
-rw-r--r--arch/arm/configs/davinci_all_defconfig2
-rw-r--r--arch/arm/configs/multi_v7_defconfig6
-rw-r--r--arch/arm/configs/nhk8815_defconfig7
-rw-r--r--arch/arm/include/asm/arch_timer.h2
-rw-r--r--arch/arm/kernel/head-common.S1
-rw-r--r--arch/arm/kernel/head-nommu.S1
-rw-r--r--arch/arm/kernel/head.S1
-rw-r--r--arch/arm/kernel/hw_breakpoint.c4
-rw-r--r--arch/arm/kernel/perf_event_cpu.c6
-rw-r--r--arch/arm/kernel/psci_smp.c3
-rw-r--r--arch/arm/kernel/smp.c18
-rw-r--r--arch/arm/kernel/smp_twd.c6
-rw-r--r--arch/arm/lib/delay.c2
-rw-r--r--arch/arm/mach-davinci/board-dm365-evm.c2
-rw-r--r--arch/arm/mach-davinci/dm355.c2
-rw-r--r--arch/arm/mach-davinci/dm365.c2
-rw-r--r--arch/arm/mach-exynos/Kconfig1
-rw-r--r--arch/arm/mach-exynos/Makefile2
-rw-r--r--arch/arm/mach-exynos/common.c26
-rw-r--r--arch/arm/mach-exynos/common.h1
-rw-r--r--arch/arm/mach-exynos/cpuidle.c1
-rw-r--r--arch/arm/mach-exynos/headsmp.S2
-rw-r--r--arch/arm/mach-exynos/include/mach/memory.h5
-rw-r--r--arch/arm/mach-exynos/platsmp.c4
-rw-r--r--arch/arm/mach-exynos/pm.c6
-rw-r--r--arch/arm/mach-footbridge/dc21285.c2
-rw-r--r--arch/arm/mach-highbank/highbank.c7
-rw-r--r--arch/arm/mach-highbank/platsmp.c2
-rw-r--r--arch/arm/mach-imx/clk-imx6q.c5
-rw-r--r--arch/arm/mach-imx/clk-vf610.c2
-rw-r--r--arch/arm/mach-imx/mx27.h2
-rw-r--r--arch/arm/mach-imx/platsmp.c2
-rw-r--r--arch/arm/mach-keystone/keystone.c2
-rw-r--r--arch/arm/mach-keystone/platsmp.c2
-rw-r--r--arch/arm/mach-msm/headsmp.S2
-rw-r--r--arch/arm/mach-msm/platsmp.c6
-rw-r--r--arch/arm/mach-msm/timer.c4
-rw-r--r--arch/arm/mach-mvebu/coherency.c2
-rw-r--r--arch/arm/mach-mvebu/headsmp.S2
-rw-r--r--arch/arm/mach-mvebu/platsmp.c5
-rw-r--r--arch/arm/mach-omap2/Kconfig2
-rw-r--r--arch/arm/mach-omap2/board-generic.c23
-rw-r--r--arch/arm/mach-omap2/omap-headsmp.S2
-rw-r--r--arch/arm/mach-omap2/omap-mpuss-lowpower.c2
-rw-r--r--arch/arm/mach-omap2/omap-smp.c4
-rw-r--r--arch/arm/mach-omap2/omap-wakeupgen.c4
-rw-r--r--arch/arm/mach-prima2/headsmp.S2
-rw-r--r--arch/arm/mach-prima2/platsmp.c4
-rw-r--r--arch/arm/mach-pxa/em-x270.c17
-rw-r--r--arch/arm/mach-pxa/mainstone.c3
-rw-r--r--arch/arm/mach-pxa/pcm990-baseboard.c3
-rw-r--r--arch/arm/mach-pxa/poodle.c4
-rw-r--r--arch/arm/mach-pxa/spitz.c4
-rw-r--r--arch/arm/mach-pxa/stargate2.c3
-rw-r--r--arch/arm/mach-s3c24xx/Kconfig2
-rw-r--r--arch/arm/mach-s3c24xx/clock-s3c2410.c161
-rw-r--r--arch/arm/mach-s3c24xx/clock-s3c2440.c3
-rw-r--r--arch/arm/mach-shmobile/headsmp-scu.S1
-rw-r--r--arch/arm/mach-shmobile/headsmp.S2
-rw-r--r--arch/arm/mach-shmobile/smp-emev2.c2
-rw-r--r--arch/arm/mach-shmobile/smp-r8a7779.c2
-rw-r--r--arch/arm/mach-shmobile/smp-sh73a0.c2
-rw-r--r--arch/arm/mach-socfpga/headsmp.S1
-rw-r--r--arch/arm/mach-socfpga/platsmp.c2
-rw-r--r--arch/arm/mach-spear/generic.h2
-rw-r--r--arch/arm/mach-spear/platsmp.c4
-rw-r--r--arch/arm/mach-sti/Kconfig3
-rw-r--r--arch/arm/mach-sti/platsmp.c6
-rw-r--r--arch/arm/mach-tegra/platsmp.c4
-rw-r--r--arch/arm/mach-tegra/pm.c2
-rw-r--r--arch/arm/mach-ux500/platsmp.c4
-rw-r--r--arch/arm/mach-zynq/common.c2
-rw-r--r--arch/arm/mach-zynq/common.h2
-rw-r--r--arch/arm/mach-zynq/headsmp.S2
-rw-r--r--arch/arm/mach-zynq/platsmp.c6
-rw-r--r--arch/arm/mm/proc-arm1020.S2
-rw-r--r--arch/arm/mm/proc-arm1020e.S2
-rw-r--r--arch/arm/mm/proc-arm1022.S2
-rw-r--r--arch/arm/mm/proc-arm1026.S3
-rw-r--r--arch/arm/mm/proc-arm720.S2
-rw-r--r--arch/arm/mm/proc-arm740.S2
-rw-r--r--arch/arm/mm/proc-arm7tdmi.S2
-rw-r--r--arch/arm/mm/proc-arm920.S2
-rw-r--r--arch/arm/mm/proc-arm922.S2
-rw-r--r--arch/arm/mm/proc-arm925.S2
-rw-r--r--arch/arm/mm/proc-arm926.S2
-rw-r--r--arch/arm/mm/proc-arm940.S2
-rw-r--r--arch/arm/mm/proc-arm946.S2
-rw-r--r--arch/arm/mm/proc-arm9tdmi.S2
-rw-r--r--arch/arm/mm/proc-fa526.S2
-rw-r--r--arch/arm/mm/proc-feroceon.S2
-rw-r--r--arch/arm/mm/proc-mohawk.S2
-rw-r--r--arch/arm/mm/proc-sa110.S2
-rw-r--r--arch/arm/mm/proc-sa1100.S2
-rw-r--r--arch/arm/mm/proc-v6.S2
-rw-r--r--arch/arm/mm/proc-v7-2level.S4
-rw-r--r--arch/arm/mm/proc-v7-3level.S4
-rw-r--r--arch/arm/mm/proc-v7.S2
-rw-r--r--arch/arm/mm/proc-xsc3.S2
-rw-r--r--arch/arm/mm/proc-xscale.S2
-rw-r--r--arch/arm/plat-samsung/Kconfig7
-rw-r--r--arch/arm/plat-samsung/Makefile2
-rw-r--r--arch/arm/plat-samsung/include/plat/clock.h5
-rw-r--r--arch/arm/plat-samsung/include/plat/pm.h8
-rw-r--r--arch/arm/plat-samsung/pm.c14
-rw-r--r--arch/arm/plat-versatile/platsmp.c6
-rw-r--r--arch/arm64/include/asm/arch_timer.h2
-rw-r--r--arch/arm64/include/asm/debug-monitors.h7
-rw-r--r--arch/arm64/include/asm/system_misc.h3
-rw-r--r--arch/arm64/include/asm/thread_info.h4
-rw-r--r--arch/arm64/include/asm/virt.h13
-rw-r--r--arch/arm64/kernel/debug-monitors.c6
-rw-r--r--arch/arm64/kernel/entry.S2
-rw-r--r--arch/arm64/kernel/hw_breakpoint.c4
-rw-r--r--arch/arm64/kernel/process.c4
-rw-r--r--arch/arm64/kernel/smp.c23
-rw-r--r--arch/arm64/mm/fault.c46
-rw-r--r--arch/blackfin/kernel/perf_event.c2
-rw-r--r--arch/blackfin/kernel/setup.c4
-rw-r--r--arch/blackfin/mach-bf561/smp.c6
-rw-r--r--arch/blackfin/mach-common/cache-c.c4
-rw-r--r--arch/blackfin/mach-common/ints-priority.c2
-rw-r--r--arch/blackfin/mach-common/smp.c18
-rw-r--r--arch/cris/arch-v32/kernel/smp.c2
-rw-r--r--arch/frv/kernel/setup.c2
-rw-r--r--arch/hexagon/kernel/setup.c2
-rw-r--r--arch/hexagon/kernel/smp.c4
-rw-r--r--arch/m32r/kernel/smpboot.c2
-rw-r--r--arch/metag/kernel/perf/perf_event.c6
-rw-r--r--arch/metag/kernel/smp.c22
-rw-r--r--arch/metag/kernel/traps.c2
-rw-r--r--arch/mips/Kconfig1
-rw-r--r--arch/mips/ath79/setup.c2
-rw-r--r--arch/mips/cavium-octeon/octeon-irq.c12
-rw-r--r--arch/mips/cavium-octeon/octeon-platform.c5
-rw-r--r--arch/mips/cavium-octeon/smp.c6
-rw-r--r--arch/mips/include/asm/uasm.h37
-rw-r--r--arch/mips/kernel/bmips_vec.S4
-rw-r--r--arch/mips/kernel/cevt-bcm1480.c2
-rw-r--r--arch/mips/kernel/cevt-gic.c2
-rw-r--r--arch/mips/kernel/cevt-r4k.c2
-rw-r--r--arch/mips/kernel/cevt-sb1250.c2
-rw-r--r--arch/mips/kernel/cevt-smtc.c2
-rw-r--r--arch/mips/kernel/cpu-bugs64.c2
-rw-r--r--arch/mips/kernel/cpu-probe.c14
-rw-r--r--arch/mips/kernel/head.S4
-rw-r--r--arch/mips/kernel/smp-bmips.c8
-rw-r--r--arch/mips/kernel/smp-mt.c6
-rw-r--r--arch/mips/kernel/smp-up.c6
-rw-r--r--arch/mips/kernel/smp.c6
-rw-r--r--arch/mips/kernel/smtc.c2
-rw-r--r--arch/mips/kernel/spram.c14
-rw-r--r--arch/mips/kernel/sync-r4k.c12
-rw-r--r--arch/mips/kernel/traps.c13
-rw-r--r--arch/mips/kernel/watch.c2
-rw-r--r--arch/mips/kvm/Kconfig1
-rw-r--r--arch/mips/lantiq/irq.c2
-rw-r--r--arch/mips/lib/uncached.c2
-rw-r--r--arch/mips/mm/c-octeon.c6
-rw-r--r--arch/mips/mm/c-r3k.c8
-rw-r--r--arch/mips/mm/c-r4k.c34
-rw-r--r--arch/mips/mm/c-tx39.c2
-rw-r--r--arch/mips/mm/cache.c2
-rw-r--r--arch/mips/mm/cex-sb1.S4
-rw-r--r--arch/mips/mm/page.c40
-rw-r--r--arch/mips/mm/sc-ip22.c2
-rw-r--r--arch/mips/mm/sc-mips.c2
-rw-r--r--arch/mips/mm/sc-r5k.c2
-rw-r--r--arch/mips/mm/sc-rm7k.c12
-rw-r--r--arch/mips/mm/tlb-r3k.c2
-rw-r--r--arch/mips/mm/tlb-r4k.c4
-rw-r--r--arch/mips/mm/tlb-r8k.c4
-rw-r--r--arch/mips/mm/tlbex.c148
-rw-r--r--arch/mips/mm/uasm-micromips.c10
-rw-r--r--arch/mips/mm/uasm-mips.c10
-rw-r--r--arch/mips/mm/uasm.c106
-rw-r--r--arch/mips/mti-malta/malta-smtc.c6
-rw-r--r--arch/mips/mti-malta/malta-time.c2
-rw-r--r--arch/mips/mti-sead3/sead3-time.c2
-rw-r--r--arch/mips/netlogic/common/irq.c68
-rw-r--r--arch/mips/netlogic/common/smp.c4
-rw-r--r--arch/mips/netlogic/common/smpboot.S4
-rw-r--r--arch/mips/netlogic/common/time.c2
-rw-r--r--arch/mips/netlogic/dts/xlp_evp.dts3
-rw-r--r--arch/mips/netlogic/dts/xlp_svp.dts3
-rw-r--r--arch/mips/netlogic/xlp/usb-init.c2
-rw-r--r--arch/mips/netlogic/xlr/wakeup.c2
-rw-r--r--arch/mips/pci/pci-ip27.c2
-rw-r--r--arch/mips/pmcs-msp71xx/msp_smtc.c7
-rw-r--r--arch/mips/pmcs-msp71xx/msp_time.c2
-rw-r--r--arch/mips/pnx833x/common/interrupts.c2
-rw-r--r--arch/mips/powertv/time.c2
-rw-r--r--arch/mips/ralink/irq.c2
-rw-r--r--arch/mips/sgi-ip27/ip27-init.c4
-rw-r--r--arch/mips/sgi-ip27/ip27-smp.c6
-rw-r--r--arch/mips/sgi-ip27/ip27-timer.c6
-rw-r--r--arch/mips/sgi-ip27/ip27-xtalk.c6
-rw-r--r--arch/mips/sibyte/bcm1480/smp.c8
-rw-r--r--arch/mips/sibyte/sb1250/smp.c8
-rw-r--r--arch/openrisc/kernel/setup.c2
-rw-r--r--arch/parisc/kernel/firmware.c14
-rw-r--r--arch/parisc/kernel/hardware.c2
-rw-r--r--arch/parisc/kernel/processor.c6
-rw-r--r--arch/parisc/kernel/smp.c8
-rw-r--r--arch/powerpc/include/asm/eeh.h30
-rw-r--r--arch/powerpc/include/asm/hw_irq.h7
-rw-r--r--arch/powerpc/include/asm/module.h5
-rw-r--r--arch/powerpc/include/asm/pci-bridge.h1
-rw-r--r--arch/powerpc/include/asm/reg.h3
-rw-r--r--arch/powerpc/kernel/cputable.c20
-rw-r--r--arch/powerpc/kernel/eeh.c70
-rw-r--r--arch/powerpc/kernel/eeh_cache.c18
-rw-r--r--arch/powerpc/kernel/eeh_driver.c77
-rw-r--r--arch/powerpc/kernel/eeh_pe.c58
-rw-r--r--arch/powerpc/kernel/eeh_sysfs.c21
-rw-r--r--arch/powerpc/kernel/pci-common.c2
-rw-r--r--arch/powerpc/kernel/pci-hotplug.c49
-rw-r--r--arch/powerpc/kernel/pci_of_scan.c56
-rw-r--r--arch/powerpc/kernel/prom_init.c5
-rw-r--r--arch/powerpc/kernel/vmlinux.lds.S3
-rw-r--r--arch/powerpc/mm/hash_native_64.c12
-rw-r--r--arch/powerpc/perf/core-book3s.c5
-rw-r--r--arch/powerpc/perf/power8-pmu.c24
-rw-r--r--arch/powerpc/platforms/powernv/eeh-powernv.c17
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c2
-rw-r--r--arch/powerpc/platforms/pseries/Kconfig1
-rw-r--r--arch/powerpc/platforms/pseries/eeh_pseries.c67
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c2
-rw-r--r--arch/powerpc/platforms/pseries/ras.c3
-rw-r--r--arch/s390/include/asm/processor.h10
-rw-r--r--arch/s390/include/asm/switch_to.h4
-rw-r--r--arch/s390/include/uapi/asm/ptrace.h1
-rw-r--r--arch/s390/kernel/cache.c15
-rw-r--r--arch/s390/kernel/crash_dump.c51
-rw-r--r--arch/s390/kernel/perf_cpum_cf.c4
-rw-r--r--arch/s390/kernel/processor.c2
-rw-r--r--arch/s390/kernel/ptrace.c50
-rw-r--r--arch/s390/kernel/smp.c17
-rw-r--r--arch/s390/kernel/sysinfo.c2
-rw-r--r--arch/s390/kernel/vtime.c6
-rw-r--r--arch/s390/mm/fault.c4
-rw-r--r--arch/s390/net/bpf_jit_comp.c113
-rw-r--r--arch/score/mm/tlb-score.c2
-rw-r--r--arch/sh/kernel/cpu/init.c18
-rw-r--r--arch/sh/kernel/cpu/sh2/probe.c2
-rw-r--r--arch/sh/kernel/cpu/sh2a/probe.c2
-rw-r--r--arch/sh/kernel/cpu/sh3/probe.c2
-rw-r--r--arch/sh/kernel/cpu/sh4/probe.c2
-rw-r--r--arch/sh/kernel/cpu/sh4a/smp-shx3.c6
-rw-r--r--arch/sh/kernel/cpu/sh5/probe.c2
-rw-r--r--arch/sh/kernel/perf_event.c4
-rw-r--r--arch/sh/kernel/process.c2
-rw-r--r--arch/sh/kernel/setup.c2
-rw-r--r--arch/sh/kernel/smp.c8
-rw-r--r--arch/sh/kernel/traps_32.c2
-rw-r--r--arch/sh/kernel/traps_64.c2
-rw-r--r--arch/sh/mm/tlb-sh5.c2
-rw-r--r--arch/sparc/kernel/ds.c11
-rw-r--r--arch/sparc/kernel/entry.h2
-rw-r--r--arch/sparc/kernel/hvtramp.S1
-rw-r--r--arch/sparc/kernel/irq_64.c5
-rw-r--r--arch/sparc/kernel/leon_smp.c10
-rw-r--r--arch/sparc/kernel/mdesc.c34
-rw-r--r--arch/sparc/kernel/smp_32.c20
-rw-r--r--arch/sparc/kernel/smp_64.c9
-rw-r--r--arch/sparc/kernel/sun4d_smp.c6
-rw-r--r--arch/sparc/kernel/sun4m_smp.c6
-rw-r--r--arch/sparc/kernel/sysfs.c4
-rw-r--r--arch/sparc/kernel/trampoline_32.S3
-rw-r--r--arch/sparc/kernel/trampoline_64.S2
-rw-r--r--arch/sparc/mm/init_64.c2
-rw-r--r--arch/sparc/mm/srmmu.c12
-rw-r--r--arch/tile/kernel/irq.c2
-rw-r--r--arch/tile/kernel/messaging.c2
-rw-r--r--arch/tile/kernel/setup.c12
-rw-r--r--arch/tile/kernel/smpboot.c8
-rw-r--r--arch/tile/kernel/time.c2
-rw-r--r--arch/um/include/shared/frame_kern.h8
-rw-r--r--arch/um/kernel/signal.c4
-rw-r--r--arch/um/kernel/skas/mmu.c2
-rw-r--r--arch/um/kernel/skas/uaccess.c2
-rw-r--r--arch/um/os-Linux/mem.c230
-rw-r--r--arch/um/os-Linux/signal.c8
-rw-r--r--arch/um/os-Linux/skas/process.c19
-rw-r--r--arch/x86/crypto/Makefile2
-rw-r--r--arch/x86/crypto/crct10dif-pcl-asm_64.S643
-rw-r--r--arch/x86/crypto/crct10dif-pclmul_glue.c151
-rw-r--r--arch/x86/include/asm/cpu.h2
-rw-r--r--arch/x86/include/asm/microcode.h4
-rw-r--r--arch/x86/include/asm/microcode_amd.h4
-rw-r--r--arch/x86/include/asm/microcode_intel.h4
-rw-r--r--arch/x86/include/asm/mmconfig.h4
-rw-r--r--arch/x86/include/asm/mpspec.h2
-rw-r--r--arch/x86/include/asm/numa.h6
-rw-r--r--arch/x86/include/asm/processor.h2
-rw-r--r--arch/x86/include/asm/prom.h2
-rw-r--r--arch/x86/include/asm/smp.h2
-rw-r--r--arch/x86/kernel/acpi/boot.c6
-rw-r--r--arch/x86/kernel/acpi/sleep.c18
-rw-r--r--arch/x86/kernel/apic/apic.c30
-rw-r--r--arch/x86/kernel/apic/apic_numachip.c2
-rw-r--r--arch/x86/kernel/apic/es7000_32.c2
-rw-r--r--arch/x86/kernel/apic/numaq_32.c2
-rw-r--r--arch/x86/kernel/apic/x2apic_cluster.c2
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c14
-rw-r--r--arch/x86/kernel/cpu/amd.c33
-rw-r--r--arch/x86/kernel/cpu/centaur.c26
-rw-r--r--arch/x86/kernel/cpu/common.c64
-rw-r--r--arch/x86/kernel/cpu/cyrix.c40
-rw-r--r--arch/x86/kernel/cpu/hypervisor.c2
-rw-r--r--arch/x86/kernel/cpu/intel.c30
-rw-r--r--arch/x86/kernel/cpu/intel_cacheinfo.c55
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c23
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_amd.c14
-rw-r--r--arch/x86/kernel/cpu/mcheck/therm_throt.c9
-rw-r--r--arch/x86/kernel/cpu/perf_event.c2
-rw-r--r--arch/x86/kernel/cpu/perf_event_amd_ibs.c2
-rw-r--r--arch/x86/kernel/cpu/perf_event_amd_uncore.c31
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore.c20
-rw-r--r--arch/x86/kernel/cpu/rdrand.c2
-rw-r--r--arch/x86/kernel/cpu/scattered.c4
-rw-r--r--arch/x86/kernel/cpu/topology.c2
-rw-r--r--arch/x86/kernel/cpu/transmeta.c6
-rw-r--r--arch/x86/kernel/cpu/umc.c2
-rw-r--r--arch/x86/kernel/cpu/vmware.c2
-rw-r--r--arch/x86/kernel/cpuid.c7
-rw-r--r--arch/x86/kernel/devicetree.c2
-rw-r--r--arch/x86/kernel/head_32.S1
-rw-r--r--arch/x86/kernel/head_64.S15
-rw-r--r--arch/x86/kernel/i387.c10
-rw-r--r--arch/x86/kernel/irq_32.c2
-rw-r--r--arch/x86/kernel/kvm.c10
-rw-r--r--arch/x86/kernel/kvmclock.c2
-rw-r--r--arch/x86/kernel/microcode_amd_early.c8
-rw-r--r--arch/x86/kernel/microcode_core.c2
-rw-r--r--arch/x86/kernel/microcode_core_early.c6
-rw-r--r--arch/x86/kernel/microcode_intel_early.c26
-rw-r--r--arch/x86/kernel/mmconf-fam10h_64.c12
-rw-r--r--arch/x86/kernel/msr.c6
-rw-r--r--arch/x86/kernel/process.c2
-rw-r--r--arch/x86/kernel/setup.c2
-rw-r--r--arch/x86/kernel/smpboot.c28
-rw-r--r--arch/x86/kernel/tboot.c6
-rw-r--r--arch/x86/kernel/tracepoint.c6
-rw-r--r--arch/x86/kernel/traps.c12
-rw-r--r--arch/x86/kernel/tsc.c4
-rw-r--r--arch/x86/kernel/tsc_sync.c18
-rw-r--r--arch/x86/kernel/vsyscall_64.c6
-rw-r--r--arch/x86/kernel/x86_init.c4
-rw-r--r--arch/x86/kernel/xsave.c4
-rw-r--r--arch/x86/kvm/mmu.c7
-rw-r--r--arch/x86/mm/mmio-mod.c4
-rw-r--r--arch/x86/mm/numa.c12
-rw-r--r--arch/x86/mm/numa_emulation.c12
-rw-r--r--arch/x86/mm/setup_nx.c4
-rw-r--r--arch/x86/pci/amd_bus.c8
-rw-r--r--arch/x86/platform/ce4100/ce4100.c3
-rw-r--r--arch/x86/platform/efi/efi.c7
-rw-r--r--arch/x86/platform/mrst/mrst.c4
-rw-r--r--arch/x86/um/signal.c1
-rw-r--r--arch/x86/xen/enlighten.c6
-rw-r--r--arch/x86/xen/setup.c6
-rw-r--r--arch/x86/xen/smp.c12
-rw-r--r--arch/x86/xen/spinlock.c2
-rw-r--r--arch/x86/xen/xen-ops.h2
-rw-r--r--arch/xtensa/kernel/time.c2
398 files changed, 2505 insertions, 2563 deletions
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 837a1f2d8b96..082d9b4b5472 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -15,6 +15,7 @@ config ALPHA
15 select ARCH_WANT_OPTIONAL_GPIOLIB 15 select ARCH_WANT_OPTIONAL_GPIOLIB
16 select ARCH_WANT_IPC_PARSE_VERSION 16 select ARCH_WANT_IPC_PARSE_VERSION
17 select ARCH_HAVE_NMI_SAFE_CMPXCHG 17 select ARCH_HAVE_NMI_SAFE_CMPXCHG
18 select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
18 select GENERIC_SMP_IDLE_THREAD 19 select GENERIC_SMP_IDLE_THREAD
19 select GENERIC_CMOS_UPDATE 20 select GENERIC_CMOS_UPDATE
20 select GENERIC_STRNCPY_FROM_USER 21 select GENERIC_STRNCPY_FROM_USER
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index c2cbe4fc391c..78b03ef39f6f 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -186,17 +186,24 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
186 */ 186 */
187static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) 187static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
188{ 188{
189 int c, old; 189 int c, new, old;
190 c = atomic_read(v); 190 smp_mb();
191 for (;;) { 191 __asm__ __volatile__(
192 if (unlikely(c == (u))) 192 "1: ldl_l %[old],%[mem]\n"
193 break; 193 " cmpeq %[old],%[u],%[c]\n"
194 old = atomic_cmpxchg((v), c, c + (a)); 194 " addl %[old],%[a],%[new]\n"
195 if (likely(old == c)) 195 " bne %[c],2f\n"
196 break; 196 " stl_c %[new],%[mem]\n"
197 c = old; 197 " beq %[new],3f\n"
198 } 198 "2:\n"
199 return c; 199 ".subsection 2\n"
200 "3: br 1b\n"
201 ".previous"
202 : [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
203 : [mem] "m"(*v), [a] "rI"(a), [u] "rI"((long)u)
204 : "memory");
205 smp_mb();
206 return old;
200} 207}
201 208
202 209
@@ -207,21 +214,56 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
207 * @u: ...unless v is equal to u. 214 * @u: ...unless v is equal to u.
208 * 215 *
209 * Atomically adds @a to @v, so long as it was not @u. 216 * Atomically adds @a to @v, so long as it was not @u.
210 * Returns the old value of @v. 217 * Returns true iff @v was not @u.
211 */ 218 */
212static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u) 219static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
213{ 220{
214 long c, old; 221 long c, tmp;
215 c = atomic64_read(v); 222 smp_mb();
216 for (;;) { 223 __asm__ __volatile__(
217 if (unlikely(c == (u))) 224 "1: ldq_l %[tmp],%[mem]\n"
218 break; 225 " cmpeq %[tmp],%[u],%[c]\n"
219 old = atomic64_cmpxchg((v), c, c + (a)); 226 " addq %[tmp],%[a],%[tmp]\n"
220 if (likely(old == c)) 227 " bne %[c],2f\n"
221 break; 228 " stq_c %[tmp],%[mem]\n"
222 c = old; 229 " beq %[tmp],3f\n"
223 } 230 "2:\n"
224 return c != (u); 231 ".subsection 2\n"
232 "3: br 1b\n"
233 ".previous"
234 : [tmp] "=&r"(tmp), [c] "=&r"(c)
235 : [mem] "m"(*v), [a] "rI"(a), [u] "rI"(u)
236 : "memory");
237 smp_mb();
238 return !c;
239}
240
241/*
242 * atomic64_dec_if_positive - decrement by 1 if old value positive
243 * @v: pointer of type atomic_t
244 *
245 * The function returns the old value of *v minus 1, even if
246 * the atomic variable, v, was not decremented.
247 */
248static inline long atomic64_dec_if_positive(atomic64_t *v)
249{
250 long old, tmp;
251 smp_mb();
252 __asm__ __volatile__(
253 "1: ldq_l %[old],%[mem]\n"
254 " subq %[old],1,%[tmp]\n"
255 " ble %[old],2f\n"
256 " stq_c %[tmp],%[mem]\n"
257 " beq %[tmp],3f\n"
258 "2:\n"
259 ".subsection 2\n"
260 "3: br 1b\n"
261 ".previous"
262 : [old] "=&r"(old), [tmp] "=&r"(tmp)
263 : [mem] "m"(*v)
264 : "memory");
265 smp_mb();
266 return old - 1;
225} 267}
226 268
227#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) 269#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
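For reference, the generic compare-and-swap loop that the new ll/sc assembly replaces is the one visible on the removed side of the hunk above. Written out cleanly (a minimal sketch using the same atomic64_read()/atomic64_cmpxchg() helpers as the old code, not the new Alpha implementation):

static inline long atomic64_add_unless_generic(atomic64_t *v, long a, long u)
{
	long c, old;

	c = atomic64_read(v);
	for (;;) {
		if (c == u)			/* hit the forbidden value: give up */
			break;
		old = atomic64_cmpxchg(v, c, c + a);
		if (old == c)			/* nobody raced us: add succeeded */
			break;
		c = old;			/* lost the race: retry with the fresh value */
	}
	return c != u;				/* true iff @v was not @u */
}

The new inline assembly expresses the same retry loop directly with ldq_l/stq_c load-locked/store-conditional pairs instead of going through a separate cmpxchg call.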
diff --git a/arch/alpha/include/asm/param.h b/arch/alpha/include/asm/param.h
index bf46af51941b..a5b68b268bcf 100644
--- a/arch/alpha/include/asm/param.h
+++ b/arch/alpha/include/asm/param.h
@@ -3,7 +3,9 @@
3 3
4#include <uapi/asm/param.h> 4#include <uapi/asm/param.h>
5 5
6#define HZ CONFIG_HZ 6# undef HZ
7#define USER_HZ HZ 7# define HZ CONFIG_HZ
8# define CLOCKS_PER_SEC HZ /* frequency at which times() counts */ 8# define USER_HZ 1024
9# define CLOCKS_PER_SEC USER_HZ /* frequency at which times() counts */
10
9#endif /* _ASM_ALPHA_PARAM_H */ 11#endif /* _ASM_ALPHA_PARAM_H */
diff --git a/arch/alpha/include/asm/spinlock.h b/arch/alpha/include/asm/spinlock.h
index 3bba21e41b81..37b570d01202 100644
--- a/arch/alpha/include/asm/spinlock.h
+++ b/arch/alpha/include/asm/spinlock.h
@@ -168,8 +168,4 @@ static inline void arch_write_unlock(arch_rwlock_t * lock)
168#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) 168#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
169#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) 169#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
170 170
171#define arch_spin_relax(lock) cpu_relax()
172#define arch_read_relax(lock) cpu_relax()
173#define arch_write_relax(lock) cpu_relax()
174
175#endif /* _ALPHA_SPINLOCK_H */ 171#endif /* _ALPHA_SPINLOCK_H */
diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h
index 43baee17acdf..f2c94402e2c8 100644
--- a/arch/alpha/include/asm/unistd.h
+++ b/arch/alpha/include/asm/unistd.h
@@ -3,8 +3,7 @@
3 3
4#include <uapi/asm/unistd.h> 4#include <uapi/asm/unistd.h>
5 5
6 6#define NR_SYSCALLS 508
7#define NR_SYSCALLS 506
8 7
9#define __ARCH_WANT_OLD_READDIR 8#define __ARCH_WANT_OLD_READDIR
10#define __ARCH_WANT_STAT64 9#define __ARCH_WANT_STAT64
diff --git a/arch/alpha/include/uapi/asm/param.h b/arch/alpha/include/uapi/asm/param.h
index 29daed819ebd..dbcd9834af6d 100644
--- a/arch/alpha/include/uapi/asm/param.h
+++ b/arch/alpha/include/uapi/asm/param.h
@@ -1,13 +1,7 @@
1#ifndef _UAPI_ASM_ALPHA_PARAM_H 1#ifndef _UAPI_ASM_ALPHA_PARAM_H
2#define _UAPI_ASM_ALPHA_PARAM_H 2#define _UAPI_ASM_ALPHA_PARAM_H
3 3
4/* ??? Gross. I don't want to parameterize this, and supposedly the
5 hardware ignores reprogramming. We also need userland buy-in to the
6 change in HZ, since this is visible in the wait4 resources etc. */
7
8#ifndef __KERNEL__
9#define HZ 1024 4#define HZ 1024
10#endif
11 5
12#define EXEC_PAGESIZE 8192 6#define EXEC_PAGESIZE 8192
13 7
@@ -17,5 +11,4 @@
17 11
18#define MAXHOSTNAMELEN 64 /* max length of hostname */ 12#define MAXHOSTNAMELEN 64 /* max length of hostname */
19 13
20
21#endif /* _UAPI_ASM_ALPHA_PARAM_H */ 14#endif /* _UAPI_ASM_ALPHA_PARAM_H */
diff --git a/arch/alpha/include/uapi/asm/unistd.h b/arch/alpha/include/uapi/asm/unistd.h
index 801d28bcea51..53ae7bb1bfd1 100644
--- a/arch/alpha/include/uapi/asm/unistd.h
+++ b/arch/alpha/include/uapi/asm/unistd.h
@@ -467,5 +467,7 @@
467#define __NR_sendmmsg 503 467#define __NR_sendmmsg 503
468#define __NR_process_vm_readv 504 468#define __NR_process_vm_readv 504
469#define __NR_process_vm_writev 505 469#define __NR_process_vm_writev 505
470#define __NR_kcmp 506
471#define __NR_finit_module 507
470 472
471#endif /* _UAPI_ALPHA_UNISTD_H */ 473#endif /* _UAPI_ALPHA_UNISTD_H */
diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S
index f62a994ef126..a969b95ee5ac 100644
--- a/arch/alpha/kernel/entry.S
+++ b/arch/alpha/kernel/entry.S
@@ -12,11 +12,32 @@
12 12
13 .text 13 .text
14 .set noat 14 .set noat
15 .cfi_sections .debug_frame
15 16
16/* Stack offsets. */ 17/* Stack offsets. */
17#define SP_OFF 184 18#define SP_OFF 184
18#define SWITCH_STACK_SIZE 320 19#define SWITCH_STACK_SIZE 320
19 20
21.macro CFI_START_OSF_FRAME func
22 .align 4
23 .globl \func
24 .type \func,@function
25\func:
26 .cfi_startproc simple
27 .cfi_return_column 64
28 .cfi_def_cfa $sp, 48
29 .cfi_rel_offset 64, 8
30 .cfi_rel_offset $gp, 16
31 .cfi_rel_offset $16, 24
32 .cfi_rel_offset $17, 32
33 .cfi_rel_offset $18, 40
34.endm
35
36.macro CFI_END_OSF_FRAME func
37 .cfi_endproc
38 .size \func, . - \func
39.endm
40
20/* 41/*
21 * This defines the normal kernel pt-regs layout. 42 * This defines the normal kernel pt-regs layout.
22 * 43 *
@@ -27,100 +48,158 @@
27 * the palcode-provided values are available to the signal handler. 48 * the palcode-provided values are available to the signal handler.
28 */ 49 */
29 50
30#define SAVE_ALL \ 51.macro SAVE_ALL
31 subq $sp, SP_OFF, $sp; \ 52 subq $sp, SP_OFF, $sp
32 stq $0, 0($sp); \ 53 .cfi_adjust_cfa_offset SP_OFF
33 stq $1, 8($sp); \ 54 stq $0, 0($sp)
34 stq $2, 16($sp); \ 55 stq $1, 8($sp)
35 stq $3, 24($sp); \ 56 stq $2, 16($sp)
36 stq $4, 32($sp); \ 57 stq $3, 24($sp)
37 stq $28, 144($sp); \ 58 stq $4, 32($sp)
38 lda $2, alpha_mv; \ 59 stq $28, 144($sp)
39 stq $5, 40($sp); \ 60 .cfi_rel_offset $0, 0
40 stq $6, 48($sp); \ 61 .cfi_rel_offset $1, 8
41 stq $7, 56($sp); \ 62 .cfi_rel_offset $2, 16
42 stq $8, 64($sp); \ 63 .cfi_rel_offset $3, 24
43 stq $19, 72($sp); \ 64 .cfi_rel_offset $4, 32
44 stq $20, 80($sp); \ 65 .cfi_rel_offset $28, 144
45 stq $21, 88($sp); \ 66 lda $2, alpha_mv
46 ldq $2, HAE_CACHE($2); \ 67 stq $5, 40($sp)
47 stq $22, 96($sp); \ 68 stq $6, 48($sp)
48 stq $23, 104($sp); \ 69 stq $7, 56($sp)
49 stq $24, 112($sp); \ 70 stq $8, 64($sp)
50 stq $25, 120($sp); \ 71 stq $19, 72($sp)
51 stq $26, 128($sp); \ 72 stq $20, 80($sp)
52 stq $27, 136($sp); \ 73 stq $21, 88($sp)
53 stq $2, 152($sp); \ 74 ldq $2, HAE_CACHE($2)
54 stq $16, 160($sp); \ 75 stq $22, 96($sp)
55 stq $17, 168($sp); \ 76 stq $23, 104($sp)
77 stq $24, 112($sp)
78 stq $25, 120($sp)
79 stq $26, 128($sp)
80 stq $27, 136($sp)
81 stq $2, 152($sp)
82 stq $16, 160($sp)
83 stq $17, 168($sp)
56 stq $18, 176($sp) 84 stq $18, 176($sp)
85 .cfi_rel_offset $5, 40
86 .cfi_rel_offset $6, 48
87 .cfi_rel_offset $7, 56
88 .cfi_rel_offset $8, 64
89 .cfi_rel_offset $19, 72
90 .cfi_rel_offset $20, 80
91 .cfi_rel_offset $21, 88
92 .cfi_rel_offset $22, 96
93 .cfi_rel_offset $23, 104
94 .cfi_rel_offset $24, 112
95 .cfi_rel_offset $25, 120
96 .cfi_rel_offset $26, 128
97 .cfi_rel_offset $27, 136
98.endm
57 99
58#define RESTORE_ALL \ 100.macro RESTORE_ALL
59 lda $19, alpha_mv; \ 101 lda $19, alpha_mv
60 ldq $0, 0($sp); \ 102 ldq $0, 0($sp)
61 ldq $1, 8($sp); \ 103 ldq $1, 8($sp)
62 ldq $2, 16($sp); \ 104 ldq $2, 16($sp)
63 ldq $3, 24($sp); \ 105 ldq $3, 24($sp)
64 ldq $21, 152($sp); \ 106 ldq $21, 152($sp)
65 ldq $20, HAE_CACHE($19); \ 107 ldq $20, HAE_CACHE($19)
66 ldq $4, 32($sp); \ 108 ldq $4, 32($sp)
67 ldq $5, 40($sp); \ 109 ldq $5, 40($sp)
68 ldq $6, 48($sp); \ 110 ldq $6, 48($sp)
69 ldq $7, 56($sp); \ 111 ldq $7, 56($sp)
70 subq $20, $21, $20; \ 112 subq $20, $21, $20
71 ldq $8, 64($sp); \ 113 ldq $8, 64($sp)
72 beq $20, 99f; \ 114 beq $20, 99f
73 ldq $20, HAE_REG($19); \ 115 ldq $20, HAE_REG($19)
74 stq $21, HAE_CACHE($19); \ 116 stq $21, HAE_CACHE($19)
75 stq $21, 0($20); \ 117 stq $21, 0($20)
7699:; \ 11899: ldq $19, 72($sp)
77 ldq $19, 72($sp); \ 119 ldq $20, 80($sp)
78 ldq $20, 80($sp); \ 120 ldq $21, 88($sp)
79 ldq $21, 88($sp); \ 121 ldq $22, 96($sp)
80 ldq $22, 96($sp); \ 122 ldq $23, 104($sp)
81 ldq $23, 104($sp); \ 123 ldq $24, 112($sp)
82 ldq $24, 112($sp); \ 124 ldq $25, 120($sp)
83 ldq $25, 120($sp); \ 125 ldq $26, 128($sp)
84 ldq $26, 128($sp); \ 126 ldq $27, 136($sp)
85 ldq $27, 136($sp); \ 127 ldq $28, 144($sp)
86 ldq $28, 144($sp); \
87 addq $sp, SP_OFF, $sp 128 addq $sp, SP_OFF, $sp
129 .cfi_restore $0
130 .cfi_restore $1
131 .cfi_restore $2
132 .cfi_restore $3
133 .cfi_restore $4
134 .cfi_restore $5
135 .cfi_restore $6
136 .cfi_restore $7
137 .cfi_restore $8
138 .cfi_restore $19
139 .cfi_restore $20
140 .cfi_restore $21
141 .cfi_restore $22
142 .cfi_restore $23
143 .cfi_restore $24
144 .cfi_restore $25
145 .cfi_restore $26
146 .cfi_restore $27
147 .cfi_restore $28
148 .cfi_adjust_cfa_offset -SP_OFF
149.endm
150
151.macro DO_SWITCH_STACK
152 bsr $1, do_switch_stack
153 .cfi_adjust_cfa_offset SWITCH_STACK_SIZE
154 .cfi_rel_offset $9, 0
155 .cfi_rel_offset $10, 8
156 .cfi_rel_offset $11, 16
157 .cfi_rel_offset $12, 24
158 .cfi_rel_offset $13, 32
159 .cfi_rel_offset $14, 40
160 .cfi_rel_offset $15, 48
161 /* We don't really care about the FP registers for debugging. */
162.endm
163
164.macro UNDO_SWITCH_STACK
165 bsr $1, undo_switch_stack
166 .cfi_restore $9
167 .cfi_restore $10
168 .cfi_restore $11
169 .cfi_restore $12
170 .cfi_restore $13
171 .cfi_restore $14
172 .cfi_restore $15
173 .cfi_adjust_cfa_offset -SWITCH_STACK_SIZE
174.endm
88 175
89/* 176/*
90 * Non-syscall kernel entry points. 177 * Non-syscall kernel entry points.
91 */ 178 */
92 179
93 .align 4 180CFI_START_OSF_FRAME entInt
94 .globl entInt
95 .ent entInt
96entInt:
97 SAVE_ALL 181 SAVE_ALL
98 lda $8, 0x3fff 182 lda $8, 0x3fff
99 lda $26, ret_from_sys_call 183 lda $26, ret_from_sys_call
100 bic $sp, $8, $8 184 bic $sp, $8, $8
101 mov $sp, $19 185 mov $sp, $19
102 jsr $31, do_entInt 186 jsr $31, do_entInt
103.end entInt 187CFI_END_OSF_FRAME entInt
104 188
105 .align 4 189CFI_START_OSF_FRAME entArith
106 .globl entArith
107 .ent entArith
108entArith:
109 SAVE_ALL 190 SAVE_ALL
110 lda $8, 0x3fff 191 lda $8, 0x3fff
111 lda $26, ret_from_sys_call 192 lda $26, ret_from_sys_call
112 bic $sp, $8, $8 193 bic $sp, $8, $8
113 mov $sp, $18 194 mov $sp, $18
114 jsr $31, do_entArith 195 jsr $31, do_entArith
115.end entArith 196CFI_END_OSF_FRAME entArith
116 197
117 .align 4 198CFI_START_OSF_FRAME entMM
118 .globl entMM
119 .ent entMM
120entMM:
121 SAVE_ALL 199 SAVE_ALL
122/* save $9 - $15 so the inline exception code can manipulate them. */ 200/* save $9 - $15 so the inline exception code can manipulate them. */
123 subq $sp, 56, $sp 201 subq $sp, 56, $sp
202 .cfi_adjust_cfa_offset 56
124 stq $9, 0($sp) 203 stq $9, 0($sp)
125 stq $10, 8($sp) 204 stq $10, 8($sp)
126 stq $11, 16($sp) 205 stq $11, 16($sp)
@@ -128,6 +207,13 @@ entMM:
128 stq $13, 32($sp) 207 stq $13, 32($sp)
129 stq $14, 40($sp) 208 stq $14, 40($sp)
130 stq $15, 48($sp) 209 stq $15, 48($sp)
210 .cfi_rel_offset $9, 0
211 .cfi_rel_offset $10, 8
212 .cfi_rel_offset $11, 16
213 .cfi_rel_offset $12, 24
214 .cfi_rel_offset $13, 32
215 .cfi_rel_offset $14, 40
216 .cfi_rel_offset $15, 48
131 addq $sp, 56, $19 217 addq $sp, 56, $19
132/* handle the fault */ 218/* handle the fault */
133 lda $8, 0x3fff 219 lda $8, 0x3fff
@@ -142,28 +228,33 @@ entMM:
142 ldq $14, 40($sp) 228 ldq $14, 40($sp)
143 ldq $15, 48($sp) 229 ldq $15, 48($sp)
144 addq $sp, 56, $sp 230 addq $sp, 56, $sp
231 .cfi_restore $9
232 .cfi_restore $10
233 .cfi_restore $11
234 .cfi_restore $12
235 .cfi_restore $13
236 .cfi_restore $14
237 .cfi_restore $15
238 .cfi_adjust_cfa_offset -56
145/* finish up the syscall as normal. */ 239/* finish up the syscall as normal. */
146 br ret_from_sys_call 240 br ret_from_sys_call
147.end entMM 241CFI_END_OSF_FRAME entMM
148 242
149 .align 4 243CFI_START_OSF_FRAME entIF
150 .globl entIF
151 .ent entIF
152entIF:
153 SAVE_ALL 244 SAVE_ALL
154 lda $8, 0x3fff 245 lda $8, 0x3fff
155 lda $26, ret_from_sys_call 246 lda $26, ret_from_sys_call
156 bic $sp, $8, $8 247 bic $sp, $8, $8
157 mov $sp, $17 248 mov $sp, $17
158 jsr $31, do_entIF 249 jsr $31, do_entIF
159.end entIF 250CFI_END_OSF_FRAME entIF
160 251
161 .align 4 252CFI_START_OSF_FRAME entUna
162 .globl entUna
163 .ent entUna
164entUna:
165 lda $sp, -256($sp) 253 lda $sp, -256($sp)
254 .cfi_adjust_cfa_offset 256
166 stq $0, 0($sp) 255 stq $0, 0($sp)
256 .cfi_rel_offset $0, 0
257 .cfi_remember_state
167 ldq $0, 256($sp) /* get PS */ 258 ldq $0, 256($sp) /* get PS */
168 stq $1, 8($sp) 259 stq $1, 8($sp)
169 stq $2, 16($sp) 260 stq $2, 16($sp)
@@ -195,6 +286,32 @@ entUna:
195 stq $28, 224($sp) 286 stq $28, 224($sp)
196 mov $sp, $19 287 mov $sp, $19
197 stq $gp, 232($sp) 288 stq $gp, 232($sp)
289 .cfi_rel_offset $1, 1*8
290 .cfi_rel_offset $2, 2*8
291 .cfi_rel_offset $3, 3*8
292 .cfi_rel_offset $4, 4*8
293 .cfi_rel_offset $5, 5*8
294 .cfi_rel_offset $6, 6*8
295 .cfi_rel_offset $7, 7*8
296 .cfi_rel_offset $8, 8*8
297 .cfi_rel_offset $9, 9*8
298 .cfi_rel_offset $10, 10*8
299 .cfi_rel_offset $11, 11*8
300 .cfi_rel_offset $12, 12*8
301 .cfi_rel_offset $13, 13*8
302 .cfi_rel_offset $14, 14*8
303 .cfi_rel_offset $15, 15*8
304 .cfi_rel_offset $19, 19*8
305 .cfi_rel_offset $20, 20*8
306 .cfi_rel_offset $21, 21*8
307 .cfi_rel_offset $22, 22*8
308 .cfi_rel_offset $23, 23*8
309 .cfi_rel_offset $24, 24*8
310 .cfi_rel_offset $25, 25*8
311 .cfi_rel_offset $26, 26*8
312 .cfi_rel_offset $27, 27*8
313 .cfi_rel_offset $28, 28*8
314 .cfi_rel_offset $29, 29*8
198 lda $8, 0x3fff 315 lda $8, 0x3fff
199 stq $31, 248($sp) 316 stq $31, 248($sp)
200 bic $sp, $8, $8 317 bic $sp, $8, $8
@@ -228,16 +345,45 @@ entUna:
228 ldq $28, 224($sp) 345 ldq $28, 224($sp)
229 ldq $gp, 232($sp) 346 ldq $gp, 232($sp)
230 lda $sp, 256($sp) 347 lda $sp, 256($sp)
348 .cfi_restore $1
349 .cfi_restore $2
350 .cfi_restore $3
351 .cfi_restore $4
352 .cfi_restore $5
353 .cfi_restore $6
354 .cfi_restore $7
355 .cfi_restore $8
356 .cfi_restore $9
357 .cfi_restore $10
358 .cfi_restore $11
359 .cfi_restore $12
360 .cfi_restore $13
361 .cfi_restore $14
362 .cfi_restore $15
363 .cfi_restore $19
364 .cfi_restore $20
365 .cfi_restore $21
366 .cfi_restore $22
367 .cfi_restore $23
368 .cfi_restore $24
369 .cfi_restore $25
370 .cfi_restore $26
371 .cfi_restore $27
372 .cfi_restore $28
373 .cfi_restore $29
374 .cfi_adjust_cfa_offset -256
231 call_pal PAL_rti 375 call_pal PAL_rti
232.end entUna
233 376
234 .align 4 377 .align 4
235 .ent entUnaUser
236entUnaUser: 378entUnaUser:
379 .cfi_restore_state
237 ldq $0, 0($sp) /* restore original $0 */ 380 ldq $0, 0($sp) /* restore original $0 */
238 lda $sp, 256($sp) /* pop entUna's stack frame */ 381 lda $sp, 256($sp) /* pop entUna's stack frame */
382 .cfi_restore $0
383 .cfi_adjust_cfa_offset -256
239 SAVE_ALL /* setup normal kernel stack */ 384 SAVE_ALL /* setup normal kernel stack */
240 lda $sp, -56($sp) 385 lda $sp, -56($sp)
386 .cfi_adjust_cfa_offset 56
241 stq $9, 0($sp) 387 stq $9, 0($sp)
242 stq $10, 8($sp) 388 stq $10, 8($sp)
243 stq $11, 16($sp) 389 stq $11, 16($sp)
@@ -245,6 +391,13 @@ entUnaUser:
245 stq $13, 32($sp) 391 stq $13, 32($sp)
246 stq $14, 40($sp) 392 stq $14, 40($sp)
247 stq $15, 48($sp) 393 stq $15, 48($sp)
394 .cfi_rel_offset $9, 0
395 .cfi_rel_offset $10, 8
396 .cfi_rel_offset $11, 16
397 .cfi_rel_offset $12, 24
398 .cfi_rel_offset $13, 32
399 .cfi_rel_offset $14, 40
400 .cfi_rel_offset $15, 48
248 lda $8, 0x3fff 401 lda $8, 0x3fff
249 addq $sp, 56, $19 402 addq $sp, 56, $19
250 bic $sp, $8, $8 403 bic $sp, $8, $8
@@ -257,20 +410,25 @@ entUnaUser:
257 ldq $14, 40($sp) 410 ldq $14, 40($sp)
258 ldq $15, 48($sp) 411 ldq $15, 48($sp)
259 lda $sp, 56($sp) 412 lda $sp, 56($sp)
413 .cfi_restore $9
414 .cfi_restore $10
415 .cfi_restore $11
416 .cfi_restore $12
417 .cfi_restore $13
418 .cfi_restore $14
419 .cfi_restore $15
420 .cfi_adjust_cfa_offset -56
260 br ret_from_sys_call 421 br ret_from_sys_call
261.end entUnaUser 422CFI_END_OSF_FRAME entUna
262 423
263 .align 4 424CFI_START_OSF_FRAME entDbg
264 .globl entDbg
265 .ent entDbg
266entDbg:
267 SAVE_ALL 425 SAVE_ALL
268 lda $8, 0x3fff 426 lda $8, 0x3fff
269 lda $26, ret_from_sys_call 427 lda $26, ret_from_sys_call
270 bic $sp, $8, $8 428 bic $sp, $8, $8
271 mov $sp, $16 429 mov $sp, $16
272 jsr $31, do_entDbg 430 jsr $31, do_entDbg
273.end entDbg 431CFI_END_OSF_FRAME entDbg
274 432
275/* 433/*
276 * The system call entry point is special. Most importantly, it looks 434 * The system call entry point is special. Most importantly, it looks
@@ -285,8 +443,12 @@ entDbg:
285 443
286 .align 4 444 .align 4
287 .globl entSys 445 .globl entSys
288 .globl ret_from_sys_call 446 .type entSys, @function
289 .ent entSys 447 .cfi_startproc simple
448 .cfi_return_column 64
449 .cfi_def_cfa $sp, 48
450 .cfi_rel_offset 64, 8
451 .cfi_rel_offset $gp, 16
290entSys: 452entSys:
291 SAVE_ALL 453 SAVE_ALL
292 lda $8, 0x3fff 454 lda $8, 0x3fff
@@ -300,6 +462,9 @@ entSys:
300 stq $17, SP_OFF+32($sp) 462 stq $17, SP_OFF+32($sp)
301 s8addq $0, $5, $5 463 s8addq $0, $5, $5
302 stq $18, SP_OFF+40($sp) 464 stq $18, SP_OFF+40($sp)
465 .cfi_rel_offset $16, SP_OFF+24
466 .cfi_rel_offset $17, SP_OFF+32
467 .cfi_rel_offset $18, SP_OFF+40
303 blbs $3, strace 468 blbs $3, strace
304 beq $4, 1f 469 beq $4, 1f
305 ldq $27, 0($5) 470 ldq $27, 0($5)
@@ -310,6 +475,7 @@ entSys:
310 stq $31, 72($sp) /* a3=0 => no error */ 475 stq $31, 72($sp) /* a3=0 => no error */
311 476
312 .align 4 477 .align 4
478 .globl ret_from_sys_call
313ret_from_sys_call: 479ret_from_sys_call:
314 cmovne $26, 0, $18 /* $18 = 0 => non-restartable */ 480 cmovne $26, 0, $18 /* $18 = 0 => non-restartable */
315 ldq $0, SP_OFF($sp) 481 ldq $0, SP_OFF($sp)
@@ -324,10 +490,12 @@ ret_to_user:
324 and $17, _TIF_WORK_MASK, $2 490 and $17, _TIF_WORK_MASK, $2
325 bne $2, work_pending 491 bne $2, work_pending
326restore_all: 492restore_all:
493 .cfi_remember_state
327 RESTORE_ALL 494 RESTORE_ALL
328 call_pal PAL_rti 495 call_pal PAL_rti
329 496
330ret_to_kernel: 497ret_to_kernel:
498 .cfi_restore_state
331 lda $16, 7 499 lda $16, 7
332 call_pal PAL_swpipl 500 call_pal PAL_swpipl
333 br restore_all 501 br restore_all
@@ -356,7 +524,6 @@ $ret_success:
356 stq $0, 0($sp) 524 stq $0, 0($sp)
357 stq $31, 72($sp) /* a3=0 => no error */ 525 stq $31, 72($sp) /* a3=0 => no error */
358 br ret_from_sys_call 526 br ret_from_sys_call
359.end entSys
360 527
361/* 528/*
362 * Do all cleanup when returning from all interrupts and system calls. 529 * Do all cleanup when returning from all interrupts and system calls.
@@ -370,7 +537,7 @@ $ret_success:
370 */ 537 */
371 538
372 .align 4 539 .align 4
373 .ent work_pending 540 .type work_pending, @function
374work_pending: 541work_pending:
375 and $17, _TIF_NOTIFY_RESUME | _TIF_SIGPENDING, $2 542 and $17, _TIF_NOTIFY_RESUME | _TIF_SIGPENDING, $2
376 bne $2, $work_notifysig 543 bne $2, $work_notifysig
@@ -387,23 +554,22 @@ $work_resched:
387 554
388$work_notifysig: 555$work_notifysig:
389 mov $sp, $16 556 mov $sp, $16
390 bsr $1, do_switch_stack 557 DO_SWITCH_STACK
391 jsr $26, do_work_pending 558 jsr $26, do_work_pending
392 bsr $1, undo_switch_stack 559 UNDO_SWITCH_STACK
393 br restore_all 560 br restore_all
394.end work_pending
395 561
396/* 562/*
397 * PTRACE syscall handler 563 * PTRACE syscall handler
398 */ 564 */
399 565
400 .align 4 566 .align 4
401 .ent strace 567 .type strace, @function
402strace: 568strace:
403 /* set up signal stack, call syscall_trace */ 569 /* set up signal stack, call syscall_trace */
404 bsr $1, do_switch_stack 570 DO_SWITCH_STACK
405 jsr $26, syscall_trace_enter /* returns the syscall number */ 571 jsr $26, syscall_trace_enter /* returns the syscall number */
406 bsr $1, undo_switch_stack 572 UNDO_SWITCH_STACK
407 573
408 /* get the arguments back.. */ 574 /* get the arguments back.. */
409 ldq $16, SP_OFF+24($sp) 575 ldq $16, SP_OFF+24($sp)
@@ -431,9 +597,9 @@ ret_from_straced:
431$strace_success: 597$strace_success:
432 stq $0, 0($sp) /* save return value */ 598 stq $0, 0($sp) /* save return value */
433 599
434 bsr $1, do_switch_stack 600 DO_SWITCH_STACK
435 jsr $26, syscall_trace_leave 601 jsr $26, syscall_trace_leave
436 bsr $1, undo_switch_stack 602 UNDO_SWITCH_STACK
437 br $31, ret_from_sys_call 603 br $31, ret_from_sys_call
438 604
439 .align 3 605 .align 3
@@ -447,26 +613,31 @@ $strace_error:
447 stq $0, 0($sp) 613 stq $0, 0($sp)
448 stq $1, 72($sp) /* a3 for return */ 614 stq $1, 72($sp) /* a3 for return */
449 615
450 bsr $1, do_switch_stack 616 DO_SWITCH_STACK
451 mov $18, $9 /* save old syscall number */ 617 mov $18, $9 /* save old syscall number */
452 mov $19, $10 /* save old a3 */ 618 mov $19, $10 /* save old a3 */
453 jsr $26, syscall_trace_leave 619 jsr $26, syscall_trace_leave
454 mov $9, $18 620 mov $9, $18
455 mov $10, $19 621 mov $10, $19
456 bsr $1, undo_switch_stack 622 UNDO_SWITCH_STACK
457 623
458 mov $31, $26 /* tell "ret_from_sys_call" we can restart */ 624 mov $31, $26 /* tell "ret_from_sys_call" we can restart */
459 br ret_from_sys_call 625 br ret_from_sys_call
460.end strace 626CFI_END_OSF_FRAME entSys
461 627
462/* 628/*
463 * Save and restore the switch stack -- aka the balance of the user context. 629 * Save and restore the switch stack -- aka the balance of the user context.
464 */ 630 */
465 631
466 .align 4 632 .align 4
467 .ent do_switch_stack 633 .type do_switch_stack, @function
634 .cfi_startproc simple
635 .cfi_return_column 64
636 .cfi_def_cfa $sp, 0
637 .cfi_register 64, $1
468do_switch_stack: 638do_switch_stack:
469 lda $sp, -SWITCH_STACK_SIZE($sp) 639 lda $sp, -SWITCH_STACK_SIZE($sp)
640 .cfi_adjust_cfa_offset SWITCH_STACK_SIZE
470 stq $9, 0($sp) 641 stq $9, 0($sp)
471 stq $10, 8($sp) 642 stq $10, 8($sp)
472 stq $11, 16($sp) 643 stq $11, 16($sp)
@@ -510,10 +681,14 @@ do_switch_stack:
510 stt $f0, 312($sp) # save fpcr in slot of $f31 681 stt $f0, 312($sp) # save fpcr in slot of $f31
511 ldt $f0, 64($sp) # dont let "do_switch_stack" change fp state. 682 ldt $f0, 64($sp) # dont let "do_switch_stack" change fp state.
512 ret $31, ($1), 1 683 ret $31, ($1), 1
513.end do_switch_stack 684 .cfi_endproc
685 .size do_switch_stack, .-do_switch_stack
514 686
515 .align 4 687 .align 4
516 .ent undo_switch_stack 688 .type undo_switch_stack, @function
689 .cfi_startproc simple
690 .cfi_def_cfa $sp, 0
691 .cfi_register 64, $1
517undo_switch_stack: 692undo_switch_stack:
518 ldq $9, 0($sp) 693 ldq $9, 0($sp)
519 ldq $10, 8($sp) 694 ldq $10, 8($sp)
@@ -558,7 +733,8 @@ undo_switch_stack:
558 ldt $f30, 304($sp) 733 ldt $f30, 304($sp)
559 lda $sp, SWITCH_STACK_SIZE($sp) 734 lda $sp, SWITCH_STACK_SIZE($sp)
560 ret $31, ($1), 1 735 ret $31, ($1), 1
561.end undo_switch_stack 736 .cfi_endproc
737 .size undo_switch_stack, .-undo_switch_stack
562 738
563/* 739/*
564 * The meat of the context switch code. 740 * The meat of the context switch code.
@@ -566,17 +742,18 @@ undo_switch_stack:
566 742
567 .align 4 743 .align 4
568 .globl alpha_switch_to 744 .globl alpha_switch_to
569 .ent alpha_switch_to 745 .type alpha_switch_to, @function
746 .cfi_startproc
570alpha_switch_to: 747alpha_switch_to:
571 .prologue 0 748 DO_SWITCH_STACK
572 bsr $1, do_switch_stack
573 call_pal PAL_swpctx 749 call_pal PAL_swpctx
574 lda $8, 0x3fff 750 lda $8, 0x3fff
575 bsr $1, undo_switch_stack 751 UNDO_SWITCH_STACK
576 bic $sp, $8, $8 752 bic $sp, $8, $8
577 mov $17, $0 753 mov $17, $0
578 ret 754 ret
579.end alpha_switch_to 755 .cfi_endproc
756 .size alpha_switch_to, .-alpha_switch_to
580 757
581/* 758/*
582 * New processes begin life here. 759 * New processes begin life here.
diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c
index f433fc11877a..28e4429596f3 100644
--- a/arch/alpha/kernel/irq_alpha.c
+++ b/arch/alpha/kernel/irq_alpha.c
@@ -236,7 +236,7 @@ void __init
236init_rtc_irq(void) 236init_rtc_irq(void)
237{ 237{
238 irq_set_chip_and_handler_name(RTC_IRQ, &dummy_irq_chip, 238 irq_set_chip_and_handler_name(RTC_IRQ, &dummy_irq_chip,
239 handle_simple_irq, "RTC"); 239 handle_percpu_irq, "RTC");
240 setup_irq(RTC_IRQ, &timer_irqaction); 240 setup_irq(RTC_IRQ, &timer_irqaction);
241} 241}
242 242
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index 7b60834fb4b2..9dbbcb3b9146 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -116,7 +116,7 @@ wait_boot_cpu_to_stop(int cpuid)
116/* 116/*
117 * Where secondaries begin a life of C. 117 * Where secondaries begin a life of C.
118 */ 118 */
119void __cpuinit 119void
120smp_callin(void) 120smp_callin(void)
121{ 121{
122 int cpuid = hard_smp_processor_id(); 122 int cpuid = hard_smp_processor_id();
@@ -194,7 +194,7 @@ wait_for_txrdy (unsigned long cpumask)
194 * Send a message to a secondary's console. "START" is one such 194 * Send a message to a secondary's console. "START" is one such
195 * interesting message. ;-) 195 * interesting message. ;-)
196 */ 196 */
197static void __cpuinit 197static void
198send_secondary_console_msg(char *str, int cpuid) 198send_secondary_console_msg(char *str, int cpuid)
199{ 199{
200 struct percpu_struct *cpu; 200 struct percpu_struct *cpu;
@@ -264,9 +264,10 @@ recv_secondary_console_msg(void)
264 if (cnt <= 0 || cnt >= 80) 264 if (cnt <= 0 || cnt >= 80)
265 strcpy(buf, "<<< BOGUS MSG >>>"); 265 strcpy(buf, "<<< BOGUS MSG >>>");
266 else { 266 else {
267 cp1 = (char *) &cpu->ipc_buffer[11]; 267 cp1 = (char *) &cpu->ipc_buffer[1];
268 cp2 = buf; 268 cp2 = buf;
269 strcpy(cp2, cp1); 269 memcpy(cp2, cp1, cnt);
270 cp2[cnt] = '\0';
270 271
271 while ((cp2 = strchr(cp2, '\r')) != 0) { 272 while ((cp2 = strchr(cp2, '\r')) != 0) {
272 *cp2 = ' '; 273 *cp2 = ' ';
@@ -285,7 +286,7 @@ recv_secondary_console_msg(void)
285/* 286/*
286 * Convince the console to have a secondary cpu begin execution. 287 * Convince the console to have a secondary cpu begin execution.
287 */ 288 */
288static int __cpuinit 289static int
289secondary_cpu_start(int cpuid, struct task_struct *idle) 290secondary_cpu_start(int cpuid, struct task_struct *idle)
290{ 291{
291 struct percpu_struct *cpu; 292 struct percpu_struct *cpu;
@@ -356,7 +357,7 @@ secondary_cpu_start(int cpuid, struct task_struct *idle)
356/* 357/*
357 * Bring one cpu online. 358 * Bring one cpu online.
358 */ 359 */
359static int __cpuinit 360static int
360smp_boot_one_cpu(int cpuid, struct task_struct *idle) 361smp_boot_one_cpu(int cpuid, struct task_struct *idle)
361{ 362{
362 unsigned long timeout; 363 unsigned long timeout;
@@ -472,7 +473,7 @@ smp_prepare_boot_cpu(void)
472{ 473{
473} 474}
474 475
475int __cpuinit 476int
476__cpu_up(unsigned int cpu, struct task_struct *tidle) 477__cpu_up(unsigned int cpu, struct task_struct *tidle)
477{ 478{
478 smp_boot_one_cpu(cpu, tidle); 479 smp_boot_one_cpu(cpu, tidle);
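The recv_secondary_console_msg() hunk above addresses two problems: the message text starts at ipc_buffer[1] rather than [11], and the buffer returned by the console is not guaranteed to be NUL-terminated, so the strcpy() becomes a bounded copy with explicit termination. The pattern as a stand-alone sketch (names are placeholders, not kernel API; assumes the usual <string.h> declarations):

/* Copy exactly cnt bytes from a possibly unterminated source and
 * NUL-terminate the destination ourselves. */
static void copy_console_msg(char *dst, const char *src, size_t cnt)
{
	memcpy(dst, src, cnt);
	dst[cnt] = '\0';	/* caller must provide cnt + 1 bytes in dst */
}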
diff --git a/arch/alpha/kernel/sys_dp264.c b/arch/alpha/kernel/sys_dp264.c
index 5bf401f7ea97..6c35159bc00e 100644
--- a/arch/alpha/kernel/sys_dp264.c
+++ b/arch/alpha/kernel/sys_dp264.c
@@ -190,9 +190,6 @@ static struct irq_chip clipper_irq_type = {
190static void 190static void
191dp264_device_interrupt(unsigned long vector) 191dp264_device_interrupt(unsigned long vector)
192{ 192{
193#if 1
194 printk("dp264_device_interrupt: NOT IMPLEMENTED YET!!\n");
195#else
196 unsigned long pld; 193 unsigned long pld;
197 unsigned int i; 194 unsigned int i;
198 195
@@ -210,12 +207,7 @@ dp264_device_interrupt(unsigned long vector)
210 isa_device_interrupt(vector); 207 isa_device_interrupt(vector);
211 else 208 else
212 handle_irq(16 + i); 209 handle_irq(16 + i);
213#if 0
214 TSUNAMI_cchip->dir0.csr = 1UL << i; mb();
215 tmp = TSUNAMI_cchip->dir0.csr;
216#endif
217 } 210 }
218#endif
219} 211}
220 212
221static void 213static void
diff --git a/arch/alpha/kernel/sys_marvel.c b/arch/alpha/kernel/sys_marvel.c
index 407accc80877..c92e389ff219 100644
--- a/arch/alpha/kernel/sys_marvel.c
+++ b/arch/alpha/kernel/sys_marvel.c
@@ -317,8 +317,9 @@ marvel_init_irq(void)
317} 317}
318 318
319static int 319static int
320marvel_map_irq(struct pci_dev *dev, u8 slot, u8 pin) 320marvel_map_irq(const struct pci_dev *cdev, u8 slot, u8 pin)
321{ 321{
322 struct pci_dev *dev = (struct pci_dev *)cdev;
322 struct pci_controller *hose = dev->sysdata; 323 struct pci_controller *hose = dev->sysdata;
323 struct io7_port *io7_port = hose->sysdata; 324 struct io7_port *io7_port = hose->sysdata;
324 struct io7 *io7 = io7_port->io7; 325 struct io7 *io7 = io7_port->io7;
diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S
index 4284ec798ec9..dca9b3fb0071 100644
--- a/arch/alpha/kernel/systbls.S
+++ b/arch/alpha/kernel/systbls.S
@@ -524,6 +524,8 @@ sys_call_table:
524 .quad sys_sendmmsg 524 .quad sys_sendmmsg
525 .quad sys_process_vm_readv 525 .quad sys_process_vm_readv
526 .quad sys_process_vm_writev /* 505 */ 526 .quad sys_process_vm_writev /* 505 */
527 .quad sys_kcmp
528 .quad sys_finit_module
527 529
528 .size sys_call_table, . - sys_call_table 530 .size sys_call_table, . - sys_call_table
529 .type sys_call_table, @object 531 .type sys_call_table, @object
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index e336694ca042..ea3395036556 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -105,9 +105,7 @@ void arch_irq_work_raise(void)
105 105
106static inline __u32 rpcc(void) 106static inline __u32 rpcc(void)
107{ 107{
108 __u32 result; 108 return __builtin_alpha_rpcc();
109 asm volatile ("rpcc %0" : "=r"(result));
110 return result;
111} 109}
112 110
113int update_persistent_clock(struct timespec now) 111int update_persistent_clock(struct timespec now)
diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c
index affccb959a9e..bd0665cdc840 100644
--- a/arch/alpha/kernel/traps.c
+++ b/arch/alpha/kernel/traps.c
@@ -32,7 +32,7 @@
32 32
33static int opDEC_fix; 33static int opDEC_fix;
34 34
35static void __cpuinit 35static void
36opDEC_check(void) 36opDEC_check(void)
37{ 37{
38 __asm__ __volatile__ ( 38 __asm__ __volatile__ (
@@ -66,8 +66,8 @@ dik_show_regs(struct pt_regs *regs, unsigned long *r9_15)
66{ 66{
67 printk("pc = [<%016lx>] ra = [<%016lx>] ps = %04lx %s\n", 67 printk("pc = [<%016lx>] ra = [<%016lx>] ps = %04lx %s\n",
68 regs->pc, regs->r26, regs->ps, print_tainted()); 68 regs->pc, regs->r26, regs->ps, print_tainted());
69 print_symbol("pc is at %s\n", regs->pc); 69 printk("pc is at %pSR\n", (void *)regs->pc);
70 print_symbol("ra is at %s\n", regs->r26 ); 70 printk("ra is at %pSR\n", (void *)regs->r26);
71 printk("v0 = %016lx t0 = %016lx t1 = %016lx\n", 71 printk("v0 = %016lx t0 = %016lx t1 = %016lx\n",
72 regs->r0, regs->r1, regs->r2); 72 regs->r0, regs->r1, regs->r2);
73 printk("t2 = %016lx t3 = %016lx t4 = %016lx\n", 73 printk("t2 = %016lx t3 = %016lx t4 = %016lx\n",
@@ -132,9 +132,7 @@ dik_show_trace(unsigned long *sp)
132 continue; 132 continue;
133 if (tmp >= (unsigned long) &_etext) 133 if (tmp >= (unsigned long) &_etext)
134 continue; 134 continue;
135 printk("[<%lx>]", tmp); 135 printk("[<%lx>] %pSR\n", tmp, (void *)tmp);
136 print_symbol(" %s", tmp);
137 printk("\n");
138 if (i > 40) { 136 if (i > 40) {
139 printk(" ..."); 137 printk(" ...");
140 break; 138 break;
@@ -1059,7 +1057,7 @@ give_sigbus:
1059 return; 1057 return;
1060} 1058}
1061 1059
1062void __cpuinit 1060void
1063trap_init(void) 1061trap_init(void)
1064{ 1062{
1065 /* Tell PAL-code what global pointer we want in the kernel. */ 1063 /* Tell PAL-code what global pointer we want in the kernel. */
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index ba412e02ec0c..37c0f4e978d4 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1600,8 +1600,7 @@ config LOCAL_TIMERS
1600config ARCH_NR_GPIO 1600config ARCH_NR_GPIO
1601 int 1601 int
1602 default 1024 if ARCH_SHMOBILE || ARCH_TEGRA 1602 default 1024 if ARCH_SHMOBILE || ARCH_TEGRA
1603 default 512 if SOC_OMAP5 1603 default 512 if ARCH_EXYNOS || ARCH_KEYSTONE || SOC_OMAP5
1604 default 512 if ARCH_KEYSTONE
1605 default 392 if ARCH_U8500 1604 default 392 if ARCH_U8500
1606 default 352 if ARCH_VT8500 1605 default 352 if ARCH_VT8500
1607 default 288 if ARCH_SUNXI 1606 default 288 if ARCH_SUNXI
diff --git a/arch/arm/boot/dts/atlas6.dtsi b/arch/arm/boot/dts/atlas6.dtsi
index 9866cd736dee..a0f2721ea583 100644
--- a/arch/arm/boot/dts/atlas6.dtsi
+++ b/arch/arm/boot/dts/atlas6.dtsi
@@ -485,6 +485,12 @@
485 sirf,function = "usp0"; 485 sirf,function = "usp0";
486 }; 486 };
487 }; 487 };
488 usp0_uart_nostreamctrl_pins_a: usp0@1 {
489 usp0 {
490 sirf,pins = "usp0_uart_nostreamctrl_grp";
491 sirf,function = "usp0_uart_nostreamctrl";
492 };
493 };
488 usp1_pins_a: usp1@0 { 494 usp1_pins_a: usp1@0 {
489 usp1 { 495 usp1 {
490 sirf,pins = "usp1grp"; 496 sirf,pins = "usp1grp";
@@ -515,16 +521,16 @@
515 sirf,function = "pulse_count"; 521 sirf,function = "pulse_count";
516 }; 522 };
517 }; 523 };
518 cko0_rst_pins_a: cko0_rst@0 { 524 cko0_pins_a: cko0@0 {
519 cko0_rst { 525 cko0 {
520 sirf,pins = "cko0_rstgrp"; 526 sirf,pins = "cko0grp";
521 sirf,function = "cko0_rst"; 527 sirf,function = "cko0";
522 }; 528 };
523 }; 529 };
524 cko1_rst_pins_a: cko1_rst@0 { 530 cko1_pins_a: cko1@0 {
525 cko1_rst { 531 cko1 {
526 sirf,pins = "cko1_rstgrp"; 532 sirf,pins = "cko1grp";
527 sirf,function = "cko1_rst"; 533 sirf,function = "cko1";
528 }; 534 };
529 }; 535 };
530 }; 536 };
diff --git a/arch/arm/boot/dts/imx28-apx4devkit.dts b/arch/arm/boot/dts/imx28-apx4devkit.dts
index 43bf3c796cba..0e7fed47bd8d 100644
--- a/arch/arm/boot/dts/imx28-apx4devkit.dts
+++ b/arch/arm/boot/dts/imx28-apx4devkit.dts
@@ -147,7 +147,7 @@
147 reg = <0x0a>; 147 reg = <0x0a>;
148 VDDA-supply = <&reg_3p3v>; 148 VDDA-supply = <&reg_3p3v>;
149 VDDIO-supply = <&reg_3p3v>; 149 VDDIO-supply = <&reg_3p3v>;
150 150 clocks = <&saif0>;
151 }; 151 };
152 152
153 pcf8563: rtc@51 { 153 pcf8563: rtc@51 {
diff --git a/arch/arm/boot/dts/imx28-evk.dts b/arch/arm/boot/dts/imx28-evk.dts
index 1f0d38d7b16f..e035f4664b97 100644
--- a/arch/arm/boot/dts/imx28-evk.dts
+++ b/arch/arm/boot/dts/imx28-evk.dts
@@ -195,7 +195,7 @@
195 reg = <0x0a>; 195 reg = <0x0a>;
196 VDDA-supply = <&reg_3p3v>; 196 VDDA-supply = <&reg_3p3v>;
197 VDDIO-supply = <&reg_3p3v>; 197 VDDIO-supply = <&reg_3p3v>;
198 198 clocks = <&saif0>;
199 }; 199 };
200 200
201 at24@51 { 201 at24@51 {
diff --git a/arch/arm/boot/dts/imx28-m28evk.dts b/arch/arm/boot/dts/imx28-m28evk.dts
index 880df2f13be8..44d9da57736e 100644
--- a/arch/arm/boot/dts/imx28-m28evk.dts
+++ b/arch/arm/boot/dts/imx28-m28evk.dts
@@ -184,7 +184,7 @@
184 reg = <0x0a>; 184 reg = <0x0a>;
185 VDDA-supply = <&reg_3p3v>; 185 VDDA-supply = <&reg_3p3v>;
186 VDDIO-supply = <&reg_3p3v>; 186 VDDIO-supply = <&reg_3p3v>;
187 187 clocks = <&saif0>;
188 }; 188 };
189 189
190 eeprom: eeprom@51 { 190 eeprom: eeprom@51 {
diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi
index 6a8acb01b1d3..9524a0571281 100644
--- a/arch/arm/boot/dts/imx28.dtsi
+++ b/arch/arm/boot/dts/imx28.dtsi
@@ -837,6 +837,7 @@
837 compatible = "fsl,imx28-saif"; 837 compatible = "fsl,imx28-saif";
838 reg = <0x80042000 0x2000>; 838 reg = <0x80042000 0x2000>;
839 interrupts = <59 80>; 839 interrupts = <59 80>;
840 #clock-cells = <0>;
840 clocks = <&clks 53>; 841 clocks = <&clks 53>;
841 dmas = <&dma_apbx 4>; 842 dmas = <&dma_apbx 4>;
842 dma-names = "rx-tx"; 843 dma-names = "rx-tx";
diff --git a/arch/arm/boot/dts/imx51-babbage.dts b/arch/arm/boot/dts/imx51-babbage.dts
index 6dd9486c755b..ad3471ca17c7 100644
--- a/arch/arm/boot/dts/imx51-babbage.dts
+++ b/arch/arm/boot/dts/imx51-babbage.dts
@@ -61,6 +61,16 @@
61 mux-int-port = <2>; 61 mux-int-port = <2>;
62 mux-ext-port = <3>; 62 mux-ext-port = <3>;
63 }; 63 };
64
65 clocks {
66 clk_26M: codec_clock {
67 compatible = "fixed-clock";
68 reg=<0>;
69 #clock-cells = <0>;
70 clock-frequency = <26000000>;
71 gpios = <&gpio4 26 1>;
72 };
73 };
64}; 74};
65 75
66&esdhc1 { 76&esdhc1 {
@@ -229,6 +239,7 @@
229 MX51_PAD_EIM_A27__GPIO2_21 0x5 239 MX51_PAD_EIM_A27__GPIO2_21 0x5
230 MX51_PAD_CSPI1_SS0__GPIO4_24 0x85 240 MX51_PAD_CSPI1_SS0__GPIO4_24 0x85
231 MX51_PAD_CSPI1_SS1__GPIO4_25 0x85 241 MX51_PAD_CSPI1_SS1__GPIO4_25 0x85
242 MX51_PAD_CSPI1_RDY__GPIO4_26 0x80000000
232 >; 243 >;
233 }; 244 };
234 }; 245 };
@@ -255,7 +266,7 @@
255 sgtl5000: codec@0a { 266 sgtl5000: codec@0a {
256 compatible = "fsl,sgtl5000"; 267 compatible = "fsl,sgtl5000";
257 reg = <0x0a>; 268 reg = <0x0a>;
258 clock-frequency = <26000000>; 269 clocks = <&clk_26M>;
259 VDDA-supply = <&vdig_reg>; 270 VDDA-supply = <&vdig_reg>;
260 VDDIO-supply = <&vvideo_reg>; 271 VDDIO-supply = <&vvideo_reg>;
261 }; 272 };
diff --git a/arch/arm/boot/dts/imx53-mba53.dts b/arch/arm/boot/dts/imx53-mba53.dts
index aaa33bc99f78..a63090267941 100644
--- a/arch/arm/boot/dts/imx53-mba53.dts
+++ b/arch/arm/boot/dts/imx53-mba53.dts
@@ -27,7 +27,7 @@
27 27
28 backlight { 28 backlight {
29 compatible = "pwm-backlight"; 29 compatible = "pwm-backlight";
30 pwms = <&pwm2 0 50000 0 0>; 30 pwms = <&pwm2 0 50000>;
31 brightness-levels = <0 24 28 32 36 40 44 48 52 56 60 64 68 72 76 80 84 88 92 96 100>; 31 brightness-levels = <0 24 28 32 36 40 44 48 52 56 60 64 68 72 76 80 84 88 92 96 100>;
32 default-brightness-level = <10>; 32 default-brightness-level = <10>;
33 enable-gpios = <&gpio7 7 0>; 33 enable-gpios = <&gpio7 7 0>;
diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
index 3895fbba8fce..569aa9f2c4ed 100644
--- a/arch/arm/boot/dts/imx53.dtsi
+++ b/arch/arm/boot/dts/imx53.dtsi
@@ -725,15 +725,15 @@
725 uart1 { 725 uart1 {
726 pinctrl_uart1_1: uart1grp-1 { 726 pinctrl_uart1_1: uart1grp-1 {
727 fsl,pins = < 727 fsl,pins = <
728 MX53_PAD_CSI0_DAT10__UART1_TXD_MUX 0x1c5 728 MX53_PAD_CSI0_DAT10__UART1_TXD_MUX 0x1e4
729 MX53_PAD_CSI0_DAT11__UART1_RXD_MUX 0x1c5 729 MX53_PAD_CSI0_DAT11__UART1_RXD_MUX 0x1e4
730 >; 730 >;
731 }; 731 };
732 732
733 pinctrl_uart1_2: uart1grp-2 { 733 pinctrl_uart1_2: uart1grp-2 {
734 fsl,pins = < 734 fsl,pins = <
735 MX53_PAD_PATA_DIOW__UART1_TXD_MUX 0x1c5 735 MX53_PAD_PATA_DIOW__UART1_TXD_MUX 0x1e4
736 MX53_PAD_PATA_DMACK__UART1_RXD_MUX 0x1c5 736 MX53_PAD_PATA_DMACK__UART1_RXD_MUX 0x1e4
737 >; 737 >;
738 }; 738 };
739 739
@@ -748,8 +748,8 @@
748 uart2 { 748 uart2 {
749 pinctrl_uart2_1: uart2grp-1 { 749 pinctrl_uart2_1: uart2grp-1 {
750 fsl,pins = < 750 fsl,pins = <
751 MX53_PAD_PATA_BUFFER_EN__UART2_RXD_MUX 0x1c5 751 MX53_PAD_PATA_BUFFER_EN__UART2_RXD_MUX 0x1e4
752 MX53_PAD_PATA_DMARQ__UART2_TXD_MUX 0x1c5 752 MX53_PAD_PATA_DMARQ__UART2_TXD_MUX 0x1e4
753 >; 753 >;
754 }; 754 };
755 755
@@ -766,17 +766,17 @@
766 uart3 { 766 uart3 {
767 pinctrl_uart3_1: uart3grp-1 { 767 pinctrl_uart3_1: uart3grp-1 {
768 fsl,pins = < 768 fsl,pins = <
769 MX53_PAD_PATA_CS_0__UART3_TXD_MUX 0x1c5 769 MX53_PAD_PATA_CS_0__UART3_TXD_MUX 0x1e4
770 MX53_PAD_PATA_CS_1__UART3_RXD_MUX 0x1c5 770 MX53_PAD_PATA_CS_1__UART3_RXD_MUX 0x1e4
771 MX53_PAD_PATA_DA_1__UART3_CTS 0x1c5 771 MX53_PAD_PATA_DA_1__UART3_CTS 0x1e4
772 MX53_PAD_PATA_DA_2__UART3_RTS 0x1c5 772 MX53_PAD_PATA_DA_2__UART3_RTS 0x1e4
773 >; 773 >;
774 }; 774 };
775 775
776 pinctrl_uart3_2: uart3grp-2 { 776 pinctrl_uart3_2: uart3grp-2 {
777 fsl,pins = < 777 fsl,pins = <
778 MX53_PAD_PATA_CS_0__UART3_TXD_MUX 0x1c5 778 MX53_PAD_PATA_CS_0__UART3_TXD_MUX 0x1e4
779 MX53_PAD_PATA_CS_1__UART3_RXD_MUX 0x1c5 779 MX53_PAD_PATA_CS_1__UART3_RXD_MUX 0x1e4
780 >; 780 >;
781 }; 781 };
782 782
@@ -785,8 +785,8 @@
785 uart4 { 785 uart4 {
786 pinctrl_uart4_1: uart4grp-1 { 786 pinctrl_uart4_1: uart4grp-1 {
787 fsl,pins = < 787 fsl,pins = <
788 MX53_PAD_KEY_COL0__UART4_TXD_MUX 0x1c5 788 MX53_PAD_KEY_COL0__UART4_TXD_MUX 0x1e4
789 MX53_PAD_KEY_ROW0__UART4_RXD_MUX 0x1c5 789 MX53_PAD_KEY_ROW0__UART4_RXD_MUX 0x1e4
790 >; 790 >;
791 }; 791 };
792 }; 792 };
@@ -794,8 +794,8 @@
794 uart5 { 794 uart5 {
795 pinctrl_uart5_1: uart5grp-1 { 795 pinctrl_uart5_1: uart5grp-1 {
796 fsl,pins = < 796 fsl,pins = <
797 MX53_PAD_KEY_COL1__UART5_TXD_MUX 0x1c5 797 MX53_PAD_KEY_COL1__UART5_TXD_MUX 0x1e4
798 MX53_PAD_KEY_ROW1__UART5_RXD_MUX 0x1c5 798 MX53_PAD_KEY_ROW1__UART5_RXD_MUX 0x1e4
799 >; 799 >;
800 }; 800 };
801 }; 801 };
diff --git a/arch/arm/boot/dts/prima2.dtsi b/arch/arm/boot/dts/prima2.dtsi
index 05e9489cf95c..bbeb623fc2c6 100644
--- a/arch/arm/boot/dts/prima2.dtsi
+++ b/arch/arm/boot/dts/prima2.dtsi
@@ -515,16 +515,16 @@
515 sirf,function = "pulse_count"; 515 sirf,function = "pulse_count";
516 }; 516 };
517 }; 517 };
518 cko0_rst_pins_a: cko0_rst@0 { 518 cko0_pins_a: cko0@0 {
519 cko0_rst { 519 cko0 {
520 sirf,pins = "cko0_rstgrp"; 520 sirf,pins = "cko0grp";
521 sirf,function = "cko0_rst"; 521 sirf,function = "cko0";
522 }; 522 };
523 }; 523 };
524 cko1_rst_pins_a: cko1_rst@0 { 524 cko1_pins_a: cko1@0 {
525 cko1_rst { 525 cko1 {
526 sirf,pins = "cko1_rstgrp"; 526 sirf,pins = "cko1grp";
527 sirf,function = "cko1_rst"; 527 sirf,function = "cko1";
528 }; 528 };
529 }; 529 };
530 }; 530 };
diff --git a/arch/arm/boot/dts/stih416-pinctrl.dtsi b/arch/arm/boot/dts/stih416-pinctrl.dtsi
index 957b21a71b4b..0f246c979262 100644
--- a/arch/arm/boot/dts/stih416-pinctrl.dtsi
+++ b/arch/arm/boot/dts/stih416-pinctrl.dtsi
@@ -166,6 +166,15 @@
166 reg = <0x9000 0x100>; 166 reg = <0x9000 0x100>;
167 st,bank-name = "PIO31"; 167 st,bank-name = "PIO31";
168 }; 168 };
169
170 serial2-oe {
171 pinctrl_serial2_oe: serial2-1 {
172 st,pins {
173 output-enable = <&PIO11 3 ALT2 OUT>;
174 };
175 };
176 };
177
169 }; 178 };
170 179
171 pin-controller-rear { 180 pin-controller-rear {
@@ -218,7 +227,6 @@
218 st,pins { 227 st,pins {
219 tx = <&PIO17 4 ALT2 OUT>; 228 tx = <&PIO17 4 ALT2 OUT>;
220 rx = <&PIO17 5 ALT2 IN>; 229 rx = <&PIO17 5 ALT2 IN>;
221 output-enable = <&PIO11 3 ALT2 OUT>;
222 }; 230 };
223 }; 231 };
224 }; 232 };
diff --git a/arch/arm/boot/dts/stih416.dtsi b/arch/arm/boot/dts/stih416.dtsi
index 3cecd9689a49..1a0326ea7d07 100644
--- a/arch/arm/boot/dts/stih416.dtsi
+++ b/arch/arm/boot/dts/stih416.dtsi
@@ -79,7 +79,7 @@
79 interrupts = <0 197 0>; 79 interrupts = <0 197 0>;
80 clocks = <&CLK_S_ICN_REG_0>; 80 clocks = <&CLK_S_ICN_REG_0>;
81 pinctrl-names = "default"; 81 pinctrl-names = "default";
82 pinctrl-0 = <&pinctrl_serial2>; 82 pinctrl-0 = <&pinctrl_serial2 &pinctrl_serial2_oe>;
83 }; 83 };
84 84
85 /* SBC_UART1 */ 85 /* SBC_UART1 */
diff --git a/arch/arm/boot/dts/twl4030.dtsi b/arch/arm/boot/dts/twl4030.dtsi
index b3034da00a37..ae6a17aed9ee 100644
--- a/arch/arm/boot/dts/twl4030.dtsi
+++ b/arch/arm/boot/dts/twl4030.dtsi
@@ -47,6 +47,12 @@
47 regulator-max-microvolt = <3150000>; 47 regulator-max-microvolt = <3150000>;
48 }; 48 };
49 49
50 vmmc2: regulator-vmmc2 {
51 compatible = "ti,twl4030-vmmc2";
52 regulator-min-microvolt = <1850000>;
53 regulator-max-microvolt = <3150000>;
54 };
55
50 vusb1v5: regulator-vusb1v5 { 56 vusb1v5: regulator-vusb1v5 {
51 compatible = "ti,twl4030-vusb1v5"; 57 compatible = "ti,twl4030-vusb1v5";
52 }; 58 };
diff --git a/arch/arm/boot/dts/vf610.dtsi b/arch/arm/boot/dts/vf610.dtsi
index e1eb7dadda80..67d929cf9804 100644
--- a/arch/arm/boot/dts/vf610.dtsi
+++ b/arch/arm/boot/dts/vf610.dtsi
@@ -442,8 +442,8 @@
442 compatible = "fsl,mvf600-fec"; 442 compatible = "fsl,mvf600-fec";
443 reg = <0x400d0000 0x1000>; 443 reg = <0x400d0000 0x1000>;
444 interrupts = <0 78 0x04>; 444 interrupts = <0 78 0x04>;
445 clocks = <&clks VF610_CLK_ENET>, 445 clocks = <&clks VF610_CLK_ENET0>,
446 <&clks VF610_CLK_ENET>, 446 <&clks VF610_CLK_ENET0>,
447 <&clks VF610_CLK_ENET>; 447 <&clks VF610_CLK_ENET>;
448 clock-names = "ipg", "ahb", "ptp"; 448 clock-names = "ipg", "ahb", "ptp";
449 status = "disabled"; 449 status = "disabled";
@@ -453,8 +453,8 @@
453 compatible = "fsl,mvf600-fec"; 453 compatible = "fsl,mvf600-fec";
454 reg = <0x400d1000 0x1000>; 454 reg = <0x400d1000 0x1000>;
455 interrupts = <0 79 0x04>; 455 interrupts = <0 79 0x04>;
456 clocks = <&clks VF610_CLK_ENET>, 456 clocks = <&clks VF610_CLK_ENET1>,
457 <&clks VF610_CLK_ENET>, 457 <&clks VF610_CLK_ENET1>,
458 <&clks VF610_CLK_ENET>; 458 <&clks VF610_CLK_ENET>;
459 clock-names = "ipg", "ahb", "ptp"; 459 clock-names = "ipg", "ahb", "ptp";
460 status = "disabled"; 460 status = "disabled";
diff --git a/arch/arm/common/edma.c b/arch/arm/common/edma.c
index a432e6c1dac1..39ad030ac0c7 100644
--- a/arch/arm/common/edma.c
+++ b/arch/arm/common/edma.c
@@ -26,7 +26,6 @@
26#include <linux/io.h> 26#include <linux/io.h>
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/edma.h> 28#include <linux/edma.h>
29#include <linux/err.h>
30#include <linux/of_address.h> 29#include <linux/of_address.h>
31#include <linux/of_device.h> 30#include <linux/of_device.h>
32#include <linux/of_dma.h> 31#include <linux/of_dma.h>
diff --git a/arch/arm/common/mcpm_platsmp.c b/arch/arm/common/mcpm_platsmp.c
index 510e5b13aa2e..1bc34c7567fd 100644
--- a/arch/arm/common/mcpm_platsmp.c
+++ b/arch/arm/common/mcpm_platsmp.c
@@ -19,7 +19,7 @@
19#include <asm/smp.h> 19#include <asm/smp.h>
20#include <asm/smp_plat.h> 20#include <asm/smp_plat.h>
21 21
22static int __cpuinit mcpm_boot_secondary(unsigned int cpu, struct task_struct *idle) 22static int mcpm_boot_secondary(unsigned int cpu, struct task_struct *idle)
23{ 23{
24 unsigned int mpidr, pcpu, pcluster, ret; 24 unsigned int mpidr, pcpu, pcluster, ret;
25 extern void secondary_startup(void); 25 extern void secondary_startup(void);
@@ -40,7 +40,7 @@ static int __cpuinit mcpm_boot_secondary(unsigned int cpu, struct task_struct *i
40 return 0; 40 return 0;
41} 41}
42 42
43static void __cpuinit mcpm_secondary_init(unsigned int cpu) 43static void mcpm_secondary_init(unsigned int cpu)
44{ 44{
45 mcpm_cpu_powered_up(); 45 mcpm_cpu_powered_up();
46} 46}
diff --git a/arch/arm/configs/da8xx_omapl_defconfig b/arch/arm/configs/da8xx_omapl_defconfig
index 7c868139bdb0..1571bea48bed 100644
--- a/arch/arm/configs/da8xx_omapl_defconfig
+++ b/arch/arm/configs/da8xx_omapl_defconfig
@@ -102,6 +102,8 @@ CONFIG_SND_SOC=m
102CONFIG_SND_DAVINCI_SOC=m 102CONFIG_SND_DAVINCI_SOC=m
103# CONFIG_HID_SUPPORT is not set 103# CONFIG_HID_SUPPORT is not set
104# CONFIG_USB_SUPPORT is not set 104# CONFIG_USB_SUPPORT is not set
105CONFIG_DMADEVICES=y
106CONFIG_TI_EDMA=y
105CONFIG_EXT2_FS=y 107CONFIG_EXT2_FS=y
106CONFIG_EXT3_FS=y 108CONFIG_EXT3_FS=y
107CONFIG_XFS_FS=m 109CONFIG_XFS_FS=m
diff --git a/arch/arm/configs/davinci_all_defconfig b/arch/arm/configs/davinci_all_defconfig
index c86fd75e181a..ab2f7378352c 100644
--- a/arch/arm/configs/davinci_all_defconfig
+++ b/arch/arm/configs/davinci_all_defconfig
@@ -162,6 +162,8 @@ CONFIG_LEDS_TRIGGERS=y
162CONFIG_LEDS_TRIGGER_TIMER=m 162CONFIG_LEDS_TRIGGER_TIMER=m
163CONFIG_LEDS_TRIGGER_HEARTBEAT=m 163CONFIG_LEDS_TRIGGER_HEARTBEAT=m
164CONFIG_RTC_CLASS=y 164CONFIG_RTC_CLASS=y
165CONFIG_DMADEVICES=y
166CONFIG_TI_EDMA=y
165CONFIG_EXT2_FS=y 167CONFIG_EXT2_FS=y
166CONFIG_EXT3_FS=y 168CONFIG_EXT3_FS=y
167CONFIG_XFS_FS=m 169CONFIG_XFS_FS=m
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index fe0bdc361d2c..6e572c64cf5a 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -53,6 +53,7 @@ CONFIG_IP_PNP=y
53CONFIG_IP_PNP_DHCP=y 53CONFIG_IP_PNP_DHCP=y
54CONFIG_DEVTMPFS=y 54CONFIG_DEVTMPFS=y
55CONFIG_DEVTMPFS_MOUNT=y 55CONFIG_DEVTMPFS_MOUNT=y
56CONFIG_OMAP_OCP2SCP=y
56CONFIG_BLK_DEV_SD=y 57CONFIG_BLK_DEV_SD=y
57CONFIG_ATA=y 58CONFIG_ATA=y
58CONFIG_SATA_AHCI_PLATFORM=y 59CONFIG_SATA_AHCI_PLATFORM=y
@@ -61,6 +62,7 @@ CONFIG_SATA_MV=y
61CONFIG_NETDEVICES=y 62CONFIG_NETDEVICES=y
62CONFIG_SUN4I_EMAC=y 63CONFIG_SUN4I_EMAC=y
63CONFIG_NET_CALXEDA_XGMAC=y 64CONFIG_NET_CALXEDA_XGMAC=y
65CONFIG_KS8851=y
64CONFIG_SMSC911X=y 66CONFIG_SMSC911X=y
65CONFIG_STMMAC_ETH=y 67CONFIG_STMMAC_ETH=y
66CONFIG_MDIO_SUN4I=y 68CONFIG_MDIO_SUN4I=y
@@ -89,6 +91,7 @@ CONFIG_I2C_DESIGNWARE_PLATFORM=y
89CONFIG_I2C_SIRF=y 91CONFIG_I2C_SIRF=y
90CONFIG_I2C_TEGRA=y 92CONFIG_I2C_TEGRA=y
91CONFIG_SPI=y 93CONFIG_SPI=y
94CONFIG_SPI_OMAP24XX=y
92CONFIG_SPI_PL022=y 95CONFIG_SPI_PL022=y
93CONFIG_SPI_SIRF=y 96CONFIG_SPI_SIRF=y
94CONFIG_SPI_TEGRA114=y 97CONFIG_SPI_TEGRA114=y
@@ -111,11 +114,12 @@ CONFIG_FB_SIMPLE=y
111CONFIG_USB=y 114CONFIG_USB=y
112CONFIG_USB_XHCI_HCD=y 115CONFIG_USB_XHCI_HCD=y
113CONFIG_USB_EHCI_HCD=y 116CONFIG_USB_EHCI_HCD=y
114CONFIG_USB_EHCI_MXC=y
115CONFIG_USB_EHCI_TEGRA=y 117CONFIG_USB_EHCI_TEGRA=y
116CONFIG_USB_EHCI_HCD_PLATFORM=y 118CONFIG_USB_EHCI_HCD_PLATFORM=y
117CONFIG_USB_ISP1760_HCD=y 119CONFIG_USB_ISP1760_HCD=y
118CONFIG_USB_STORAGE=y 120CONFIG_USB_STORAGE=y
121CONFIG_USB_CHIPIDEA=y
122CONFIG_USB_CHIPIDEA_HOST=y
119CONFIG_AB8500_USB=y 123CONFIG_AB8500_USB=y
120CONFIG_NOP_USB_XCEIV=y 124CONFIG_NOP_USB_XCEIV=y
121CONFIG_OMAP_USB2=y 125CONFIG_OMAP_USB2=y
diff --git a/arch/arm/configs/nhk8815_defconfig b/arch/arm/configs/nhk8815_defconfig
index 35f8cf299fa2..263ae3869e32 100644
--- a/arch/arm/configs/nhk8815_defconfig
+++ b/arch/arm/configs/nhk8815_defconfig
@@ -1,6 +1,8 @@
1# CONFIG_LOCALVERSION_AUTO is not set 1# CONFIG_LOCALVERSION_AUTO is not set
2# CONFIG_SWAP is not set 2# CONFIG_SWAP is not set
3CONFIG_SYSVIPC=y 3CONFIG_SYSVIPC=y
4CONFIG_NO_HZ_IDLE=y
5CONFIG_HIGH_RES_TIMERS=y
4CONFIG_IKCONFIG=y 6CONFIG_IKCONFIG=y
5CONFIG_IKCONFIG_PROC=y 7CONFIG_IKCONFIG_PROC=y
6CONFIG_LOG_BUF_SHIFT=14 8CONFIG_LOG_BUF_SHIFT=14
@@ -48,7 +50,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
48CONFIG_MTD=y 50CONFIG_MTD=y
49CONFIG_MTD_TESTS=m 51CONFIG_MTD_TESTS=m
50CONFIG_MTD_CMDLINE_PARTS=y 52CONFIG_MTD_CMDLINE_PARTS=y
51CONFIG_MTD_CHAR=y
52CONFIG_MTD_BLOCK=y 53CONFIG_MTD_BLOCK=y
53CONFIG_MTD_NAND_ECC_SMC=y 54CONFIG_MTD_NAND_ECC_SMC=y
54CONFIG_MTD_NAND=y 55CONFIG_MTD_NAND=y
@@ -94,8 +95,10 @@ CONFIG_I2C_GPIO=y
94CONFIG_I2C_NOMADIK=y 95CONFIG_I2C_NOMADIK=y
95CONFIG_DEBUG_GPIO=y 96CONFIG_DEBUG_GPIO=y
96# CONFIG_HWMON is not set 97# CONFIG_HWMON is not set
98CONFIG_REGULATOR=y
97CONFIG_MMC=y 99CONFIG_MMC=y
98CONFIG_MMC_CLKGATE=y 100CONFIG_MMC_UNSAFE_RESUME=y
101# CONFIG_MMC_BLOCK_BOUNCE is not set
99CONFIG_MMC_ARMMMCI=y 102CONFIG_MMC_ARMMMCI=y
100CONFIG_NEW_LEDS=y 103CONFIG_NEW_LEDS=y
101CONFIG_LEDS_CLASS=y 104CONFIG_LEDS_CLASS=y
diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h
index accefe099182..e406d575c94f 100644
--- a/arch/arm/include/asm/arch_timer.h
+++ b/arch/arm/include/asm/arch_timer.h
@@ -89,7 +89,7 @@ static inline u64 arch_counter_get_cntvct(void)
89 return cval; 89 return cval;
90} 90}
91 91
92static inline void __cpuinit arch_counter_set_user_access(void) 92static inline void arch_counter_set_user_access(void)
93{ 93{
94 u32 cntkctl; 94 u32 cntkctl;
95 95
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index 76ab5ca50610..47cd974e57ea 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -149,7 +149,6 @@ ENDPROC(lookup_processor_type)
149 * r5 = proc_info pointer in physical address space 149 * r5 = proc_info pointer in physical address space
150 * r9 = cpuid (preserved) 150 * r9 = cpuid (preserved)
151 */ 151 */
152 __CPUINIT
153__lookup_processor_type: 152__lookup_processor_type:
154 adr r3, __lookup_processor_type_data 153 adr r3, __lookup_processor_type_data
155 ldmia r3, {r4 - r6} 154 ldmia r3, {r4 - r6}
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 75f14cc3e073..b361de143756 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -87,7 +87,6 @@ ENTRY(stext)
87ENDPROC(stext) 87ENDPROC(stext)
88 88
89#ifdef CONFIG_SMP 89#ifdef CONFIG_SMP
90 __CPUINIT
91ENTRY(secondary_startup) 90ENTRY(secondary_startup)
92 /* 91 /*
93 * Common entry point for secondary CPUs. 92 * Common entry point for secondary CPUs.
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 45e8935cae4e..9cf6063020ae 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -343,7 +343,6 @@ __turn_mmu_on_loc:
343 .long __turn_mmu_on_end 343 .long __turn_mmu_on_end
344 344
345#if defined(CONFIG_SMP) 345#if defined(CONFIG_SMP)
346 __CPUINIT
347ENTRY(secondary_startup) 346ENTRY(secondary_startup)
348 /* 347 /*
349 * Common entry point for secondary CPUs. 348 * Common entry point for secondary CPUs.
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 1fd749ee4a1b..7b95de601357 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -1020,7 +1020,7 @@ out_mdbgen:
1020 cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu)); 1020 cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
1021} 1021}
1022 1022
1023static int __cpuinit dbg_reset_notify(struct notifier_block *self, 1023static int dbg_reset_notify(struct notifier_block *self,
1024 unsigned long action, void *cpu) 1024 unsigned long action, void *cpu)
1025{ 1025{
1026 if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE) 1026 if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE)
@@ -1029,7 +1029,7 @@ static int __cpuinit dbg_reset_notify(struct notifier_block *self,
1029 return NOTIFY_OK; 1029 return NOTIFY_OK;
1030} 1030}
1031 1031
1032static struct notifier_block __cpuinitdata dbg_reset_nb = { 1032static struct notifier_block dbg_reset_nb = {
1033 .notifier_call = dbg_reset_notify, 1033 .notifier_call = dbg_reset_notify,
1034}; 1034};
1035 1035
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 1f2740e3dbc0..aebe0e99c153 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -157,8 +157,8 @@ static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
157 * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading 157 * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
158 * junk values out of them. 158 * junk values out of them.
159 */ 159 */
160static int __cpuinit cpu_pmu_notify(struct notifier_block *b, 160static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
161 unsigned long action, void *hcpu) 161 void *hcpu)
162{ 162{
163 if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING) 163 if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
164 return NOTIFY_DONE; 164 return NOTIFY_DONE;
@@ -171,7 +171,7 @@ static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
171 return NOTIFY_OK; 171 return NOTIFY_OK;
172} 172}
173 173
174static struct notifier_block __cpuinitdata cpu_pmu_hotplug_notifier = { 174static struct notifier_block cpu_pmu_hotplug_notifier = {
175 .notifier_call = cpu_pmu_notify, 175 .notifier_call = cpu_pmu_notify,
176}; 176};
177 177
diff --git a/arch/arm/kernel/psci_smp.c b/arch/arm/kernel/psci_smp.c
index 219f1d73572a..70ded3fb42d9 100644
--- a/arch/arm/kernel/psci_smp.c
+++ b/arch/arm/kernel/psci_smp.c
@@ -46,8 +46,7 @@
46 46
47extern void secondary_startup(void); 47extern void secondary_startup(void);
48 48
49static int __cpuinit psci_boot_secondary(unsigned int cpu, 49static int psci_boot_secondary(unsigned int cpu, struct task_struct *idle)
50 struct task_struct *idle)
51{ 50{
52 if (psci_ops.cpu_on) 51 if (psci_ops.cpu_on)
53 return psci_ops.cpu_on(cpu_logical_map(cpu), 52 return psci_ops.cpu_on(cpu_logical_map(cpu),
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index c5fb5469054b..c2b4f8f0be9a 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -58,7 +58,7 @@ struct secondary_data secondary_data;
58 * control for which core is the next to come out of the secondary 58 * control for which core is the next to come out of the secondary
59 * boot "holding pen" 59 * boot "holding pen"
60 */ 60 */
61volatile int __cpuinitdata pen_release = -1; 61volatile int pen_release = -1;
62 62
63enum ipi_msg_type { 63enum ipi_msg_type {
64 IPI_WAKEUP, 64 IPI_WAKEUP,
@@ -86,7 +86,7 @@ static unsigned long get_arch_pgd(pgd_t *pgd)
86 return pgdir >> ARCH_PGD_SHIFT; 86 return pgdir >> ARCH_PGD_SHIFT;
87} 87}
88 88
89int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle) 89int __cpu_up(unsigned int cpu, struct task_struct *idle)
90{ 90{
91 int ret; 91 int ret;
92 92
@@ -138,7 +138,7 @@ void __init smp_init_cpus(void)
138 smp_ops.smp_init_cpus(); 138 smp_ops.smp_init_cpus();
139} 139}
140 140
141int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) 141int boot_secondary(unsigned int cpu, struct task_struct *idle)
142{ 142{
143 if (smp_ops.smp_boot_secondary) 143 if (smp_ops.smp_boot_secondary)
144 return smp_ops.smp_boot_secondary(cpu, idle); 144 return smp_ops.smp_boot_secondary(cpu, idle);
@@ -170,7 +170,7 @@ static int platform_cpu_disable(unsigned int cpu)
170/* 170/*
171 * __cpu_disable runs on the processor to be shutdown. 171 * __cpu_disable runs on the processor to be shutdown.
172 */ 172 */
173int __cpuinit __cpu_disable(void) 173int __cpu_disable(void)
174{ 174{
175 unsigned int cpu = smp_processor_id(); 175 unsigned int cpu = smp_processor_id();
176 int ret; 176 int ret;
@@ -216,7 +216,7 @@ static DECLARE_COMPLETION(cpu_died);
216 * called on the thread which is asking for a CPU to be shutdown - 216 * called on the thread which is asking for a CPU to be shutdown -
217 * waits until shutdown has completed, or it is timed out. 217 * waits until shutdown has completed, or it is timed out.
218 */ 218 */
219void __cpuinit __cpu_die(unsigned int cpu) 219void __cpu_die(unsigned int cpu)
220{ 220{
221 if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) { 221 if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
222 pr_err("CPU%u: cpu didn't die\n", cpu); 222 pr_err("CPU%u: cpu didn't die\n", cpu);
@@ -306,7 +306,7 @@ void __ref cpu_die(void)
306 * Called by both boot and secondaries to move global data into 306 * Called by both boot and secondaries to move global data into
307 * per-processor storage. 307 * per-processor storage.
308 */ 308 */
309static void __cpuinit smp_store_cpu_info(unsigned int cpuid) 309static void smp_store_cpu_info(unsigned int cpuid)
310{ 310{
311 struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid); 311 struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);
312 312
@@ -322,7 +322,7 @@ static void percpu_timer_setup(void);
322 * This is the secondary CPU boot entry. We're using this CPUs 322 * This is the secondary CPU boot entry. We're using this CPUs
323 * idle thread stack, but a set of temporary page tables. 323 * idle thread stack, but a set of temporary page tables.
324 */ 324 */
325asmlinkage void __cpuinit secondary_start_kernel(void) 325asmlinkage void secondary_start_kernel(void)
326{ 326{
327 struct mm_struct *mm = &init_mm; 327 struct mm_struct *mm = &init_mm;
328 unsigned int cpu; 328 unsigned int cpu;
@@ -521,7 +521,7 @@ static void broadcast_timer_set_mode(enum clock_event_mode mode,
521{ 521{
522} 522}
523 523
524static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt) 524static void broadcast_timer_setup(struct clock_event_device *evt)
525{ 525{
526 evt->name = "dummy_timer"; 526 evt->name = "dummy_timer";
527 evt->features = CLOCK_EVT_FEAT_ONESHOT | 527 evt->features = CLOCK_EVT_FEAT_ONESHOT |
@@ -550,7 +550,7 @@ int local_timer_register(struct local_timer_ops *ops)
550} 550}
551#endif 551#endif
552 552
553static void __cpuinit percpu_timer_setup(void) 553static void percpu_timer_setup(void)
554{ 554{
555 unsigned int cpu = smp_processor_id(); 555 unsigned int cpu = smp_processor_id();
556 struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu); 556 struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index f6fd1d4398c6..25956204ef23 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -187,7 +187,7 @@ core_initcall(twd_cpufreq_init);
187 187
188#endif 188#endif
189 189
190static void __cpuinit twd_calibrate_rate(void) 190static void twd_calibrate_rate(void)
191{ 191{
192 unsigned long count; 192 unsigned long count;
193 u64 waitjiffies; 193 u64 waitjiffies;
@@ -265,7 +265,7 @@ static void twd_get_clock(struct device_node *np)
265/* 265/*
266 * Setup the local clock events for a CPU. 266 * Setup the local clock events for a CPU.
267 */ 267 */
268static int __cpuinit twd_timer_setup(struct clock_event_device *clk) 268static int twd_timer_setup(struct clock_event_device *clk)
269{ 269{
270 struct clock_event_device **this_cpu_clk; 270 struct clock_event_device **this_cpu_clk;
271 int cpu = smp_processor_id(); 271 int cpu = smp_processor_id();
@@ -308,7 +308,7 @@ static int __cpuinit twd_timer_setup(struct clock_event_device *clk)
308 return 0; 308 return 0;
309} 309}
310 310
311static struct local_timer_ops twd_lt_ops __cpuinitdata = { 311static struct local_timer_ops twd_lt_ops = {
312 .setup = twd_timer_setup, 312 .setup = twd_timer_setup,
313 .stop = twd_timer_stop, 313 .stop = twd_timer_stop,
314}; 314};
diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
index 64dbfa57204a..5306de350133 100644
--- a/arch/arm/lib/delay.c
+++ b/arch/arm/lib/delay.c
@@ -86,7 +86,7 @@ void __init register_current_timer_delay(const struct delay_timer *timer)
86 } 86 }
87} 87}
88 88
89unsigned long __cpuinit calibrate_delay_is_known(void) 89unsigned long calibrate_delay_is_known(void)
90{ 90{
91 delay_calibrated = true; 91 delay_calibrated = true;
92 return lpj_fine; 92 return lpj_fine;
diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c
index afbc439f11d4..4cdb61c54459 100644
--- a/arch/arm/mach-davinci/board-dm365-evm.c
+++ b/arch/arm/mach-davinci/board-dm365-evm.c
@@ -505,7 +505,7 @@ static struct vpbe_output dm365evm_vpbe_outputs[] = {
505/* 505/*
506 * Amplifiers on the board 506 * Amplifiers on the board
507 */ 507 */
508struct ths7303_platform_data ths7303_pdata = { 508static struct ths7303_platform_data ths7303_pdata = {
509 .ch_1 = 3, 509 .ch_1 = 3,
510 .ch_2 = 3, 510 .ch_2 = 3,
511 .ch_3 = 3, 511 .ch_3 = 3,
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
index 42ef53f62c6c..86100d179694 100644
--- a/arch/arm/mach-davinci/dm355.c
+++ b/arch/arm/mach-davinci/dm355.c
@@ -860,7 +860,7 @@ static struct platform_device dm355_vpbe_display = {
860 }, 860 },
861}; 861};
862 862
863struct venc_platform_data dm355_venc_pdata = { 863static struct venc_platform_data dm355_venc_pdata = {
864 .setup_pinmux = dm355_vpbe_setup_pinmux, 864 .setup_pinmux = dm355_vpbe_setup_pinmux,
865 .setup_clock = dm355_venc_setup_clock, 865 .setup_clock = dm355_venc_setup_clock,
866}; 866};
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index fa7af5eda52d..dad28029ba9b 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -1349,7 +1349,7 @@ static struct platform_device dm365_vpbe_display = {
1349 }, 1349 },
1350}; 1350};
1351 1351
1352struct venc_platform_data dm365_venc_pdata = { 1352static struct venc_platform_data dm365_venc_pdata = {
1353 .setup_pinmux = dm365_vpbe_setup_pinmux, 1353 .setup_pinmux = dm365_vpbe_setup_pinmux,
1354 .setup_clock = dm365_venc_setup_clock, 1354 .setup_clock = dm365_venc_setup_clock,
1355}; 1355};
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index 855d4a7b462d..5952e68c76c4 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -92,6 +92,7 @@ config SOC_EXYNOS5440
92 bool "SAMSUNG EXYNOS5440" 92 bool "SAMSUNG EXYNOS5440"
93 default y 93 default y
94 depends on ARCH_EXYNOS5 94 depends on ARCH_EXYNOS5
95 select ARCH_DMA_ADDR_T_64BIT if ARM_LPAE
95 select ARCH_HAS_OPP 96 select ARCH_HAS_OPP
96 select HAVE_ARM_ARCH_TIMER 97 select HAVE_ARM_ARCH_TIMER
97 select AUTO_ZRELADDR 98 select AUTO_ZRELADDR
diff --git a/arch/arm/mach-exynos/Makefile b/arch/arm/mach-exynos/Makefile
index e970a7a4e278..53696154aead 100644
--- a/arch/arm/mach-exynos/Makefile
+++ b/arch/arm/mach-exynos/Makefile
@@ -14,7 +14,7 @@ obj- :=
14 14
15obj-$(CONFIG_ARCH_EXYNOS) += common.o 15obj-$(CONFIG_ARCH_EXYNOS) += common.o
16 16
17obj-$(CONFIG_PM) += pm.o 17obj-$(CONFIG_S5P_PM) += pm.o
18obj-$(CONFIG_PM_GENERIC_DOMAINS) += pm_domains.o 18obj-$(CONFIG_PM_GENERIC_DOMAINS) += pm_domains.o
19obj-$(CONFIG_CPU_IDLE) += cpuidle.o 19obj-$(CONFIG_CPU_IDLE) += cpuidle.o
20 20
diff --git a/arch/arm/mach-exynos/common.c b/arch/arm/mach-exynos/common.c
index 164685bd25c8..ba95e5db2501 100644
--- a/arch/arm/mach-exynos/common.c
+++ b/arch/arm/mach-exynos/common.c
@@ -58,7 +58,6 @@ static const char name_exynos5440[] = "EXYNOS5440";
58 58
59static void exynos4_map_io(void); 59static void exynos4_map_io(void);
60static void exynos5_map_io(void); 60static void exynos5_map_io(void);
61static void exynos5440_map_io(void);
62static int exynos_init(void); 61static int exynos_init(void);
63 62
64static struct cpu_table cpu_ids[] __initdata = { 63static struct cpu_table cpu_ids[] __initdata = {
@@ -95,7 +94,6 @@ static struct cpu_table cpu_ids[] __initdata = {
95 }, { 94 }, {
96 .idcode = EXYNOS5440_SOC_ID, 95 .idcode = EXYNOS5440_SOC_ID,
97 .idmask = EXYNOS5_SOC_MASK, 96 .idmask = EXYNOS5_SOC_MASK,
98 .map_io = exynos5440_map_io,
99 .init = exynos_init, 97 .init = exynos_init,
100 .name = name_exynos5440, 98 .name = name_exynos5440,
101 }, 99 },
@@ -150,11 +148,6 @@ static struct map_desc exynos4_iodesc[] __initdata = {
150 .length = SZ_64K, 148 .length = SZ_64K,
151 .type = MT_DEVICE, 149 .type = MT_DEVICE,
152 }, { 150 }, {
153 .virtual = (unsigned long)S3C_VA_UART,
154 .pfn = __phys_to_pfn(EXYNOS4_PA_UART),
155 .length = SZ_512K,
156 .type = MT_DEVICE,
157 }, {
158 .virtual = (unsigned long)S5P_VA_CMU, 151 .virtual = (unsigned long)S5P_VA_CMU,
159 .pfn = __phys_to_pfn(EXYNOS4_PA_CMU), 152 .pfn = __phys_to_pfn(EXYNOS4_PA_CMU),
160 .length = SZ_128K, 153 .length = SZ_128K,
@@ -268,20 +261,6 @@ static struct map_desc exynos5_iodesc[] __initdata = {
268 .pfn = __phys_to_pfn(EXYNOS5_PA_PMU), 261 .pfn = __phys_to_pfn(EXYNOS5_PA_PMU),
269 .length = SZ_64K, 262 .length = SZ_64K,
270 .type = MT_DEVICE, 263 .type = MT_DEVICE,
271 }, {
272 .virtual = (unsigned long)S3C_VA_UART,
273 .pfn = __phys_to_pfn(EXYNOS5_PA_UART),
274 .length = SZ_512K,
275 .type = MT_DEVICE,
276 },
277};
278
279static struct map_desc exynos5440_iodesc0[] __initdata = {
280 {
281 .virtual = (unsigned long)S3C_VA_UART,
282 .pfn = __phys_to_pfn(EXYNOS5440_PA_UART0),
283 .length = SZ_512K,
284 .type = MT_DEVICE,
285 }, 264 },
286}; 265};
287 266
@@ -388,11 +367,6 @@ static void __init exynos5_map_io(void)
388 iotable_init(exynos5250_iodesc, ARRAY_SIZE(exynos5250_iodesc)); 367 iotable_init(exynos5250_iodesc, ARRAY_SIZE(exynos5250_iodesc));
389} 368}
390 369
391static void __init exynos5440_map_io(void)
392{
393 iotable_init(exynos5440_iodesc0, ARRAY_SIZE(exynos5440_iodesc0));
394}
395
396void __init exynos_init_time(void) 370void __init exynos_init_time(void)
397{ 371{
398 of_clk_init(NULL); 372 of_clk_init(NULL);
diff --git a/arch/arm/mach-exynos/common.h b/arch/arm/mach-exynos/common.h
index 3e156bcddcb4..972490fc09d6 100644
--- a/arch/arm/mach-exynos/common.h
+++ b/arch/arm/mach-exynos/common.h
@@ -97,6 +97,5 @@ struct exynos_pmu_conf {
97}; 97};
98 98
99extern void exynos_sys_powerdown_conf(enum sys_powerdown mode); 99extern void exynos_sys_powerdown_conf(enum sys_powerdown mode);
100extern void s3c_cpu_resume(void);
101 100
102#endif /* __ARCH_ARM_MACH_EXYNOS_COMMON_H */ 101#endif /* __ARCH_ARM_MACH_EXYNOS_COMMON_H */
diff --git a/arch/arm/mach-exynos/cpuidle.c b/arch/arm/mach-exynos/cpuidle.c
index 17a18ff3d71e..225ee8431c72 100644
--- a/arch/arm/mach-exynos/cpuidle.c
+++ b/arch/arm/mach-exynos/cpuidle.c
@@ -25,6 +25,7 @@
25#include <mach/regs-pmu.h> 25#include <mach/regs-pmu.h>
26 26
27#include <plat/cpu.h> 27#include <plat/cpu.h>
28#include <plat/pm.h>
28 29
29#include "common.h" 30#include "common.h"
30 31
diff --git a/arch/arm/mach-exynos/headsmp.S b/arch/arm/mach-exynos/headsmp.S
index 5364d4bfa8bc..cdd9d91e9933 100644
--- a/arch/arm/mach-exynos/headsmp.S
+++ b/arch/arm/mach-exynos/headsmp.S
@@ -13,8 +13,6 @@
13#include <linux/linkage.h> 13#include <linux/linkage.h>
14#include <linux/init.h> 14#include <linux/init.h>
15 15
16 __CPUINIT
17
18/* 16/*
19 * exynos4 specific entry point for secondary CPUs. This provides 17 * exynos4 specific entry point for secondary CPUs. This provides
20 * a "holding pen" into which all secondary cores are held until we're 18 * a "holding pen" into which all secondary cores are held until we're
diff --git a/arch/arm/mach-exynos/include/mach/memory.h b/arch/arm/mach-exynos/include/mach/memory.h
index 374ef2cf7152..2a4cdb7cb326 100644
--- a/arch/arm/mach-exynos/include/mach/memory.h
+++ b/arch/arm/mach-exynos/include/mach/memory.h
@@ -15,8 +15,13 @@
15 15
16#define PLAT_PHYS_OFFSET UL(0x40000000) 16#define PLAT_PHYS_OFFSET UL(0x40000000)
17 17
18#ifndef CONFIG_ARM_LPAE
18/* Maximum of 256MiB in one bank */ 19/* Maximum of 256MiB in one bank */
19#define MAX_PHYSMEM_BITS 32 20#define MAX_PHYSMEM_BITS 32
20#define SECTION_SIZE_BITS 28 21#define SECTION_SIZE_BITS 28
22#else
23#define MAX_PHYSMEM_BITS 36
24#define SECTION_SIZE_BITS 31
25#endif
21 26
22#endif /* __ASM_ARCH_MEMORY_H */ 27#endif /* __ASM_ARCH_MEMORY_H */
diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c
index deba1308ff16..58b43e6f9262 100644
--- a/arch/arm/mach-exynos/platsmp.c
+++ b/arch/arm/mach-exynos/platsmp.c
@@ -75,7 +75,7 @@ static void __iomem *scu_base_addr(void)
75 75
76static DEFINE_SPINLOCK(boot_lock); 76static DEFINE_SPINLOCK(boot_lock);
77 77
78static void __cpuinit exynos_secondary_init(unsigned int cpu) 78static void exynos_secondary_init(unsigned int cpu)
79{ 79{
80 /* 80 /*
81 * let the primary processor know we're out of the 81 * let the primary processor know we're out of the
@@ -90,7 +90,7 @@ static void __cpuinit exynos_secondary_init(unsigned int cpu)
90 spin_unlock(&boot_lock); 90 spin_unlock(&boot_lock);
91} 91}
92 92
93static int __cpuinit exynos_boot_secondary(unsigned int cpu, struct task_struct *idle) 93static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
94{ 94{
95 unsigned long timeout; 95 unsigned long timeout;
96 unsigned long phys_cpu = cpu_logical_map(cpu); 96 unsigned long phys_cpu = cpu_logical_map(cpu);
diff --git a/arch/arm/mach-exynos/pm.c b/arch/arm/mach-exynos/pm.c
index 41c20692a13f..c679db577269 100644
--- a/arch/arm/mach-exynos/pm.c
+++ b/arch/arm/mach-exynos/pm.c
@@ -217,6 +217,9 @@ static __init int exynos_pm_drvinit(void)
217 struct clk *pll_base; 217 struct clk *pll_base;
218 unsigned int tmp; 218 unsigned int tmp;
219 219
220 if (soc_is_exynos5440())
221 return 0;
222
220 s3c_pm_init(); 223 s3c_pm_init();
221 224
222 /* All wakeup disable */ 225 /* All wakeup disable */
@@ -340,6 +343,9 @@ static struct syscore_ops exynos_pm_syscore_ops = {
340 343
341static __init int exynos_pm_syscore_init(void) 344static __init int exynos_pm_syscore_init(void)
342{ 345{
346 if (soc_is_exynos5440())
347 return 0;
348
343 register_syscore_ops(&exynos_pm_syscore_ops); 349 register_syscore_ops(&exynos_pm_syscore_ops);
344 return 0; 350 return 0;
345} 351}
diff --git a/arch/arm/mach-footbridge/dc21285.c b/arch/arm/mach-footbridge/dc21285.c
index a7cd2cf5e08d..3490a24f969e 100644
--- a/arch/arm/mach-footbridge/dc21285.c
+++ b/arch/arm/mach-footbridge/dc21285.c
@@ -276,8 +276,6 @@ int __init dc21285_setup(int nr, struct pci_sys_data *sys)
276 276
277 sys->mem_offset = DC21285_PCI_MEM; 277 sys->mem_offset = DC21285_PCI_MEM;
278 278
279 pci_ioremap_io(0, DC21285_PCI_IO);
280
281 pci_add_resource_offset(&sys->resources, &res[0], sys->mem_offset); 279 pci_add_resource_offset(&sys->resources, &res[0], sys->mem_offset);
282 pci_add_resource_offset(&sys->resources, &res[1], sys->mem_offset); 280 pci_add_resource_offset(&sys->resources, &res[1], sys->mem_offset);
283 281
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c
index dc5d6becd8c7..88815795fe26 100644
--- a/arch/arm/mach-highbank/highbank.c
+++ b/arch/arm/mach-highbank/highbank.c
@@ -115,6 +115,7 @@ static int highbank_platform_notifier(struct notifier_block *nb,
115{ 115{
116 struct resource *res; 116 struct resource *res;
117 int reg = -1; 117 int reg = -1;
118 u32 val;
118 struct device *dev = __dev; 119 struct device *dev = __dev;
119 120
120 if (event != BUS_NOTIFY_ADD_DEVICE) 121 if (event != BUS_NOTIFY_ADD_DEVICE)
@@ -141,10 +142,10 @@ static int highbank_platform_notifier(struct notifier_block *nb,
141 return NOTIFY_DONE; 142 return NOTIFY_DONE;
142 143
143 if (of_property_read_bool(dev->of_node, "dma-coherent")) { 144 if (of_property_read_bool(dev->of_node, "dma-coherent")) {
144 writel(0xff31, sregs_base + reg); 145 val = readl(sregs_base + reg);
146 writel(val | 0xff01, sregs_base + reg);
145 set_dma_ops(dev, &arm_coherent_dma_ops); 147 set_dma_ops(dev, &arm_coherent_dma_ops);
146 } else 148 }
147 writel(0, sregs_base + reg);
148 149
149 return NOTIFY_OK; 150 return NOTIFY_OK;
150} 151}
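
The highbank.c hunk above replaces a blind register write with a read-modify-write so that bits already programmed in the coherency control register are preserved. A minimal sketch of that pattern, assuming the register window is already ioremap()ed; the helper name and bit mask below are illustrative, not the Highbank driver's own:

#include <linux/io.h>
#include <linux/types.h>

/* Illustrative helper: OR new bits into an MMIO register without
 * clobbering whatever was programmed there previously. */
static void example_set_bits(void __iomem *base, unsigned int reg, u32 bits)
{
	u32 val;

	val = readl(base + reg);        /* read current contents */
	writel(val | bits, base + reg); /* write back with the new bits set */
}
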
diff --git a/arch/arm/mach-highbank/platsmp.c b/arch/arm/mach-highbank/platsmp.c
index a984573e0d02..32d75cf55cbc 100644
--- a/arch/arm/mach-highbank/platsmp.c
+++ b/arch/arm/mach-highbank/platsmp.c
@@ -24,7 +24,7 @@
24 24
25extern void secondary_startup(void); 25extern void secondary_startup(void);
26 26
27static int __cpuinit highbank_boot_secondary(unsigned int cpu, struct task_struct *idle) 27static int highbank_boot_secondary(unsigned int cpu, struct task_struct *idle)
28{ 28{
29 highbank_set_cpu_jump(cpu, secondary_startup); 29 highbank_set_cpu_jump(cpu, secondary_startup);
30 arch_send_wakeup_ipi_mask(cpumask_of(cpu)); 30 arch_send_wakeup_ipi_mask(cpumask_of(cpu));
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
index 4282e99f5ca1..86567d980b07 100644
--- a/arch/arm/mach-imx/clk-imx6q.c
+++ b/arch/arm/mach-imx/clk-imx6q.c
@@ -199,7 +199,8 @@ static const char *pcie_axi_sels[] = { "axi", "ahb", };
199static const char *ssi_sels[] = { "pll3_pfd2_508m", "pll3_pfd3_454m", "pll4_post_div", }; 199static const char *ssi_sels[] = { "pll3_pfd2_508m", "pll3_pfd3_454m", "pll4_post_div", };
200static const char *usdhc_sels[] = { "pll2_pfd2_396m", "pll2_pfd0_352m", }; 200static const char *usdhc_sels[] = { "pll2_pfd2_396m", "pll2_pfd0_352m", };
201static const char *enfc_sels[] = { "pll2_pfd0_352m", "pll2_bus", "pll3_usb_otg", "pll2_pfd2_396m", }; 201static const char *enfc_sels[] = { "pll2_pfd0_352m", "pll2_bus", "pll3_usb_otg", "pll2_pfd2_396m", };
202static const char *emi_sels[] = { "axi", "pll3_usb_otg", "pll2_pfd2_396m", "pll2_pfd0_352m", }; 202static const char *emi_sels[] = { "pll2_pfd2_396m", "pll3_usb_otg", "axi", "pll2_pfd0_352m", };
203static const char *emi_slow_sels[] = { "axi", "pll3_usb_otg", "pll2_pfd2_396m", "pll2_pfd0_352m", };
203static const char *vdo_axi_sels[] = { "axi", "ahb", }; 204static const char *vdo_axi_sels[] = { "axi", "ahb", };
204static const char *vpu_axi_sels[] = { "axi", "pll2_pfd2_396m", "pll2_pfd0_352m", }; 205static const char *vpu_axi_sels[] = { "axi", "pll2_pfd2_396m", "pll2_pfd0_352m", };
205static const char *cko1_sels[] = { "pll3_usb_otg", "pll2_bus", "pll1_sys", "pll5_video_div", 206static const char *cko1_sels[] = { "pll3_usb_otg", "pll2_bus", "pll1_sys", "pll5_video_div",
@@ -392,7 +393,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
392 clk[usdhc4_sel] = imx_clk_mux("usdhc4_sel", base + 0x1c, 19, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels)); 393 clk[usdhc4_sel] = imx_clk_mux("usdhc4_sel", base + 0x1c, 19, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels));
393 clk[enfc_sel] = imx_clk_mux("enfc_sel", base + 0x2c, 16, 2, enfc_sels, ARRAY_SIZE(enfc_sels)); 394 clk[enfc_sel] = imx_clk_mux("enfc_sel", base + 0x2c, 16, 2, enfc_sels, ARRAY_SIZE(enfc_sels));
394 clk[emi_sel] = imx_clk_mux("emi_sel", base + 0x1c, 27, 2, emi_sels, ARRAY_SIZE(emi_sels)); 395 clk[emi_sel] = imx_clk_mux("emi_sel", base + 0x1c, 27, 2, emi_sels, ARRAY_SIZE(emi_sels));
395 clk[emi_slow_sel] = imx_clk_mux("emi_slow_sel", base + 0x1c, 29, 2, emi_sels, ARRAY_SIZE(emi_sels)); 396 clk[emi_slow_sel] = imx_clk_mux("emi_slow_sel", base + 0x1c, 29, 2, emi_slow_sels, ARRAY_SIZE(emi_slow_sels));
396 clk[vdo_axi_sel] = imx_clk_mux("vdo_axi_sel", base + 0x18, 11, 1, vdo_axi_sels, ARRAY_SIZE(vdo_axi_sels)); 397 clk[vdo_axi_sel] = imx_clk_mux("vdo_axi_sel", base + 0x18, 11, 1, vdo_axi_sels, ARRAY_SIZE(vdo_axi_sels));
397 clk[vpu_axi_sel] = imx_clk_mux("vpu_axi_sel", base + 0x18, 14, 2, vpu_axi_sels, ARRAY_SIZE(vpu_axi_sels)); 398 clk[vpu_axi_sel] = imx_clk_mux("vpu_axi_sel", base + 0x18, 14, 2, vpu_axi_sels, ARRAY_SIZE(vpu_axi_sels));
398 clk[cko1_sel] = imx_clk_mux("cko1_sel", base + 0x60, 0, 4, cko1_sels, ARRAY_SIZE(cko1_sels)); 399 clk[cko1_sel] = imx_clk_mux("cko1_sel", base + 0x60, 0, 4, cko1_sels, ARRAY_SIZE(cko1_sels));
diff --git a/arch/arm/mach-imx/clk-vf610.c b/arch/arm/mach-imx/clk-vf610.c
index d617c0b7c809..b169a396d93b 100644
--- a/arch/arm/mach-imx/clk-vf610.c
+++ b/arch/arm/mach-imx/clk-vf610.c
@@ -183,6 +183,8 @@ static void __init vf610_clocks_init(struct device_node *ccm_node)
183 clk[VF610_CLK_ENET_TS_SEL] = imx_clk_mux("enet_ts_sel", CCM_CSCMR2, 0, 3, enet_ts_sels, 7); 183 clk[VF610_CLK_ENET_TS_SEL] = imx_clk_mux("enet_ts_sel", CCM_CSCMR2, 0, 3, enet_ts_sels, 7);
184 clk[VF610_CLK_ENET] = imx_clk_gate("enet", "enet_sel", CCM_CSCDR1, 24); 184 clk[VF610_CLK_ENET] = imx_clk_gate("enet", "enet_sel", CCM_CSCDR1, 24);
185 clk[VF610_CLK_ENET_TS] = imx_clk_gate("enet_ts", "enet_ts_sel", CCM_CSCDR1, 23); 185 clk[VF610_CLK_ENET_TS] = imx_clk_gate("enet_ts", "enet_ts_sel", CCM_CSCDR1, 23);
186 clk[VF610_CLK_ENET0] = imx_clk_gate2("enet0", "ipg_bus", CCM_CCGR9, CCM_CCGRx_CGn(0));
187 clk[VF610_CLK_ENET1] = imx_clk_gate2("enet1", "ipg_bus", CCM_CCGR9, CCM_CCGRx_CGn(1));
186 188
187 clk[VF610_CLK_PIT] = imx_clk_gate2("pit", "ipg_bus", CCM_CCGR1, CCM_CCGRx_CGn(7)); 189 clk[VF610_CLK_PIT] = imx_clk_gate2("pit", "ipg_bus", CCM_CCGR1, CCM_CCGRx_CGn(7));
188 190
diff --git a/arch/arm/mach-imx/mx27.h b/arch/arm/mach-imx/mx27.h
index e074616d54ca..8a65f192e7f3 100644
--- a/arch/arm/mach-imx/mx27.h
+++ b/arch/arm/mach-imx/mx27.h
@@ -135,7 +135,7 @@
135#define MX27_INT_GPT4 (NR_IRQS_LEGACY + 4) 135#define MX27_INT_GPT4 (NR_IRQS_LEGACY + 4)
136#define MX27_INT_RTIC (NR_IRQS_LEGACY + 5) 136#define MX27_INT_RTIC (NR_IRQS_LEGACY + 5)
137#define MX27_INT_CSPI3 (NR_IRQS_LEGACY + 6) 137#define MX27_INT_CSPI3 (NR_IRQS_LEGACY + 6)
138#define MX27_INT_SDHC (NR_IRQS_LEGACY + 7) 138#define MX27_INT_MSHC (NR_IRQS_LEGACY + 7)
139#define MX27_INT_GPIO (NR_IRQS_LEGACY + 8) 139#define MX27_INT_GPIO (NR_IRQS_LEGACY + 8)
140#define MX27_INT_SDHC3 (NR_IRQS_LEGACY + 9) 140#define MX27_INT_SDHC3 (NR_IRQS_LEGACY + 9)
141#define MX27_INT_SDHC2 (NR_IRQS_LEGACY + 10) 141#define MX27_INT_SDHC2 (NR_IRQS_LEGACY + 10)
diff --git a/arch/arm/mach-imx/platsmp.c b/arch/arm/mach-imx/platsmp.c
index c6e1ab544882..1f24c1fdfea4 100644
--- a/arch/arm/mach-imx/platsmp.c
+++ b/arch/arm/mach-imx/platsmp.c
@@ -53,7 +53,7 @@ void imx_scu_standby_enable(void)
53 writel_relaxed(val, scu_base); 53 writel_relaxed(val, scu_base);
54} 54}
55 55
56static int __cpuinit imx_boot_secondary(unsigned int cpu, struct task_struct *idle) 56static int imx_boot_secondary(unsigned int cpu, struct task_struct *idle)
57{ 57{
58 imx_set_cpu_jump(cpu, v7_secondary_startup); 58 imx_set_cpu_jump(cpu, v7_secondary_startup);
59 imx_enable_cpu(cpu, true); 59 imx_enable_cpu(cpu, true);
diff --git a/arch/arm/mach-keystone/keystone.c b/arch/arm/mach-keystone/keystone.c
index fe4d9ff93a7e..b661c5c2870a 100644
--- a/arch/arm/mach-keystone/keystone.c
+++ b/arch/arm/mach-keystone/keystone.c
@@ -49,7 +49,7 @@ static const char *keystone_match[] __initconst = {
49 NULL, 49 NULL,
50}; 50};
51 51
52void keystone_restart(char mode, const char *cmd) 52void keystone_restart(enum reboot_mode mode, const char *cmd)
53{ 53{
54 u32 val; 54 u32 val;
55 55
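
The keystone.c prototype change above follows the conversion of machine restart hooks from a char mode argument to enum reboot_mode from <linux/reboot.h>. A hedged sketch of a restart hook using the new signature; the register, bit values and names are assumptions for illustration, not taken from the Keystone code:

#include <linux/io.h>
#include <linux/reboot.h>
#include <linux/types.h>

static void __iomem *example_rstctrl;	/* assumed mapped during machine init */

static void example_restart(enum reboot_mode mode, const char *cmd)
{
	u32 val = readl(example_rstctrl);

	/* Choose an illustrative reset type from the symbolic mode. */
	if (mode == REBOOT_HARD)
		val |= 0x1;	/* assumed hard-reset request bit */
	else
		val |= 0x2;	/* assumed soft-reset request bit */

	writel(val, example_rstctrl);
}
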
diff --git a/arch/arm/mach-keystone/platsmp.c b/arch/arm/mach-keystone/platsmp.c
index 1d4181e1daf2..14378e3fef16 100644
--- a/arch/arm/mach-keystone/platsmp.c
+++ b/arch/arm/mach-keystone/platsmp.c
@@ -21,7 +21,7 @@
21 21
22#include "keystone.h" 22#include "keystone.h"
23 23
24static int __cpuinit keystone_smp_boot_secondary(unsigned int cpu, 24static int keystone_smp_boot_secondary(unsigned int cpu,
25 struct task_struct *idle) 25 struct task_struct *idle)
26{ 26{
27 unsigned long start = virt_to_phys(&secondary_startup); 27 unsigned long start = virt_to_phys(&secondary_startup);
diff --git a/arch/arm/mach-msm/headsmp.S b/arch/arm/mach-msm/headsmp.S
index bcd5af223dea..6c62c3f82fe6 100644
--- a/arch/arm/mach-msm/headsmp.S
+++ b/arch/arm/mach-msm/headsmp.S
@@ -11,8 +11,6 @@
11#include <linux/linkage.h> 11#include <linux/linkage.h>
12#include <linux/init.h> 12#include <linux/init.h>
13 13
14 __CPUINIT
15
16/* 14/*
17 * MSM specific entry point for secondary CPUs. This provides 15 * MSM specific entry point for secondary CPUs. This provides
18 * a "holding pen" into which all secondary cores are held until we're 16 * a "holding pen" into which all secondary cores are held until we're
diff --git a/arch/arm/mach-msm/platsmp.c b/arch/arm/mach-msm/platsmp.c
index 00cdb0a5dac8..3f06edcdd0ce 100644
--- a/arch/arm/mach-msm/platsmp.c
+++ b/arch/arm/mach-msm/platsmp.c
@@ -38,7 +38,7 @@ static inline int get_core_count(void)
38 return ((read_cpuid_id() >> 4) & 3) + 1; 38 return ((read_cpuid_id() >> 4) & 3) + 1;
39} 39}
40 40
41static void __cpuinit msm_secondary_init(unsigned int cpu) 41static void msm_secondary_init(unsigned int cpu)
42{ 42{
43 /* 43 /*
44 * let the primary processor know we're out of the 44 * let the primary processor know we're out of the
@@ -54,7 +54,7 @@ static void __cpuinit msm_secondary_init(unsigned int cpu)
54 spin_unlock(&boot_lock); 54 spin_unlock(&boot_lock);
55} 55}
56 56
57static __cpuinit void prepare_cold_cpu(unsigned int cpu) 57static void prepare_cold_cpu(unsigned int cpu)
58{ 58{
59 int ret; 59 int ret;
60 ret = scm_set_boot_addr(virt_to_phys(msm_secondary_startup), 60 ret = scm_set_boot_addr(virt_to_phys(msm_secondary_startup),
@@ -73,7 +73,7 @@ static __cpuinit void prepare_cold_cpu(unsigned int cpu)
73 "address\n"); 73 "address\n");
74} 74}
75 75
76static int __cpuinit msm_boot_secondary(unsigned int cpu, struct task_struct *idle) 76static int msm_boot_secondary(unsigned int cpu, struct task_struct *idle)
77{ 77{
78 unsigned long timeout; 78 unsigned long timeout;
79 static int cold_boot_done; 79 static int cold_boot_done;
diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c
index b6418fd5fe0d..8697cfc0d0b6 100644
--- a/arch/arm/mach-msm/timer.c
+++ b/arch/arm/mach-msm/timer.c
@@ -139,7 +139,7 @@ static struct clocksource msm_clocksource = {
139}; 139};
140 140
141#ifdef CONFIG_LOCAL_TIMERS 141#ifdef CONFIG_LOCAL_TIMERS
142static int __cpuinit msm_local_timer_setup(struct clock_event_device *evt) 142static int msm_local_timer_setup(struct clock_event_device *evt)
143{ 143{
144 /* Use existing clock_event for cpu 0 */ 144 /* Use existing clock_event for cpu 0 */
145 if (!smp_processor_id()) 145 if (!smp_processor_id())
@@ -164,7 +164,7 @@ static void msm_local_timer_stop(struct clock_event_device *evt)
164 disable_percpu_irq(evt->irq); 164 disable_percpu_irq(evt->irq);
165} 165}
166 166
167static struct local_timer_ops msm_local_timer_ops __cpuinitdata = { 167static struct local_timer_ops msm_local_timer_ops = {
168 .setup = msm_local_timer_setup, 168 .setup = msm_local_timer_setup,
169 .stop = msm_local_timer_stop, 169 .stop = msm_local_timer_stop,
170}; 170};
diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
index be117591f7f2..4c24303ec481 100644
--- a/arch/arm/mach-mvebu/coherency.c
+++ b/arch/arm/mach-mvebu/coherency.c
@@ -28,7 +28,7 @@
28#include <asm/cacheflush.h> 28#include <asm/cacheflush.h>
29#include "armada-370-xp.h" 29#include "armada-370-xp.h"
30 30
31unsigned long __cpuinitdata coherency_phys_base; 31unsigned long coherency_phys_base;
32static void __iomem *coherency_base; 32static void __iomem *coherency_base;
33static void __iomem *coherency_cpu_base; 33static void __iomem *coherency_cpu_base;
34 34
diff --git a/arch/arm/mach-mvebu/headsmp.S b/arch/arm/mach-mvebu/headsmp.S
index 7147300c8af2..8a1b0c96e9ec 100644
--- a/arch/arm/mach-mvebu/headsmp.S
+++ b/arch/arm/mach-mvebu/headsmp.S
@@ -21,8 +21,6 @@
21#include <linux/linkage.h> 21#include <linux/linkage.h>
22#include <linux/init.h> 22#include <linux/init.h>
23 23
24 __CPUINIT
25
26/* 24/*
27 * Armada XP specific entry point for secondary CPUs. 25 * Armada XP specific entry point for secondary CPUs.
28 * We add the CPU to the coherency fabric and then jump to secondary 26 * We add the CPU to the coherency fabric and then jump to secondary
diff --git a/arch/arm/mach-mvebu/platsmp.c b/arch/arm/mach-mvebu/platsmp.c
index 93f2f3ab45f1..ce81d3031405 100644
--- a/arch/arm/mach-mvebu/platsmp.c
+++ b/arch/arm/mach-mvebu/platsmp.c
@@ -71,13 +71,12 @@ void __init set_secondary_cpus_clock(void)
71 } 71 }
72} 72}
73 73
74static void __cpuinit armada_xp_secondary_init(unsigned int cpu) 74static void armada_xp_secondary_init(unsigned int cpu)
75{ 75{
76 armada_xp_mpic_smp_cpu_init(); 76 armada_xp_mpic_smp_cpu_init();
77} 77}
78 78
79static int __cpuinit armada_xp_boot_secondary(unsigned int cpu, 79static int armada_xp_boot_secondary(unsigned int cpu, struct task_struct *idle)
80 struct task_struct *idle)
81{ 80{
82 pr_info("Booting CPU %d\n", cpu); 81 pr_info("Booting CPU %d\n", cpu);
83 82
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 627fa7e41fba..3eed0006d189 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -62,7 +62,7 @@ config SOC_OMAP5
62 select HAVE_SMP 62 select HAVE_SMP
63 select COMMON_CLK 63 select COMMON_CLK
64 select HAVE_ARM_ARCH_TIMER 64 select HAVE_ARM_ARCH_TIMER
65 select ARM_ERRATA_798181 65 select ARM_ERRATA_798181 if SMP
66 66
67config SOC_AM33XX 67config SOC_AM33XX
68 bool "AM33XX support" 68 bool "AM33XX support"
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index e5fbfed69aa2..be5d005ebad2 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -15,6 +15,7 @@
15#include <linux/of_irq.h> 15#include <linux/of_irq.h>
16#include <linux/of_platform.h> 16#include <linux/of_platform.h>
17#include <linux/irqdomain.h> 17#include <linux/irqdomain.h>
18#include <linux/clk.h>
18 19
19#include <asm/mach/arch.h> 20#include <asm/mach/arch.h>
20 21
@@ -35,6 +36,21 @@ static struct of_device_id omap_dt_match_table[] __initdata = {
35 { } 36 { }
36}; 37};
37 38
39/*
40 * Create alias for USB host PHY clock.
41 * Remove this when clock phandle can be provided via DT
42 */
43static void __init legacy_init_ehci_clk(char *clkname)
44{
45 int ret;
46
47 ret = clk_add_alias("main_clk", NULL, clkname, NULL);
48 if (ret) {
49 pr_err("%s:Failed to add main_clk alias to %s :%d\n",
50 __func__, clkname, ret);
51 }
52}
53
38static void __init omap_generic_init(void) 54static void __init omap_generic_init(void)
39{ 55{
40 omap_sdrc_init(NULL, NULL); 56 omap_sdrc_init(NULL, NULL);
@@ -45,10 +61,15 @@ static void __init omap_generic_init(void)
45 * HACK: call display setup code for selected boards to enable omapdss. 61 * HACK: call display setup code for selected boards to enable omapdss.
46 * This will be removed when omapdss supports DT. 62 * This will be removed when omapdss supports DT.
47 */ 63 */
48 if (of_machine_is_compatible("ti,omap4-panda")) 64 if (of_machine_is_compatible("ti,omap4-panda")) {
49 omap4_panda_display_init_of(); 65 omap4_panda_display_init_of();
66 legacy_init_ehci_clk("auxclk3_ck");
67
68 }
50 else if (of_machine_is_compatible("ti,omap4-sdp")) 69 else if (of_machine_is_compatible("ti,omap4-sdp"))
51 omap_4430sdp_display_init_of(); 70 omap_4430sdp_display_init_of();
71 else if (of_machine_is_compatible("ti,omap5-uevm"))
72 legacy_init_ehci_clk("auxclk1_ck");
52} 73}
53 74
54#ifdef CONFIG_SOC_OMAP2420 75#ifdef CONFIG_SOC_OMAP2420
diff --git a/arch/arm/mach-omap2/omap-headsmp.S b/arch/arm/mach-omap2/omap-headsmp.S
index 4ea308114165..75e92952c18e 100644
--- a/arch/arm/mach-omap2/omap-headsmp.S
+++ b/arch/arm/mach-omap2/omap-headsmp.S
@@ -20,8 +20,6 @@
20 20
21#include "omap44xx.h" 21#include "omap44xx.h"
22 22
23 __CPUINIT
24
25/* Physical address needed since MMU not enabled yet on secondary core */ 23/* Physical address needed since MMU not enabled yet on secondary core */
26#define AUX_CORE_BOOT0_PA 0x48281800 24#define AUX_CORE_BOOT0_PA 0x48281800
27 25
diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
index f993a4188701..f991016e2a6a 100644
--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
@@ -291,7 +291,7 @@ int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state)
291 * @cpu : CPU ID 291 * @cpu : CPU ID
292 * @power_state: CPU low power state. 292 * @power_state: CPU low power state.
293 */ 293 */
294int __cpuinit omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state) 294int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state)
295{ 295{
296 struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu); 296 struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu);
297 unsigned int cpu_state = 0; 297 unsigned int cpu_state = 0;
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index 98a11463a843..8708b2a9da45 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -51,7 +51,7 @@ void __iomem *omap4_get_scu_base(void)
51 return scu_base; 51 return scu_base;
52} 52}
53 53
54static void __cpuinit omap4_secondary_init(unsigned int cpu) 54static void omap4_secondary_init(unsigned int cpu)
55{ 55{
56 /* 56 /*
57 * Configure ACTRL and enable NS SMP bit access on CPU1 on HS device. 57 * Configure ACTRL and enable NS SMP bit access on CPU1 on HS device.
@@ -72,7 +72,7 @@ static void __cpuinit omap4_secondary_init(unsigned int cpu)
72 spin_unlock(&boot_lock); 72 spin_unlock(&boot_lock);
73} 73}
74 74
75static int __cpuinit omap4_boot_secondary(unsigned int cpu, struct task_struct *idle) 75static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
76{ 76{
77 static struct clockdomain *cpu1_clkdm; 77 static struct clockdomain *cpu1_clkdm;
78 static bool booted; 78 static bool booted;
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
index f8bb3b9b6a76..813c61558a5f 100644
--- a/arch/arm/mach-omap2/omap-wakeupgen.c
+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
@@ -323,8 +323,8 @@ static void irq_save_secure_context(void)
323#endif 323#endif
324 324
325#ifdef CONFIG_HOTPLUG_CPU 325#ifdef CONFIG_HOTPLUG_CPU
326static int __cpuinit irq_cpu_hotplug_notify(struct notifier_block *self, 326static int irq_cpu_hotplug_notify(struct notifier_block *self,
327 unsigned long action, void *hcpu) 327 unsigned long action, void *hcpu)
328{ 328{
329 unsigned int cpu = (unsigned int)hcpu; 329 unsigned int cpu = (unsigned int)hcpu;
330 330
diff --git a/arch/arm/mach-prima2/headsmp.S b/arch/arm/mach-prima2/headsmp.S
index 5b8a408d8921..d86fe33c5f53 100644
--- a/arch/arm/mach-prima2/headsmp.S
+++ b/arch/arm/mach-prima2/headsmp.S
@@ -9,8 +9,6 @@
9#include <linux/linkage.h> 9#include <linux/linkage.h>
10#include <linux/init.h> 10#include <linux/init.h>
11 11
12 __CPUINIT
13
14/* 12/*
15 * SIRFSOC specific entry point for secondary CPUs. This provides 13 * SIRFSOC specific entry point for secondary CPUs. This provides
16 * a "holding pen" into which all secondary cores are held until we're 14 * a "holding pen" into which all secondary cores are held until we're
diff --git a/arch/arm/mach-prima2/platsmp.c b/arch/arm/mach-prima2/platsmp.c
index 1c3de7bed841..3dbcb1ab6e37 100644
--- a/arch/arm/mach-prima2/platsmp.c
+++ b/arch/arm/mach-prima2/platsmp.c
@@ -44,7 +44,7 @@ void __init sirfsoc_map_scu(void)
44 scu_base = (void __iomem *)SIRFSOC_VA(base); 44 scu_base = (void __iomem *)SIRFSOC_VA(base);
45} 45}
46 46
47static void __cpuinit sirfsoc_secondary_init(unsigned int cpu) 47static void sirfsoc_secondary_init(unsigned int cpu)
48{ 48{
49 /* 49 /*
50 * let the primary processor know we're out of the 50 * let the primary processor know we're out of the
@@ -65,7 +65,7 @@ static struct of_device_id rsc_ids[] = {
65 {}, 65 {},
66}; 66};
67 67
68static int __cpuinit sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle) 68static int sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle)
69{ 69{
70 unsigned long timeout; 70 unsigned long timeout;
71 struct device_node *np; 71 struct device_node *np;
diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c
index f6726bb4eb95..3a3362fa793e 100644
--- a/arch/arm/mach-pxa/em-x270.c
+++ b/arch/arm/mach-pxa/em-x270.c
@@ -477,16 +477,24 @@ static int em_x270_usb_hub_init(void)
477 /* USB Hub power-on and reset */ 477 /* USB Hub power-on and reset */
478 gpio_direction_output(usb_hub_reset, 1); 478 gpio_direction_output(usb_hub_reset, 1);
479 gpio_direction_output(GPIO9_USB_VBUS_EN, 0); 479 gpio_direction_output(GPIO9_USB_VBUS_EN, 0);
480 regulator_enable(em_x270_usb_ldo); 480 err = regulator_enable(em_x270_usb_ldo);
481 if (err)
482 goto err_free_rst_gpio;
483
481 gpio_set_value(usb_hub_reset, 0); 484 gpio_set_value(usb_hub_reset, 0);
482 gpio_set_value(usb_hub_reset, 1); 485 gpio_set_value(usb_hub_reset, 1);
483 regulator_disable(em_x270_usb_ldo); 486 regulator_disable(em_x270_usb_ldo);
484 regulator_enable(em_x270_usb_ldo); 487 err = regulator_enable(em_x270_usb_ldo);
488 if (err)
489 goto err_free_rst_gpio;
490
485 gpio_set_value(usb_hub_reset, 0); 491 gpio_set_value(usb_hub_reset, 0);
486 gpio_set_value(GPIO9_USB_VBUS_EN, 1); 492 gpio_set_value(GPIO9_USB_VBUS_EN, 1);
487 493
488 return 0; 494 return 0;
489 495
496err_free_rst_gpio:
497 gpio_free(usb_hub_reset);
490err_free_vbus_gpio: 498err_free_vbus_gpio:
491 gpio_free(GPIO9_USB_VBUS_EN); 499 gpio_free(GPIO9_USB_VBUS_EN);
492err_free_usb_ldo: 500err_free_usb_ldo:
@@ -592,7 +600,7 @@ err_irq:
592 return err; 600 return err;
593} 601}
594 602
595static void em_x270_mci_setpower(struct device *dev, unsigned int vdd) 603static int em_x270_mci_setpower(struct device *dev, unsigned int vdd)
596{ 604{
597 struct pxamci_platform_data* p_d = dev->platform_data; 605 struct pxamci_platform_data* p_d = dev->platform_data;
598 606
@@ -600,10 +608,11 @@ static void em_x270_mci_setpower(struct device *dev, unsigned int vdd)
600 int vdd_uV = (2000 + (vdd - __ffs(MMC_VDD_20_21)) * 100) * 1000; 608 int vdd_uV = (2000 + (vdd - __ffs(MMC_VDD_20_21)) * 100) * 1000;
601 609
602 regulator_set_voltage(em_x270_sdio_ldo, vdd_uV, vdd_uV); 610 regulator_set_voltage(em_x270_sdio_ldo, vdd_uV, vdd_uV);
603 regulator_enable(em_x270_sdio_ldo); 611 return regulator_enable(em_x270_sdio_ldo);
604 } else { 612 } else {
605 regulator_disable(em_x270_sdio_ldo); 613 regulator_disable(em_x270_sdio_ldo);
606 } 614 }
615 return 0;
607} 616}
608 617
609static void em_x270_mci_exit(struct device *dev, void *data) 618static void em_x270_mci_exit(struct device *dev, void *data)
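The em-x270 hunks above (and the mainstone, pcm990, poodle, spitz and stargate2 hunks that follow) convert the pxamci setpower callback from void to int so that regulator_enable() failures, which the regulator API marks as must-check, can be propagated instead of ignored. A hedged, minimal sketch of the resulting pattern; the identifiers and header path are illustrative assumptions, not taken from the patch:

#include <linux/err.h>
#include <linux/regulator/consumer.h>
#include <linux/platform_data/mmc-pxamci.h>   /* assumed location of pxamci_platform_data */

static struct regulator *example_sdio_ldo;     /* illustrative regulator handle */

/* setpower now returns an error code instead of silently dropping it */
static int example_mci_setpower(struct device *dev, unsigned int vdd)
{
        struct pxamci_platform_data *pdata = dev->platform_data;

        if ((1 << vdd) & pdata->ocr_mask)
                return regulator_enable(example_sdio_ldo);

        regulator_disable(example_sdio_ldo);
        return 0;
}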
diff --git a/arch/arm/mach-pxa/mainstone.c b/arch/arm/mach-pxa/mainstone.c
index d2c652318376..dd70343c8708 100644
--- a/arch/arm/mach-pxa/mainstone.c
+++ b/arch/arm/mach-pxa/mainstone.c
@@ -408,7 +408,7 @@ static int mainstone_mci_init(struct device *dev, irq_handler_t mstone_detect_in
408 return err; 408 return err;
409} 409}
410 410
411static void mainstone_mci_setpower(struct device *dev, unsigned int vdd) 411static int mainstone_mci_setpower(struct device *dev, unsigned int vdd)
412{ 412{
413 struct pxamci_platform_data* p_d = dev->platform_data; 413 struct pxamci_platform_data* p_d = dev->platform_data;
414 414
@@ -420,6 +420,7 @@ static void mainstone_mci_setpower(struct device *dev, unsigned int vdd)
420 printk(KERN_DEBUG "%s: off\n", __func__); 420 printk(KERN_DEBUG "%s: off\n", __func__);
421 MST_MSCWR1 &= ~MST_MSCWR1_MMC_ON; 421 MST_MSCWR1 &= ~MST_MSCWR1_MMC_ON;
422 } 422 }
423 return 0;
423} 424}
424 425
425static void mainstone_mci_exit(struct device *dev, void *data) 426static void mainstone_mci_exit(struct device *dev, void *data)
diff --git a/arch/arm/mach-pxa/pcm990-baseboard.c b/arch/arm/mach-pxa/pcm990-baseboard.c
index fb7f1d1627dc..13e5b00eae90 100644
--- a/arch/arm/mach-pxa/pcm990-baseboard.c
+++ b/arch/arm/mach-pxa/pcm990-baseboard.c
@@ -335,7 +335,7 @@ static int pcm990_mci_init(struct device *dev, irq_handler_t mci_detect_int,
335 return err; 335 return err;
336} 336}
337 337
338static void pcm990_mci_setpower(struct device *dev, unsigned int vdd) 338static int pcm990_mci_setpower(struct device *dev, unsigned int vdd)
339{ 339{
340 struct pxamci_platform_data *p_d = dev->platform_data; 340 struct pxamci_platform_data *p_d = dev->platform_data;
341 u8 val; 341 u8 val;
@@ -348,6 +348,7 @@ static void pcm990_mci_setpower(struct device *dev, unsigned int vdd)
348 val &= ~PCM990_CTRL_MMC2PWR; 348 val &= ~PCM990_CTRL_MMC2PWR;
349 349
350 pcm990_cpld_writeb(PCM990_CTRL_MMC2PWR, PCM990_CTRL_REG5); 350 pcm990_cpld_writeb(PCM990_CTRL_MMC2PWR, PCM990_CTRL_REG5);
351 return 0;
351} 352}
352 353
353static void pcm990_mci_exit(struct device *dev, void *data) 354static void pcm990_mci_exit(struct device *dev, void *data)
diff --git a/arch/arm/mach-pxa/poodle.c b/arch/arm/mach-pxa/poodle.c
index 711d37e26bd8..aedf053a1de5 100644
--- a/arch/arm/mach-pxa/poodle.c
+++ b/arch/arm/mach-pxa/poodle.c
@@ -258,7 +258,7 @@ err_free_2:
258 return err; 258 return err;
259} 259}
260 260
261static void poodle_mci_setpower(struct device *dev, unsigned int vdd) 261static int poodle_mci_setpower(struct device *dev, unsigned int vdd)
262{ 262{
263 struct pxamci_platform_data* p_d = dev->platform_data; 263 struct pxamci_platform_data* p_d = dev->platform_data;
264 264
@@ -270,6 +270,8 @@ static void poodle_mci_setpower(struct device *dev, unsigned int vdd)
270 gpio_set_value(POODLE_GPIO_SD_PWR1, 0); 270 gpio_set_value(POODLE_GPIO_SD_PWR1, 0);
271 gpio_set_value(POODLE_GPIO_SD_PWR, 0); 271 gpio_set_value(POODLE_GPIO_SD_PWR, 0);
272 } 272 }
273
274 return 0;
273} 275}
274 276
275static void poodle_mci_exit(struct device *dev, void *data) 277static void poodle_mci_exit(struct device *dev, void *data)
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index 2125df0444e7..4c29173026e8 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -598,7 +598,7 @@ static inline void spitz_spi_init(void) {}
598 * NOTE: The card detect interrupt isn't debounced so we delay it by 250ms to 598 * NOTE: The card detect interrupt isn't debounced so we delay it by 250ms to
599 * give the card a chance to fully insert/eject. 599 * give the card a chance to fully insert/eject.
600 */ 600 */
601static void spitz_mci_setpower(struct device *dev, unsigned int vdd) 601static int spitz_mci_setpower(struct device *dev, unsigned int vdd)
602{ 602{
603 struct pxamci_platform_data* p_d = dev->platform_data; 603 struct pxamci_platform_data* p_d = dev->platform_data;
604 604
@@ -606,6 +606,8 @@ static void spitz_mci_setpower(struct device *dev, unsigned int vdd)
606 spitz_card_pwr_ctrl(SCOOP_CPR_SD_3V, SCOOP_CPR_SD_3V); 606 spitz_card_pwr_ctrl(SCOOP_CPR_SD_3V, SCOOP_CPR_SD_3V);
607 else 607 else
608 spitz_card_pwr_ctrl(SCOOP_CPR_SD_3V, 0x0); 608 spitz_card_pwr_ctrl(SCOOP_CPR_SD_3V, 0x0);
609
610 return 0;
609} 611}
610 612
611static struct pxamci_platform_data spitz_mci_platform_data = { 613static struct pxamci_platform_data spitz_mci_platform_data = {
diff --git a/arch/arm/mach-pxa/stargate2.c b/arch/arm/mach-pxa/stargate2.c
index 88fde43c948c..62aea3e835f3 100644
--- a/arch/arm/mach-pxa/stargate2.c
+++ b/arch/arm/mach-pxa/stargate2.c
@@ -734,9 +734,10 @@ static int stargate2_mci_init(struct device *dev,
734 * 734 *
735 * Very simple control. Either it is on or off and is controlled by 735 * Very simple control. Either it is on or off and is controlled by
736 * a gpio pin */ 736 * a gpio pin */
737static void stargate2_mci_setpower(struct device *dev, unsigned int vdd) 737static int stargate2_mci_setpower(struct device *dev, unsigned int vdd)
738{ 738{
739 gpio_set_value(SG2_SD_POWER_ENABLE, !!vdd); 739 gpio_set_value(SG2_SD_POWER_ENABLE, !!vdd);
740 return 0;
740} 741}
741 742
742static void stargate2_mci_exit(struct device *dev, void *data) 743static void stargate2_mci_exit(struct device *dev, void *data)
diff --git a/arch/arm/mach-s3c24xx/Kconfig b/arch/arm/mach-s3c24xx/Kconfig
index 6d9252e081ce..7791ac76f945 100644
--- a/arch/arm/mach-s3c24xx/Kconfig
+++ b/arch/arm/mach-s3c24xx/Kconfig
@@ -208,7 +208,7 @@ config S3C24XX_GPIO_EXTRA128
208 208
209config S3C24XX_PLL 209config S3C24XX_PLL
210 bool "Support CPUfreq changing of PLL frequency (EXPERIMENTAL)" 210 bool "Support CPUfreq changing of PLL frequency (EXPERIMENTAL)"
211 depends on ARM_S3C24XX 211 depends on ARM_S3C24XX_CPUFREQ
212 help 212 help
213 Compile in support for changing the PLL frequency from the 213 Compile in support for changing the PLL frequency from the
214 S3C24XX series CPUfreq driver. The PLL takes time to settle 214 S3C24XX series CPUfreq driver. The PLL takes time to settle
diff --git a/arch/arm/mach-s3c24xx/clock-s3c2410.c b/arch/arm/mach-s3c24xx/clock-s3c2410.c
index 34fffdf6fc1d..564553694b54 100644
--- a/arch/arm/mach-s3c24xx/clock-s3c2410.c
+++ b/arch/arm/mach-s3c24xx/clock-s3c2410.c
@@ -119,66 +119,101 @@ static struct clk init_clocks_off[] = {
119 } 119 }
120}; 120};
121 121
122static struct clk init_clocks[] = { 122static struct clk clk_lcd = {
123 { 123 .name = "lcd",
124 .name = "lcd", 124 .parent = &clk_h,
125 .parent = &clk_h, 125 .enable = s3c2410_clkcon_enable,
126 .enable = s3c2410_clkcon_enable, 126 .ctrlbit = S3C2410_CLKCON_LCDC,
127 .ctrlbit = S3C2410_CLKCON_LCDC, 127};
128 }, { 128
129 .name = "gpio", 129static struct clk clk_gpio = {
130 .parent = &clk_p, 130 .name = "gpio",
131 .enable = s3c2410_clkcon_enable, 131 .parent = &clk_p,
132 .ctrlbit = S3C2410_CLKCON_GPIO, 132 .enable = s3c2410_clkcon_enable,
133 }, { 133 .ctrlbit = S3C2410_CLKCON_GPIO,
134 .name = "usb-host", 134};
135 .parent = &clk_h, 135
136 .enable = s3c2410_clkcon_enable, 136static struct clk clk_usb_host = {
137 .ctrlbit = S3C2410_CLKCON_USBH, 137 .name = "usb-host",
138 }, { 138 .parent = &clk_h,
139 .name = "usb-device", 139 .enable = s3c2410_clkcon_enable,
140 .parent = &clk_h, 140 .ctrlbit = S3C2410_CLKCON_USBH,
141 .enable = s3c2410_clkcon_enable, 141};
142 .ctrlbit = S3C2410_CLKCON_USBD, 142
143 }, { 143static struct clk clk_usb_device = {
144 .name = "timers", 144 .name = "usb-device",
145 .parent = &clk_p, 145 .parent = &clk_h,
146 .enable = s3c2410_clkcon_enable, 146 .enable = s3c2410_clkcon_enable,
147 .ctrlbit = S3C2410_CLKCON_PWMT, 147 .ctrlbit = S3C2410_CLKCON_USBD,
148 }, { 148};
149 .name = "uart", 149
150 .devname = "s3c2410-uart.0", 150static struct clk clk_timers = {
151 .parent = &clk_p, 151 .name = "timers",
152 .enable = s3c2410_clkcon_enable, 152 .parent = &clk_p,
153 .ctrlbit = S3C2410_CLKCON_UART0, 153 .enable = s3c2410_clkcon_enable,
154 }, { 154 .ctrlbit = S3C2410_CLKCON_PWMT,
155 .name = "uart", 155};
156 .devname = "s3c2410-uart.1", 156
157 .parent = &clk_p, 157struct clk s3c24xx_clk_uart0 = {
158 .enable = s3c2410_clkcon_enable, 158 .name = "uart",
159 .ctrlbit = S3C2410_CLKCON_UART1, 159 .devname = "s3c2410-uart.0",
160 }, { 160 .parent = &clk_p,
161 .name = "uart", 161 .enable = s3c2410_clkcon_enable,
162 .devname = "s3c2410-uart.2", 162 .ctrlbit = S3C2410_CLKCON_UART0,
163 .parent = &clk_p, 163};
164 .enable = s3c2410_clkcon_enable, 164
165 .ctrlbit = S3C2410_CLKCON_UART2, 165struct clk s3c24xx_clk_uart1 = {
166 }, { 166 .name = "uart",
167 .name = "rtc", 167 .devname = "s3c2410-uart.1",
168 .parent = &clk_p, 168 .parent = &clk_p,
169 .enable = s3c2410_clkcon_enable, 169 .enable = s3c2410_clkcon_enable,
170 .ctrlbit = S3C2410_CLKCON_RTC, 170 .ctrlbit = S3C2410_CLKCON_UART1,
171 }, { 171};
172 .name = "watchdog", 172
173 .parent = &clk_p, 173struct clk s3c24xx_clk_uart2 = {
174 .ctrlbit = 0, 174 .name = "uart",
175 }, { 175 .devname = "s3c2410-uart.2",
176 .name = "usb-bus-host", 176 .parent = &clk_p,
177 .parent = &clk_usb_bus, 177 .enable = s3c2410_clkcon_enable,
178 }, { 178 .ctrlbit = S3C2410_CLKCON_UART2,
179 .name = "usb-bus-gadget", 179};
180 .parent = &clk_usb_bus, 180
181 }, 181static struct clk clk_rtc = {
182 .name = "rtc",
183 .parent = &clk_p,
184 .enable = s3c2410_clkcon_enable,
185 .ctrlbit = S3C2410_CLKCON_RTC,
186};
187
188static struct clk clk_watchdog = {
189 .name = "watchdog",
190 .parent = &clk_p,
191 .ctrlbit = 0,
192};
193
194static struct clk clk_usb_bus_host = {
195 .name = "usb-bus-host",
196 .parent = &clk_usb_bus,
197};
198
199static struct clk clk_usb_bus_gadget = {
200 .name = "usb-bus-gadget",
201 .parent = &clk_usb_bus,
202};
203
204static struct clk *init_clocks[] = {
205 &clk_lcd,
206 &clk_gpio,
207 &clk_usb_host,
208 &clk_usb_device,
209 &clk_timers,
210 &s3c24xx_clk_uart0,
211 &s3c24xx_clk_uart1,
212 &s3c24xx_clk_uart2,
213 &clk_rtc,
214 &clk_watchdog,
215 &clk_usb_bus_host,
216 &clk_usb_bus_gadget,
182}; 217};
183 218
184/* s3c2410_baseclk_add() 219/* s3c2410_baseclk_add()
@@ -195,7 +230,6 @@ int __init s3c2410_baseclk_add(void)
195{ 230{
196 unsigned long clkslow = __raw_readl(S3C2410_CLKSLOW); 231 unsigned long clkslow = __raw_readl(S3C2410_CLKSLOW);
197 unsigned long clkcon = __raw_readl(S3C2410_CLKCON); 232 unsigned long clkcon = __raw_readl(S3C2410_CLKCON);
198 struct clk *clkp;
199 struct clk *xtal; 233 struct clk *xtal;
200 int ret; 234 int ret;
201 int ptr; 235 int ptr;
@@ -207,8 +241,9 @@ int __init s3c2410_baseclk_add(void)
207 241
208 /* register clocks from clock array */ 242 /* register clocks from clock array */
209 243
210 clkp = init_clocks; 244 for (ptr = 0; ptr < ARRAY_SIZE(init_clocks); ptr++) {
211 for (ptr = 0; ptr < ARRAY_SIZE(init_clocks); ptr++, clkp++) { 245 struct clk *clkp = init_clocks[ptr];
246
212 /* ensure that we note the clock state */ 247 /* ensure that we note the clock state */
213 248
214 clkp->usage = clkcon & clkp->ctrlbit ? 1 : 0; 249 clkp->usage = clkcon & clkp->ctrlbit ? 1 : 0;
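The s3c2410 clock table is rebuilt as individually named struct clk objects plus an array of pointers, and the three UART clocks are deliberately made non-static. That is what allows the s3c2440 lookup table in the next hunk to alias them under the "s3c2440-uart.N" device names. A hedged usage sketch of what a consumer gains from those clkdev entries (pdev and the error handling are illustrative assumptions):

/* A driver probed as "s3c2440-uart.0" can now resolve its "uart" gate
 * clock through the clkdev entries added in the following hunk. */
struct clk *uart_clk;

uart_clk = clk_get(&pdev->dev, "uart");
if (!IS_ERR(uart_clk))
        clk_enable(uart_clk);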
diff --git a/arch/arm/mach-s3c24xx/clock-s3c2440.c b/arch/arm/mach-s3c24xx/clock-s3c2440.c
index 1069b5680826..aaf006d1d6dc 100644
--- a/arch/arm/mach-s3c24xx/clock-s3c2440.c
+++ b/arch/arm/mach-s3c24xx/clock-s3c2440.c
@@ -166,6 +166,9 @@ static struct clk_lookup s3c2440_clk_lookup[] = {
166 CLKDEV_INIT(NULL, "clk_uart_baud1", &s3c24xx_uclk), 166 CLKDEV_INIT(NULL, "clk_uart_baud1", &s3c24xx_uclk),
167 CLKDEV_INIT(NULL, "clk_uart_baud2", &clk_p), 167 CLKDEV_INIT(NULL, "clk_uart_baud2", &clk_p),
168 CLKDEV_INIT(NULL, "clk_uart_baud3", &s3c2440_clk_fclk_n), 168 CLKDEV_INIT(NULL, "clk_uart_baud3", &s3c2440_clk_fclk_n),
169 CLKDEV_INIT("s3c2440-uart.0", "uart", &s3c24xx_clk_uart0),
170 CLKDEV_INIT("s3c2440-uart.1", "uart", &s3c24xx_clk_uart1),
171 CLKDEV_INIT("s3c2440-uart.2", "uart", &s3c24xx_clk_uart2),
169 CLKDEV_INIT("s3c2440-camif", "camera", &s3c2440_clk_cam_upll), 172 CLKDEV_INIT("s3c2440-camif", "camera", &s3c2440_clk_cam_upll),
170}; 173};
171 174
diff --git a/arch/arm/mach-shmobile/headsmp-scu.S b/arch/arm/mach-shmobile/headsmp-scu.S
index 6f9865467258..bfd920083a3b 100644
--- a/arch/arm/mach-shmobile/headsmp-scu.S
+++ b/arch/arm/mach-shmobile/headsmp-scu.S
@@ -23,7 +23,6 @@
23#include <linux/init.h> 23#include <linux/init.h>
24#include <asm/memory.h> 24#include <asm/memory.h>
25 25
26 __CPUINIT
27/* 26/*
28 * Boot code for secondary CPUs. 27 * Boot code for secondary CPUs.
29 * 28 *
diff --git a/arch/arm/mach-shmobile/headsmp.S b/arch/arm/mach-shmobile/headsmp.S
index 559d1ce5f57e..a9d212498987 100644
--- a/arch/arm/mach-shmobile/headsmp.S
+++ b/arch/arm/mach-shmobile/headsmp.S
@@ -14,8 +14,6 @@
14#include <linux/init.h> 14#include <linux/init.h>
15#include <asm/memory.h> 15#include <asm/memory.h>
16 16
17 __CPUINIT
18
19ENTRY(shmobile_invalidate_start) 17ENTRY(shmobile_invalidate_start)
20 bl v7_invalidate_l1 18 bl v7_invalidate_l1
21 b secondary_startup 19 b secondary_startup
diff --git a/arch/arm/mach-shmobile/smp-emev2.c b/arch/arm/mach-shmobile/smp-emev2.c
index 80991b35f4ac..22a05a869d25 100644
--- a/arch/arm/mach-shmobile/smp-emev2.c
+++ b/arch/arm/mach-shmobile/smp-emev2.c
@@ -30,7 +30,7 @@
30 30
31#define EMEV2_SCU_BASE 0x1e000000 31#define EMEV2_SCU_BASE 0x1e000000
32 32
33static int __cpuinit emev2_boot_secondary(unsigned int cpu, struct task_struct *idle) 33static int emev2_boot_secondary(unsigned int cpu, struct task_struct *idle)
34{ 34{
35 arch_send_wakeup_ipi_mask(cpumask_of(cpu_logical_map(cpu))); 35 arch_send_wakeup_ipi_mask(cpumask_of(cpu_logical_map(cpu)));
36 return 0; 36 return 0;
diff --git a/arch/arm/mach-shmobile/smp-r8a7779.c b/arch/arm/mach-shmobile/smp-r8a7779.c
index 526cfaae81c1..9bdf810f2a87 100644
--- a/arch/arm/mach-shmobile/smp-r8a7779.c
+++ b/arch/arm/mach-shmobile/smp-r8a7779.c
@@ -81,7 +81,7 @@ static int r8a7779_platform_cpu_kill(unsigned int cpu)
81 return ret ? ret : 1; 81 return ret ? ret : 1;
82} 82}
83 83
84static int __cpuinit r8a7779_boot_secondary(unsigned int cpu, struct task_struct *idle) 84static int r8a7779_boot_secondary(unsigned int cpu, struct task_struct *idle)
85{ 85{
86 struct r8a7779_pm_ch *ch = NULL; 86 struct r8a7779_pm_ch *ch = NULL;
87 int ret = -EIO; 87 int ret = -EIO;
diff --git a/arch/arm/mach-shmobile/smp-sh73a0.c b/arch/arm/mach-shmobile/smp-sh73a0.c
index d613113a04bd..d5fc3ed4e315 100644
--- a/arch/arm/mach-shmobile/smp-sh73a0.c
+++ b/arch/arm/mach-shmobile/smp-sh73a0.c
@@ -48,7 +48,7 @@ void __init sh73a0_register_twd(void)
48} 48}
49#endif 49#endif
50 50
51static int __cpuinit sh73a0_boot_secondary(unsigned int cpu, struct task_struct *idle) 51static int sh73a0_boot_secondary(unsigned int cpu, struct task_struct *idle)
52{ 52{
53 cpu = cpu_logical_map(cpu); 53 cpu = cpu_logical_map(cpu);
54 54
diff --git a/arch/arm/mach-socfpga/headsmp.S b/arch/arm/mach-socfpga/headsmp.S
index 9004bfb1756e..95c115d8b5ee 100644
--- a/arch/arm/mach-socfpga/headsmp.S
+++ b/arch/arm/mach-socfpga/headsmp.S
@@ -10,7 +10,6 @@
10#include <linux/linkage.h> 10#include <linux/linkage.h>
11#include <linux/init.h> 11#include <linux/init.h>
12 12
13 __CPUINIT
14 .arch armv7-a 13 .arch armv7-a
15 14
16ENTRY(secondary_trampoline) 15ENTRY(secondary_trampoline)
diff --git a/arch/arm/mach-socfpga/platsmp.c b/arch/arm/mach-socfpga/platsmp.c
index b51ce8c7929d..5356a72bc8ce 100644
--- a/arch/arm/mach-socfpga/platsmp.c
+++ b/arch/arm/mach-socfpga/platsmp.c
@@ -29,7 +29,7 @@
29 29
30#include "core.h" 30#include "core.h"
31 31
32static int __cpuinit socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle) 32static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
33{ 33{
34 int trampoline_size = &secondary_trampoline_end - &secondary_trampoline; 34 int trampoline_size = &secondary_trampoline_end - &secondary_trampoline;
35 35
diff --git a/arch/arm/mach-spear/generic.h b/arch/arm/mach-spear/generic.h
index 904f2c907b46..a99d90a4d09c 100644
--- a/arch/arm/mach-spear/generic.h
+++ b/arch/arm/mach-spear/generic.h
@@ -37,7 +37,7 @@ void __init spear13xx_l2x0_init(void);
37void spear_restart(enum reboot_mode, const char *); 37void spear_restart(enum reboot_mode, const char *);
38 38
39void spear13xx_secondary_startup(void); 39void spear13xx_secondary_startup(void);
40void __cpuinit spear13xx_cpu_die(unsigned int cpu); 40void spear13xx_cpu_die(unsigned int cpu);
41 41
42extern struct smp_operations spear13xx_smp_ops; 42extern struct smp_operations spear13xx_smp_ops;
43 43
diff --git a/arch/arm/mach-spear/platsmp.c b/arch/arm/mach-spear/platsmp.c
index 9c4c722c954e..5c4a19887b2b 100644
--- a/arch/arm/mach-spear/platsmp.c
+++ b/arch/arm/mach-spear/platsmp.c
@@ -24,7 +24,7 @@ static DEFINE_SPINLOCK(boot_lock);
24 24
25static void __iomem *scu_base = IOMEM(VA_SCU_BASE); 25static void __iomem *scu_base = IOMEM(VA_SCU_BASE);
26 26
27static void __cpuinit spear13xx_secondary_init(unsigned int cpu) 27static void spear13xx_secondary_init(unsigned int cpu)
28{ 28{
29 /* 29 /*
30 * let the primary processor know we're out of the 30 * let the primary processor know we're out of the
@@ -40,7 +40,7 @@ static void __cpuinit spear13xx_secondary_init(unsigned int cpu)
40 spin_unlock(&boot_lock); 40 spin_unlock(&boot_lock);
41} 41}
42 42
43static int __cpuinit spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle) 43static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
44{ 44{
45 unsigned long timeout; 45 unsigned long timeout;
46 46
diff --git a/arch/arm/mach-sti/Kconfig b/arch/arm/mach-sti/Kconfig
index d04e3bfe1918..835833e3c4f8 100644
--- a/arch/arm/mach-sti/Kconfig
+++ b/arch/arm/mach-sti/Kconfig
@@ -11,8 +11,9 @@ menuconfig ARCH_STI
11 select HAVE_SMP 11 select HAVE_SMP
12 select HAVE_ARM_SCU if SMP 12 select HAVE_ARM_SCU if SMP
13 select ARCH_REQUIRE_GPIOLIB 13 select ARCH_REQUIRE_GPIOLIB
14 select ARM_ERRATA_720789
15 select ARM_ERRATA_754322 14 select ARM_ERRATA_754322
15 select ARM_ERRATA_764369
16 select ARM_ERRATA_775420
16 select PL310_ERRATA_753970 if CACHE_PL310 17 select PL310_ERRATA_753970 if CACHE_PL310
17 select PL310_ERRATA_769419 if CACHE_PL310 18 select PL310_ERRATA_769419 if CACHE_PL310
18 help 19 help
diff --git a/arch/arm/mach-sti/platsmp.c b/arch/arm/mach-sti/platsmp.c
index 977a863468fc..dce50d983a8e 100644
--- a/arch/arm/mach-sti/platsmp.c
+++ b/arch/arm/mach-sti/platsmp.c
@@ -27,7 +27,7 @@
27 27
28#include "smp.h" 28#include "smp.h"
29 29
30static void __cpuinit write_pen_release(int val) 30static void write_pen_release(int val)
31{ 31{
32 pen_release = val; 32 pen_release = val;
33 smp_wmb(); 33 smp_wmb();
@@ -37,7 +37,7 @@ static void __cpuinit write_pen_release(int val)
37 37
38static DEFINE_SPINLOCK(boot_lock); 38static DEFINE_SPINLOCK(boot_lock);
39 39
40void __cpuinit sti_secondary_init(unsigned int cpu) 40void sti_secondary_init(unsigned int cpu)
41{ 41{
42 trace_hardirqs_off(); 42 trace_hardirqs_off();
43 43
@@ -54,7 +54,7 @@ void __cpuinit sti_secondary_init(unsigned int cpu)
54 spin_unlock(&boot_lock); 54 spin_unlock(&boot_lock);
55} 55}
56 56
57int __cpuinit sti_boot_secondary(unsigned int cpu, struct task_struct *idle) 57int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
58{ 58{
59 unsigned long timeout; 59 unsigned long timeout;
60 60
diff --git a/arch/arm/mach-tegra/platsmp.c b/arch/arm/mach-tegra/platsmp.c
index 24db4ac428ae..97b33a2a2d75 100644
--- a/arch/arm/mach-tegra/platsmp.c
+++ b/arch/arm/mach-tegra/platsmp.c
@@ -35,7 +35,7 @@
35 35
36static cpumask_t tegra_cpu_init_mask; 36static cpumask_t tegra_cpu_init_mask;
37 37
38static void __cpuinit tegra_secondary_init(unsigned int cpu) 38static void tegra_secondary_init(unsigned int cpu)
39{ 39{
40 cpumask_set_cpu(cpu, &tegra_cpu_init_mask); 40 cpumask_set_cpu(cpu, &tegra_cpu_init_mask);
41} 41}
@@ -167,7 +167,7 @@ static int tegra114_boot_secondary(unsigned int cpu, struct task_struct *idle)
167 return ret; 167 return ret;
168} 168}
169 169
170static int __cpuinit tegra_boot_secondary(unsigned int cpu, 170static int tegra_boot_secondary(unsigned int cpu,
171 struct task_struct *idle) 171 struct task_struct *idle)
172{ 172{
173 if (IS_ENABLED(CONFIG_ARCH_TEGRA_2x_SOC) && tegra_chip_id == TEGRA20) 173 if (IS_ENABLED(CONFIG_ARCH_TEGRA_2x_SOC) && tegra_chip_id == TEGRA20)
diff --git a/arch/arm/mach-tegra/pm.c b/arch/arm/mach-tegra/pm.c
index 94e69bee3da5..261fec140c06 100644
--- a/arch/arm/mach-tegra/pm.c
+++ b/arch/arm/mach-tegra/pm.c
@@ -191,7 +191,7 @@ static const char *lp_state[TEGRA_MAX_SUSPEND_MODE] = {
191 [TEGRA_SUSPEND_LP0] = "LP0", 191 [TEGRA_SUSPEND_LP0] = "LP0",
192}; 192};
193 193
194static int __cpuinit tegra_suspend_enter(suspend_state_t state) 194static int tegra_suspend_enter(suspend_state_t state)
195{ 195{
196 enum tegra_suspend_mode mode = tegra_pmc_get_suspend_mode(); 196 enum tegra_suspend_mode mode = tegra_pmc_get_suspend_mode();
197 197
diff --git a/arch/arm/mach-ux500/platsmp.c b/arch/arm/mach-ux500/platsmp.c
index 14d90469392f..1f296e796a4f 100644
--- a/arch/arm/mach-ux500/platsmp.c
+++ b/arch/arm/mach-ux500/platsmp.c
@@ -54,7 +54,7 @@ static void __iomem *scu_base_addr(void)
54 54
55static DEFINE_SPINLOCK(boot_lock); 55static DEFINE_SPINLOCK(boot_lock);
56 56
57static void __cpuinit ux500_secondary_init(unsigned int cpu) 57static void ux500_secondary_init(unsigned int cpu)
58{ 58{
59 /* 59 /*
60 * let the primary processor know we're out of the 60 * let the primary processor know we're out of the
@@ -69,7 +69,7 @@ static void __cpuinit ux500_secondary_init(unsigned int cpu)
69 spin_unlock(&boot_lock); 69 spin_unlock(&boot_lock);
70} 70}
71 71
72static int __cpuinit ux500_boot_secondary(unsigned int cpu, struct task_struct *idle) 72static int ux500_boot_secondary(unsigned int cpu, struct task_struct *idle)
73{ 73{
74 unsigned long timeout; 74 unsigned long timeout;
75 75
diff --git a/arch/arm/mach-zynq/common.c b/arch/arm/mach-zynq/common.c
index 5b799c29886e..5f252569c689 100644
--- a/arch/arm/mach-zynq/common.c
+++ b/arch/arm/mach-zynq/common.c
@@ -91,7 +91,7 @@ static void __init zynq_map_io(void)
91 zynq_scu_map_io(); 91 zynq_scu_map_io();
92} 92}
93 93
94static void zynq_system_reset(char mode, const char *cmd) 94static void zynq_system_reset(enum reboot_mode mode, const char *cmd)
95{ 95{
96 zynq_slcr_system_reset(); 96 zynq_slcr_system_reset();
97} 97}
diff --git a/arch/arm/mach-zynq/common.h b/arch/arm/mach-zynq/common.h
index fbbd0e21c404..3040d219570f 100644
--- a/arch/arm/mach-zynq/common.h
+++ b/arch/arm/mach-zynq/common.h
@@ -27,7 +27,7 @@ extern void secondary_startup(void);
27extern char zynq_secondary_trampoline; 27extern char zynq_secondary_trampoline;
28extern char zynq_secondary_trampoline_jump; 28extern char zynq_secondary_trampoline_jump;
29extern char zynq_secondary_trampoline_end; 29extern char zynq_secondary_trampoline_end;
30extern int __cpuinit zynq_cpun_start(u32 address, int cpu); 30extern int zynq_cpun_start(u32 address, int cpu);
31extern struct smp_operations zynq_smp_ops __initdata; 31extern struct smp_operations zynq_smp_ops __initdata;
32#endif 32#endif
33 33
diff --git a/arch/arm/mach-zynq/headsmp.S b/arch/arm/mach-zynq/headsmp.S
index d183cd234a9b..d4cd5f34fe5c 100644
--- a/arch/arm/mach-zynq/headsmp.S
+++ b/arch/arm/mach-zynq/headsmp.S
@@ -9,8 +9,6 @@
9#include <linux/linkage.h> 9#include <linux/linkage.h>
10#include <linux/init.h> 10#include <linux/init.h>
11 11
12 __CPUINIT
13
14ENTRY(zynq_secondary_trampoline) 12ENTRY(zynq_secondary_trampoline)
15 ldr r0, [pc] 13 ldr r0, [pc]
16 bx r0 14 bx r0
diff --git a/arch/arm/mach-zynq/platsmp.c b/arch/arm/mach-zynq/platsmp.c
index 023f225493f2..689fbbc3d9c8 100644
--- a/arch/arm/mach-zynq/platsmp.c
+++ b/arch/arm/mach-zynq/platsmp.c
@@ -30,11 +30,11 @@
30/* 30/*
31 * Store number of cores in the system 31 * Store number of cores in the system
32 * Because of scu_get_core_count() must be in __init section and can't 32 * Because of scu_get_core_count() must be in __init section and can't
33 * be called from zynq_cpun_start() because it is in __cpuinit section. 33 * be called from zynq_cpun_start() because it is not in __init section.
34 */ 34 */
35static int ncores; 35static int ncores;
36 36
37int __cpuinit zynq_cpun_start(u32 address, int cpu) 37int zynq_cpun_start(u32 address, int cpu)
38{ 38{
39 u32 trampoline_code_size = &zynq_secondary_trampoline_end - 39 u32 trampoline_code_size = &zynq_secondary_trampoline_end -
40 &zynq_secondary_trampoline; 40 &zynq_secondary_trampoline;
@@ -92,7 +92,7 @@ int __cpuinit zynq_cpun_start(u32 address, int cpu)
92} 92}
93EXPORT_SYMBOL(zynq_cpun_start); 93EXPORT_SYMBOL(zynq_cpun_start);
94 94
95static int __cpuinit zynq_boot_secondary(unsigned int cpu, 95static int zynq_boot_secondary(unsigned int cpu,
96 struct task_struct *idle) 96 struct task_struct *idle)
97{ 97{
98 return zynq_cpun_start(virt_to_phys(secondary_startup), cpu); 98 return zynq_cpun_start(virt_to_phys(secondary_startup), cpu);
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index 2bb61e703d6c..d1a2d05971e0 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -443,8 +443,6 @@ ENTRY(cpu_arm1020_set_pte_ext)
443#endif /* CONFIG_MMU */ 443#endif /* CONFIG_MMU */
444 mov pc, lr 444 mov pc, lr
445 445
446 __CPUINIT
447
448 .type __arm1020_setup, #function 446 .type __arm1020_setup, #function
449__arm1020_setup: 447__arm1020_setup:
450 mov r0, #0 448 mov r0, #0
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index 8f96aa40f510..9d89405c3d03 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -425,8 +425,6 @@ ENTRY(cpu_arm1020e_set_pte_ext)
425#endif /* CONFIG_MMU */ 425#endif /* CONFIG_MMU */
426 mov pc, lr 426 mov pc, lr
427 427
428 __CPUINIT
429
430 .type __arm1020e_setup, #function 428 .type __arm1020e_setup, #function
431__arm1020e_setup: 429__arm1020e_setup:
432 mov r0, #0 430 mov r0, #0
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S
index 8ebe4a469a22..6f01a0ae3b30 100644
--- a/arch/arm/mm/proc-arm1022.S
+++ b/arch/arm/mm/proc-arm1022.S
@@ -407,8 +407,6 @@ ENTRY(cpu_arm1022_set_pte_ext)
407#endif /* CONFIG_MMU */ 407#endif /* CONFIG_MMU */
408 mov pc, lr 408 mov pc, lr
409 409
410 __CPUINIT
411
412 .type __arm1022_setup, #function 410 .type __arm1022_setup, #function
413__arm1022_setup: 411__arm1022_setup:
414 mov r0, #0 412 mov r0, #0
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index 093fc7e520c3..4799a24b43e6 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -396,9 +396,6 @@ ENTRY(cpu_arm1026_set_pte_ext)
396#endif /* CONFIG_MMU */ 396#endif /* CONFIG_MMU */
397 mov pc, lr 397 mov pc, lr
398 398
399
400 __CPUINIT
401
402 .type __arm1026_setup, #function 399 .type __arm1026_setup, #function
403__arm1026_setup: 400__arm1026_setup:
404 mov r0, #0 401 mov r0, #0
diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S
index 0ac908c7ade1..d42c37f9f5bc 100644
--- a/arch/arm/mm/proc-arm720.S
+++ b/arch/arm/mm/proc-arm720.S
@@ -116,8 +116,6 @@ ENTRY(cpu_arm720_reset)
116ENDPROC(cpu_arm720_reset) 116ENDPROC(cpu_arm720_reset)
117 .popsection 117 .popsection
118 118
119 __CPUINIT
120
121 .type __arm710_setup, #function 119 .type __arm710_setup, #function
122__arm710_setup: 120__arm710_setup:
123 mov r0, #0 121 mov r0, #0
diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S
index fde2d2a794cf..9b0ae90cbf17 100644
--- a/arch/arm/mm/proc-arm740.S
+++ b/arch/arm/mm/proc-arm740.S
@@ -60,8 +60,6 @@ ENTRY(cpu_arm740_reset)
60ENDPROC(cpu_arm740_reset) 60ENDPROC(cpu_arm740_reset)
61 .popsection 61 .popsection
62 62
63 __CPUINIT
64
65 .type __arm740_setup, #function 63 .type __arm740_setup, #function
66__arm740_setup: 64__arm740_setup:
67 mov r0, #0 65 mov r0, #0
diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S
index 6ddea3e464bd..f6cc3f63ce39 100644
--- a/arch/arm/mm/proc-arm7tdmi.S
+++ b/arch/arm/mm/proc-arm7tdmi.S
@@ -51,8 +51,6 @@ ENTRY(cpu_arm7tdmi_reset)
51ENDPROC(cpu_arm7tdmi_reset) 51ENDPROC(cpu_arm7tdmi_reset)
52 .popsection 52 .popsection
53 53
54 __CPUINIT
55
56 .type __arm7tdmi_setup, #function 54 .type __arm7tdmi_setup, #function
57__arm7tdmi_setup: 55__arm7tdmi_setup:
58 mov pc, lr 56 mov pc, lr
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index 2556cf1c2da1..549557df6d57 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -410,8 +410,6 @@ ENTRY(cpu_arm920_do_resume)
410ENDPROC(cpu_arm920_do_resume) 410ENDPROC(cpu_arm920_do_resume)
411#endif 411#endif
412 412
413 __CPUINIT
414
415 .type __arm920_setup, #function 413 .type __arm920_setup, #function
416__arm920_setup: 414__arm920_setup:
417 mov r0, #0 415 mov r0, #0
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S
index 4464c49d7449..2a758b06c6f6 100644
--- a/arch/arm/mm/proc-arm922.S
+++ b/arch/arm/mm/proc-arm922.S
@@ -388,8 +388,6 @@ ENTRY(cpu_arm922_set_pte_ext)
388#endif /* CONFIG_MMU */ 388#endif /* CONFIG_MMU */
389 mov pc, lr 389 mov pc, lr
390 390
391 __CPUINIT
392
393 .type __arm922_setup, #function 391 .type __arm922_setup, #function
394__arm922_setup: 392__arm922_setup:
395 mov r0, #0 393 mov r0, #0
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index 281eb9b9c1d6..97448c3acf38 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -438,8 +438,6 @@ ENTRY(cpu_arm925_set_pte_ext)
438#endif /* CONFIG_MMU */ 438#endif /* CONFIG_MMU */
439 mov pc, lr 439 mov pc, lr
440 440
441 __CPUINIT
442
443 .type __arm925_setup, #function 441 .type __arm925_setup, #function
444__arm925_setup: 442__arm925_setup:
445 mov r0, #0 443 mov r0, #0
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 344c8a548cc0..0f098f407c9f 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -425,8 +425,6 @@ ENTRY(cpu_arm926_do_resume)
425ENDPROC(cpu_arm926_do_resume) 425ENDPROC(cpu_arm926_do_resume)
426#endif 426#endif
427 427
428 __CPUINIT
429
430 .type __arm926_setup, #function 428 .type __arm926_setup, #function
431__arm926_setup: 429__arm926_setup:
432 mov r0, #0 430 mov r0, #0
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S
index 8da189d4a402..1c39a704ff6e 100644
--- a/arch/arm/mm/proc-arm940.S
+++ b/arch/arm/mm/proc-arm940.S
@@ -273,8 +273,6 @@ ENDPROC(arm940_dma_unmap_area)
273 @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) 273 @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
274 define_cache_functions arm940 274 define_cache_functions arm940
275 275
276 __CPUINIT
277
278 .type __arm940_setup, #function 276 .type __arm940_setup, #function
279__arm940_setup: 277__arm940_setup:
280 mov r0, #0 278 mov r0, #0
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
index f666cf34075a..0289cd905e73 100644
--- a/arch/arm/mm/proc-arm946.S
+++ b/arch/arm/mm/proc-arm946.S
@@ -326,8 +326,6 @@ ENTRY(cpu_arm946_dcache_clean_area)
326 mcr p15, 0, r0, c7, c10, 4 @ drain WB 326 mcr p15, 0, r0, c7, c10, 4 @ drain WB
327 mov pc, lr 327 mov pc, lr
328 328
329 __CPUINIT
330
331 .type __arm946_setup, #function 329 .type __arm946_setup, #function
332__arm946_setup: 330__arm946_setup:
333 mov r0, #0 331 mov r0, #0
diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S
index 8881391dfb9e..f51197ba754a 100644
--- a/arch/arm/mm/proc-arm9tdmi.S
+++ b/arch/arm/mm/proc-arm9tdmi.S
@@ -51,8 +51,6 @@ ENTRY(cpu_arm9tdmi_reset)
51ENDPROC(cpu_arm9tdmi_reset) 51ENDPROC(cpu_arm9tdmi_reset)
52 .popsection 52 .popsection
53 53
54 __CPUINIT
55
56 .type __arm9tdmi_setup, #function 54 .type __arm9tdmi_setup, #function
57__arm9tdmi_setup: 55__arm9tdmi_setup:
58 mov pc, lr 56 mov pc, lr
diff --git a/arch/arm/mm/proc-fa526.S b/arch/arm/mm/proc-fa526.S
index aaeb6c127c7a..2dfc0f1d3bfd 100644
--- a/arch/arm/mm/proc-fa526.S
+++ b/arch/arm/mm/proc-fa526.S
@@ -135,8 +135,6 @@ ENTRY(cpu_fa526_set_pte_ext)
135#endif 135#endif
136 mov pc, lr 136 mov pc, lr
137 137
138 __CPUINIT
139
140 .type __fa526_setup, #function 138 .type __fa526_setup, #function
141__fa526_setup: 139__fa526_setup:
142 /* On return of this routine, r0 must carry correct flags for CFG register */ 140 /* On return of this routine, r0 must carry correct flags for CFG register */
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index 4106b09e0c29..d5146b98c8d1 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -514,8 +514,6 @@ ENTRY(cpu_feroceon_set_pte_ext)
514#endif 514#endif
515 mov pc, lr 515 mov pc, lr
516 516
517 __CPUINIT
518
519 .type __feroceon_setup, #function 517 .type __feroceon_setup, #function
520__feroceon_setup: 518__feroceon_setup:
521 mov r0, #0 519 mov r0, #0
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S
index 0b60dd3d742a..40acba595731 100644
--- a/arch/arm/mm/proc-mohawk.S
+++ b/arch/arm/mm/proc-mohawk.S
@@ -383,8 +383,6 @@ ENTRY(cpu_mohawk_do_resume)
383ENDPROC(cpu_mohawk_do_resume) 383ENDPROC(cpu_mohawk_do_resume)
384#endif 384#endif
385 385
386 __CPUINIT
387
388 .type __mohawk_setup, #function 386 .type __mohawk_setup, #function
389__mohawk_setup: 387__mohawk_setup:
390 mov r0, #0 388 mov r0, #0
diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S
index 775d70fba937..c45319c8f1d9 100644
--- a/arch/arm/mm/proc-sa110.S
+++ b/arch/arm/mm/proc-sa110.S
@@ -159,8 +159,6 @@ ENTRY(cpu_sa110_set_pte_ext)
159#endif 159#endif
160 mov pc, lr 160 mov pc, lr
161 161
162 __CPUINIT
163
164 .type __sa110_setup, #function 162 .type __sa110_setup, #function
165__sa110_setup: 163__sa110_setup:
166 mov r10, #0 164 mov r10, #0
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index d92dfd081429..09d241ae2dbe 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -198,8 +198,6 @@ ENTRY(cpu_sa1100_do_resume)
198ENDPROC(cpu_sa1100_do_resume) 198ENDPROC(cpu_sa1100_do_resume)
199#endif 199#endif
200 200
201 __CPUINIT
202
203 .type __sa1100_setup, #function 201 .type __sa1100_setup, #function
204__sa1100_setup: 202__sa1100_setup:
205 mov r0, #0 203 mov r0, #0
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 2d1ef87328a1..1128064fddcb 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -180,8 +180,6 @@ ENDPROC(cpu_v6_do_resume)
180 180
181 .align 181 .align
182 182
183 __CPUINIT
184
185/* 183/*
186 * __v6_setup 184 * __v6_setup
187 * 185 *
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index 9704097c450e..f64afb9f1bd5 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -160,8 +160,6 @@ ENDPROC(cpu_v7_set_pte_ext)
160 mcr p15, 0, \ttbr1, c2, c0, 1 @ load TTB1 160 mcr p15, 0, \ttbr1, c2, c0, 1 @ load TTB1
161 .endm 161 .endm
162 162
163 __CPUINIT
164
165 /* AT 163 /* AT
166 * TFR EV X F I D LR S 164 * TFR EV X F I D LR S
167 * .EEE ..EE PUI. .T.T 4RVI ZWRS BLDP WCAM 165 * .EEE ..EE PUI. .T.T 4RVI ZWRS BLDP WCAM
@@ -172,5 +170,3 @@ ENDPROC(cpu_v7_set_pte_ext)
172 .type v7_crval, #object 170 .type v7_crval, #object
173v7_crval: 171v7_crval:
174 crval clear=0x2120c302, mmuset=0x10c03c7d, ucset=0x00c01c7c 172 crval clear=0x2120c302, mmuset=0x10c03c7d, ucset=0x00c01c7c
175
176 .previous
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
index 5ffe1956c6d9..c36ac69488c8 100644
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -140,8 +140,6 @@ ENDPROC(cpu_v7_set_pte_ext)
140 mcrr p15, 0, \ttbr0, \zero, c2 @ load TTBR0 140 mcrr p15, 0, \ttbr0, \zero, c2 @ load TTBR0
141 .endm 141 .endm
142 142
143 __CPUINIT
144
145 /* 143 /*
146 * AT 144 * AT
147 * TFR EV X F IHD LR S 145 * TFR EV X F IHD LR S
@@ -153,5 +151,3 @@ ENDPROC(cpu_v7_set_pte_ext)
153 .type v7_crval, #object 151 .type v7_crval, #object
154v7_crval: 152v7_crval:
155 crval clear=0x0120c302, mmuset=0x30c23c7d, ucset=0x00c01c7c 153 crval clear=0x0120c302, mmuset=0x30c23c7d, ucset=0x00c01c7c
156
157 .previous
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 7ef3ad05df39..5c6d5a3050ea 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -167,8 +167,6 @@ ENDPROC(cpu_pj4b_do_idle)
167 167
168#endif 168#endif
169 169
170 __CPUINIT
171
172/* 170/*
173 * __v7_setup 171 * __v7_setup
174 * 172 *
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index e8efd83b6f25..dc1645890042 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -446,8 +446,6 @@ ENTRY(cpu_xsc3_do_resume)
446ENDPROC(cpu_xsc3_do_resume) 446ENDPROC(cpu_xsc3_do_resume)
447#endif 447#endif
448 448
449 __CPUINIT
450
451 .type __xsc3_setup, #function 449 .type __xsc3_setup, #function
452__xsc3_setup: 450__xsc3_setup:
453 mov r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE 451 mov r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index e766f889bfd6..d19b1cfcad91 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -558,8 +558,6 @@ ENTRY(cpu_xscale_do_resume)
558ENDPROC(cpu_xscale_do_resume) 558ENDPROC(cpu_xscale_do_resume)
559#endif 559#endif
560 560
561 __CPUINIT
562
563 .type __xscale_setup, #function 561 .type __xscale_setup, #function
564__xscale_setup: 562__xscale_setup:
565 mcr p15, 0, ip, c7, c7, 0 @ invalidate I, D caches & BTB 563 mcr p15, 0, ip, c7, c7, 0 @ invalidate I, D caches & BTB
diff --git a/arch/arm/plat-samsung/Kconfig b/arch/arm/plat-samsung/Kconfig
index 3dc5cbea86cc..a5b5ff6e68d2 100644
--- a/arch/arm/plat-samsung/Kconfig
+++ b/arch/arm/plat-samsung/Kconfig
@@ -29,6 +29,13 @@ config PLAT_S5P
29 help 29 help
30 Base platform code for Samsung's S5P series SoC. 30 Base platform code for Samsung's S5P series SoC.
31 31
32config SAMSUNG_PM
33 bool
34 depends on PM && (PLAT_S3C24XX || ARCH_S3C64XX || ARCH_S5P64X0 || S5P_PM)
35 default y
36 help
37	  Base platform power management code for Samsung SoCs.
38
32if PLAT_SAMSUNG 39if PLAT_SAMSUNG
33 40
34# boot configurations 41# boot configurations
diff --git a/arch/arm/plat-samsung/Makefile b/arch/arm/plat-samsung/Makefile
index 98d07d8fc7a7..199bbe304d02 100644
--- a/arch/arm/plat-samsung/Makefile
+++ b/arch/arm/plat-samsung/Makefile
@@ -51,7 +51,7 @@ obj-$(CONFIG_SAMSUNG_DMADEV) += dma-ops.o
51 51
52# PM support 52# PM support
53 53
54obj-$(CONFIG_PM) += pm.o 54obj-$(CONFIG_SAMSUNG_PM) += pm.o
55obj-$(CONFIG_SAMSUNG_PM_GPIO) += pm-gpio.o 55obj-$(CONFIG_SAMSUNG_PM_GPIO) += pm-gpio.o
56obj-$(CONFIG_SAMSUNG_PM_CHECK) += pm-check.o 56obj-$(CONFIG_SAMSUNG_PM_CHECK) += pm-check.o
57 57
diff --git a/arch/arm/plat-samsung/include/plat/clock.h b/arch/arm/plat-samsung/include/plat/clock.h
index a62753dc15ba..df45d6edc98d 100644
--- a/arch/arm/plat-samsung/include/plat/clock.h
+++ b/arch/arm/plat-samsung/include/plat/clock.h
@@ -83,6 +83,11 @@ extern struct clk clk_ext;
83extern struct clksrc_clk clk_epllref; 83extern struct clksrc_clk clk_epllref;
84extern struct clksrc_clk clk_esysclk; 84extern struct clksrc_clk clk_esysclk;
85 85
86/* S3C24XX UART clocks */
87extern struct clk s3c24xx_clk_uart0;
88extern struct clk s3c24xx_clk_uart1;
89extern struct clk s3c24xx_clk_uart2;
90
86/* S3C64XX specific clocks */ 91/* S3C64XX specific clocks */
87extern struct clk clk_h2; 92extern struct clk clk_h2;
88extern struct clk clk_27m; 93extern struct clk clk_27m;
diff --git a/arch/arm/plat-samsung/include/plat/pm.h b/arch/arm/plat-samsung/include/plat/pm.h
index 5d47ca35cabd..6bc1a8f471e3 100644
--- a/arch/arm/plat-samsung/include/plat/pm.h
+++ b/arch/arm/plat-samsung/include/plat/pm.h
@@ -19,7 +19,7 @@
19 19
20struct device; 20struct device;
21 21
22#ifdef CONFIG_PM 22#ifdef CONFIG_SAMSUNG_PM
23 23
24extern __init int s3c_pm_init(void); 24extern __init int s3c_pm_init(void);
25extern __init int s3c64xx_pm_init(void); 25extern __init int s3c64xx_pm_init(void);
@@ -58,8 +58,6 @@ extern unsigned char pm_uart_udivslot; /* true to save UART UDIVSLOT */
58 58
59/* from sleep.S */ 59/* from sleep.S */
60 60
61extern void s3c_cpu_resume(void);
62
63extern int s3c2410_cpu_suspend(unsigned long); 61extern int s3c2410_cpu_suspend(unsigned long);
64 62
65/* sleep save info */ 63/* sleep save info */
@@ -106,12 +104,14 @@ extern void s3c_pm_do_save(struct sleep_save *ptr, int count);
106extern void s3c_pm_do_restore(struct sleep_save *ptr, int count); 104extern void s3c_pm_do_restore(struct sleep_save *ptr, int count);
107extern void s3c_pm_do_restore_core(struct sleep_save *ptr, int count); 105extern void s3c_pm_do_restore_core(struct sleep_save *ptr, int count);
108 106
109#ifdef CONFIG_PM 107#ifdef CONFIG_SAMSUNG_PM
110extern int s3c_irq_wake(struct irq_data *data, unsigned int state); 108extern int s3c_irq_wake(struct irq_data *data, unsigned int state);
111extern int s3c_irqext_wake(struct irq_data *data, unsigned int state); 109extern int s3c_irqext_wake(struct irq_data *data, unsigned int state);
110extern void s3c_cpu_resume(void);
112#else 111#else
113#define s3c_irq_wake NULL 112#define s3c_irq_wake NULL
114#define s3c_irqext_wake NULL 113#define s3c_irqext_wake NULL
114#define s3c_cpu_resume NULL
115#endif 115#endif
116 116
117/* PM debug functions */ 117/* PM debug functions */
diff --git a/arch/arm/plat-samsung/pm.c b/arch/arm/plat-samsung/pm.c
index ea3613642451..d0c23010b693 100644
--- a/arch/arm/plat-samsung/pm.c
+++ b/arch/arm/plat-samsung/pm.c
@@ -80,7 +80,7 @@ unsigned char pm_uart_udivslot;
80 80
81#ifdef CONFIG_SAMSUNG_PM_DEBUG 81#ifdef CONFIG_SAMSUNG_PM_DEBUG
82 82
83static struct pm_uart_save uart_save[CONFIG_SERIAL_SAMSUNG_UARTS]; 83static struct pm_uart_save uart_save;
84 84
85static void s3c_pm_save_uart(unsigned int uart, struct pm_uart_save *save) 85static void s3c_pm_save_uart(unsigned int uart, struct pm_uart_save *save)
86{ 86{
@@ -101,11 +101,7 @@ static void s3c_pm_save_uart(unsigned int uart, struct pm_uart_save *save)
101 101
102static void s3c_pm_save_uarts(void) 102static void s3c_pm_save_uarts(void)
103{ 103{
104 struct pm_uart_save *save = uart_save; 104 s3c_pm_save_uart(CONFIG_DEBUG_S3C_UART, &uart_save);
105 unsigned int uart;
106
107 for (uart = 0; uart < CONFIG_SERIAL_SAMSUNG_UARTS; uart++, save++)
108 s3c_pm_save_uart(uart, save);
109} 105}
110 106
111static void s3c_pm_restore_uart(unsigned int uart, struct pm_uart_save *save) 107static void s3c_pm_restore_uart(unsigned int uart, struct pm_uart_save *save)
@@ -126,11 +122,7 @@ static void s3c_pm_restore_uart(unsigned int uart, struct pm_uart_save *save)
126 122
127static void s3c_pm_restore_uarts(void) 123static void s3c_pm_restore_uarts(void)
128{ 124{
129 struct pm_uart_save *save = uart_save; 125 s3c_pm_restore_uart(CONFIG_DEBUG_S3C_UART, &uart_save);
130 unsigned int uart;
131
132 for (uart = 0; uart < CONFIG_SERIAL_SAMSUNG_UARTS; uart++, save++)
133 s3c_pm_restore_uart(uart, save);
134} 126}
135#else 127#else
136static void s3c_pm_save_uarts(void) { } 128static void s3c_pm_save_uarts(void) { }
diff --git a/arch/arm/plat-versatile/platsmp.c b/arch/arm/plat-versatile/platsmp.c
index 1e1b2d769748..39895d892c3b 100644
--- a/arch/arm/plat-versatile/platsmp.c
+++ b/arch/arm/plat-versatile/platsmp.c
@@ -23,7 +23,7 @@
23 * observers, irrespective of whether they're taking part in coherency 23 * observers, irrespective of whether they're taking part in coherency
24 * or not. This is necessary for the hotplug code to work reliably. 24 * or not. This is necessary for the hotplug code to work reliably.
25 */ 25 */
26static void __cpuinit write_pen_release(int val) 26static void write_pen_release(int val)
27{ 27{
28 pen_release = val; 28 pen_release = val;
29 smp_wmb(); 29 smp_wmb();
@@ -33,7 +33,7 @@ static void __cpuinit write_pen_release(int val)
33 33
34static DEFINE_SPINLOCK(boot_lock); 34static DEFINE_SPINLOCK(boot_lock);
35 35
36void __cpuinit versatile_secondary_init(unsigned int cpu) 36void versatile_secondary_init(unsigned int cpu)
37{ 37{
38 /* 38 /*
39 * let the primary processor know we're out of the 39 * let the primary processor know we're out of the
@@ -48,7 +48,7 @@ void __cpuinit versatile_secondary_init(unsigned int cpu)
48 spin_unlock(&boot_lock); 48 spin_unlock(&boot_lock);
49} 49}
50 50
51int __cpuinit versatile_boot_secondary(unsigned int cpu, struct task_struct *idle) 51int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
52{ 52{
53 unsigned long timeout; 53 unsigned long timeout;
54 54
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
index d56ed11ba9a3..98abd476992d 100644
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -97,7 +97,7 @@ static inline u32 arch_timer_get_cntfrq(void)
97 return val; 97 return val;
98} 98}
99 99
100static inline void __cpuinit arch_counter_set_user_access(void) 100static inline void arch_counter_set_user_access(void)
101{ 101{
102 u32 cntkctl; 102 u32 cntkctl;
103 103
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index ef8235c68c09..a2232d07be9d 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -83,14 +83,7 @@ static inline int reinstall_suspended_bps(struct pt_regs *regs)
83} 83}
84#endif 84#endif
85 85
86#ifdef CONFIG_COMPAT
87int aarch32_break_handler(struct pt_regs *regs); 86int aarch32_break_handler(struct pt_regs *regs);
88#else
89static int aarch32_break_handler(struct pt_regs *regs)
90{
91 return -EFAULT;
92}
93#endif
94 87
95#endif /* __ASSEMBLY */ 88#endif /* __ASSEMBLY */
96#endif /* __KERNEL__ */ 89#endif /* __KERNEL__ */
diff --git a/arch/arm64/include/asm/system_misc.h b/arch/arm64/include/asm/system_misc.h
index a6e1750369ef..7a18fabbe0f6 100644
--- a/arch/arm64/include/asm/system_misc.h
+++ b/arch/arm64/include/asm/system_misc.h
@@ -23,6 +23,7 @@
23#include <linux/compiler.h> 23#include <linux/compiler.h>
24#include <linux/linkage.h> 24#include <linux/linkage.h>
25#include <linux/irqflags.h> 25#include <linux/irqflags.h>
26#include <linux/reboot.h>
26 27
27struct pt_regs; 28struct pt_regs;
28 29
@@ -41,7 +42,7 @@ extern void show_pte(struct mm_struct *mm, unsigned long addr);
41extern void __show_regs(struct pt_regs *); 42extern void __show_regs(struct pt_regs *);
42 43
43void soft_restart(unsigned long); 44void soft_restart(unsigned long);
44extern void (*arm_pm_restart)(char str, const char *cmd); 45extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
45 46
46#define UDBG_UNDEFINED (1 << 0) 47#define UDBG_UNDEFINED (1 << 0)
47#define UDBG_SYSCALL (1 << 1) 48#define UDBG_SYSCALL (1 << 1)
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 3659e460071d..23a3c4791d86 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -24,10 +24,10 @@
24#include <linux/compiler.h> 24#include <linux/compiler.h>
25 25
26#ifndef CONFIG_ARM64_64K_PAGES 26#ifndef CONFIG_ARM64_64K_PAGES
27#define THREAD_SIZE_ORDER 1 27#define THREAD_SIZE_ORDER 2
28#endif 28#endif
29 29
30#define THREAD_SIZE 8192 30#define THREAD_SIZE 16384
31#define THREAD_START_SP (THREAD_SIZE - 16) 31#define THREAD_START_SP (THREAD_SIZE - 16)
32 32
33#ifndef __ASSEMBLY__ 33#ifndef __ASSEMBLY__
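The arm64 kernel stack grows here from 8 KiB to 16 KiB; with 4 KiB pages that is an allocation order of 2, which is why THREAD_SIZE_ORDER moves from 1 to 2 in the same hunk. A quick consistency check, written as an equivalent (assumed, not the literal header) definition:

/* 1 << 2 = 4 pages, and 4 * 4096 = 16384 bytes, matching THREAD_SIZE above */
#define THREAD_SIZE_ORDER   2
#define THREAD_SIZE         (PAGE_SIZE << THREAD_SIZE_ORDER)   /* 16384 with 4 KiB pages */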
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 439827271e3d..26e310c54344 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -21,6 +21,7 @@
21#define BOOT_CPU_MODE_EL2 (0x0e12b007) 21#define BOOT_CPU_MODE_EL2 (0x0e12b007)
22 22
23#ifndef __ASSEMBLY__ 23#ifndef __ASSEMBLY__
24#include <asm/cacheflush.h>
24 25
25/* 26/*
26 * __boot_cpu_mode records what mode CPUs were booted in. 27 * __boot_cpu_mode records what mode CPUs were booted in.
@@ -36,9 +37,20 @@ extern u32 __boot_cpu_mode[2];
36void __hyp_set_vectors(phys_addr_t phys_vector_base); 37void __hyp_set_vectors(phys_addr_t phys_vector_base);
37phys_addr_t __hyp_get_vectors(void); 38phys_addr_t __hyp_get_vectors(void);
38 39
40static inline void sync_boot_mode(void)
41{
42 /*
43 * As secondaries write to __boot_cpu_mode with caches disabled, we
44 * must flush the corresponding cache entries to ensure the visibility
45 * of their writes.
46 */
47 __flush_dcache_area(__boot_cpu_mode, sizeof(__boot_cpu_mode));
48}
49
39/* Reports the availability of HYP mode */ 50/* Reports the availability of HYP mode */
40static inline bool is_hyp_mode_available(void) 51static inline bool is_hyp_mode_available(void)
41{ 52{
53 sync_boot_mode();
42 return (__boot_cpu_mode[0] == BOOT_CPU_MODE_EL2 && 54 return (__boot_cpu_mode[0] == BOOT_CPU_MODE_EL2 &&
43 __boot_cpu_mode[1] == BOOT_CPU_MODE_EL2); 55 __boot_cpu_mode[1] == BOOT_CPU_MODE_EL2);
44} 56}
@@ -46,6 +58,7 @@ static inline bool is_hyp_mode_available(void)
46/* Check if the bootloader has booted CPUs in different modes */ 58/* Check if the bootloader has booted CPUs in different modes */
47static inline bool is_hyp_mode_mismatched(void) 59static inline bool is_hyp_mode_mismatched(void)
48{ 60{
61 sync_boot_mode();
49 return __boot_cpu_mode[0] != __boot_cpu_mode[1]; 62 return __boot_cpu_mode[0] != __boot_cpu_mode[1];
50} 63}
51 64
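sync_boot_mode() is needed because secondary CPUs record their boot mode in __boot_cpu_mode while running with the caches disabled; without cleaning and invalidating those lines, a CPU reading the variable through its cacheable mapping could see stale data. A hedged usage sketch of the two predicates (the messages are illustrative, not from the patch):

/* Typical one-off check during boot, e.g. before hypervisor/KVM setup. */
if (is_hyp_mode_mismatched())
        pr_warn("CPUs booted in inconsistent exception levels\n");
else if (!is_hyp_mode_available())
        pr_info("CPUs not booted in EL2, hypervisor support unavailable\n");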
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 08018e3df580..cbfacf7fb438 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -141,7 +141,7 @@ static void clear_os_lock(void *unused)
141 isb(); 141 isb();
142} 142}
143 143
144static int __cpuinit os_lock_notify(struct notifier_block *self, 144static int os_lock_notify(struct notifier_block *self,
145 unsigned long action, void *data) 145 unsigned long action, void *data)
146{ 146{
147 int cpu = (unsigned long)data; 147 int cpu = (unsigned long)data;
@@ -150,11 +150,11 @@ static int __cpuinit os_lock_notify(struct notifier_block *self,
150 return NOTIFY_OK; 150 return NOTIFY_OK;
151} 151}
152 152
153static struct notifier_block __cpuinitdata os_lock_nb = { 153static struct notifier_block os_lock_nb = {
154 .notifier_call = os_lock_notify, 154 .notifier_call = os_lock_notify,
155}; 155};
156 156
157static int __cpuinit debug_monitors_init(void) 157static int debug_monitors_init(void)
158{ 158{
159 /* Clear the OS lock. */ 159 /* Clear the OS lock. */
160 smp_call_function(clear_os_lock, NULL, 1); 160 smp_call_function(clear_os_lock, NULL, 1);
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 1d1314280a03..6ad781b21c08 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -121,7 +121,7 @@
121 121
122 .macro get_thread_info, rd 122 .macro get_thread_info, rd
123 mov \rd, sp 123 mov \rd, sp
124 and \rd, \rd, #~((1 << 13) - 1) // top of 8K stack 124 and \rd, \rd, #~(THREAD_SIZE - 1) // top of stack
125 .endm 125 .endm
126 126
127/* 127/*
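With THREAD_SIZE no longer hard-wired to 8 KiB, the get_thread_info assembly macro masks the stack pointer with the generic constant instead of a literal. Roughly the same computation on the C side, as a sketch of the usual pattern rather than the literal arm64 header:

static inline struct thread_info *current_thread_info(void)
{
        register unsigned long sp asm ("sp");

        /* thread_info sits at the base of the THREAD_SIZE-aligned stack */
        return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
}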
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index 5ab825c59db9..329218ca9ffb 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -821,7 +821,7 @@ static void reset_ctrl_regs(void *unused)
821 } 821 }
822} 822}
823 823
824static int __cpuinit hw_breakpoint_reset_notify(struct notifier_block *self, 824static int hw_breakpoint_reset_notify(struct notifier_block *self,
825 unsigned long action, 825 unsigned long action,
826 void *hcpu) 826 void *hcpu)
827{ 827{
@@ -831,7 +831,7 @@ static int __cpuinit hw_breakpoint_reset_notify(struct notifier_block *self,
831 return NOTIFY_OK; 831 return NOTIFY_OK;
832} 832}
833 833
834static struct notifier_block __cpuinitdata hw_breakpoint_reset_nb = { 834static struct notifier_block hw_breakpoint_reset_nb = {
835 .notifier_call = hw_breakpoint_reset_notify, 835 .notifier_call = hw_breakpoint_reset_notify,
836}; 836};
837 837
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 46f02c3b5015..57fb55c44c90 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -81,7 +81,7 @@ void soft_restart(unsigned long addr)
81void (*pm_power_off)(void); 81void (*pm_power_off)(void);
82EXPORT_SYMBOL_GPL(pm_power_off); 82EXPORT_SYMBOL_GPL(pm_power_off);
83 83
84void (*arm_pm_restart)(char str, const char *cmd); 84void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
85EXPORT_SYMBOL_GPL(arm_pm_restart); 85EXPORT_SYMBOL_GPL(arm_pm_restart);
86 86
87void arch_cpu_idle_prepare(void) 87void arch_cpu_idle_prepare(void)
@@ -132,7 +132,7 @@ void machine_restart(char *cmd)
132 132
133 /* Now call the architecture specific reboot code. */ 133 /* Now call the architecture specific reboot code. */
134 if (arm_pm_restart) 134 if (arm_pm_restart)
135 arm_pm_restart('h', cmd); 135 arm_pm_restart(reboot_mode, cmd);
136 136
137 /* 137 /*
138 * Whoops - the architecture was unable to reboot. 138 * Whoops - the architecture was unable to reboot.
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 5d54e3717bf8..fee5cce83450 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -71,7 +71,7 @@ static DEFINE_RAW_SPINLOCK(boot_lock);
71 * in coherency or not. This is necessary for the hotplug code to work 71 * in coherency or not. This is necessary for the hotplug code to work
72 * reliably. 72 * reliably.
73 */ 73 */
74static void __cpuinit write_pen_release(u64 val) 74static void write_pen_release(u64 val)
75{ 75{
76 void *start = (void *)&secondary_holding_pen_release; 76 void *start = (void *)&secondary_holding_pen_release;
77 unsigned long size = sizeof(secondary_holding_pen_release); 77 unsigned long size = sizeof(secondary_holding_pen_release);
@@ -84,7 +84,7 @@ static void __cpuinit write_pen_release(u64 val)
84 * Boot a secondary CPU, and assign it the specified idle task. 84 * Boot a secondary CPU, and assign it the specified idle task.
85 * This also gives us the initial stack to use for this CPU. 85 * This also gives us the initial stack to use for this CPU.
86 */ 86 */
87static int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle) 87static int boot_secondary(unsigned int cpu, struct task_struct *idle)
88{ 88{
89 unsigned long timeout; 89 unsigned long timeout;
90 90
@@ -122,7 +122,7 @@ static int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
122 122
123static DECLARE_COMPLETION(cpu_running); 123static DECLARE_COMPLETION(cpu_running);
124 124
125int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle) 125int __cpu_up(unsigned int cpu, struct task_struct *idle)
126{ 126{
127 int ret; 127 int ret;
128 128
@@ -162,7 +162,7 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
162 * This is the secondary CPU boot entry. We're using this CPUs 162 * This is the secondary CPU boot entry. We're using this CPUs
163 * idle thread stack, but a set of temporary page tables. 163 * idle thread stack, but a set of temporary page tables.
164 */ 164 */
165asmlinkage void __cpuinit secondary_start_kernel(void) 165asmlinkage void secondary_start_kernel(void)
166{ 166{
167 struct mm_struct *mm = &init_mm; 167 struct mm_struct *mm = &init_mm;
168 unsigned int cpu = smp_processor_id(); 168 unsigned int cpu = smp_processor_id();
@@ -200,13 +200,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
200 raw_spin_unlock(&boot_lock); 200 raw_spin_unlock(&boot_lock);
201 201
202 /* 202 /*
203 * Enable local interrupts.
204 */
205 notify_cpu_starting(cpu);
206 local_irq_enable();
207 local_fiq_enable();
208
209 /*
210 * OK, now it's safe to let the boot CPU continue. Wait for 203 * OK, now it's safe to let the boot CPU continue. Wait for
211 * the CPU migration code to notice that the CPU is online 204 * the CPU migration code to notice that the CPU is online
212 * before we continue. 205 * before we continue.
@@ -215,6 +208,14 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
215 complete(&cpu_running); 208 complete(&cpu_running);
216 209
217 /* 210 /*
211 * Enable GIC and timers.
212 */
213 notify_cpu_starting(cpu);
214
215 local_irq_enable();
216 local_fiq_enable();
217
218 /*
218 * OK, it's off to the idle thread for us 219 * OK, it's off to the idle thread for us
219 */ 220 */
220 cpu_startup_entry(CPUHP_ONLINE); 221 cpu_startup_entry(CPUHP_ONLINE);
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 0ecac8980aae..6c8ba25bf6bb 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -152,25 +152,8 @@ void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
152#define ESR_CM (1 << 8) 152#define ESR_CM (1 << 8)
153#define ESR_LNX_EXEC (1 << 24) 153#define ESR_LNX_EXEC (1 << 24)
154 154
155/*
156 * Check that the permissions on the VMA allow for the fault which occurred.
157 * If we encountered a write fault, we must have write permission, otherwise
158 * we allow any permission.
159 */
160static inline bool access_error(unsigned int esr, struct vm_area_struct *vma)
161{
162 unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
163
164 if (esr & ESR_WRITE)
165 mask = VM_WRITE;
166 if (esr & ESR_LNX_EXEC)
167 mask = VM_EXEC;
168
169 return vma->vm_flags & mask ? false : true;
170}
171
172static int __do_page_fault(struct mm_struct *mm, unsigned long addr, 155static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
173 unsigned int esr, unsigned int flags, 156 unsigned int mm_flags, unsigned long vm_flags,
174 struct task_struct *tsk) 157 struct task_struct *tsk)
175{ 158{
176 struct vm_area_struct *vma; 159 struct vm_area_struct *vma;
@@ -188,12 +171,17 @@ static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
188 * it. 171 * it.
189 */ 172 */
190good_area: 173good_area:
191 if (access_error(esr, vma)) { 174 /*
175 * Check that the permissions on the VMA allow for the fault which
176 * occurred. If we encountered a write or exec fault, we must have
177 * appropriate permissions, otherwise we allow any permission.
178 */
179 if (!(vma->vm_flags & vm_flags)) {
192 fault = VM_FAULT_BADACCESS; 180 fault = VM_FAULT_BADACCESS;
193 goto out; 181 goto out;
194 } 182 }
195 183
196 return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags); 184 return handle_mm_fault(mm, vma, addr & PAGE_MASK, mm_flags);
197 185
198check_stack: 186check_stack:
199 if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr)) 187 if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
@@ -208,9 +196,15 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
208 struct task_struct *tsk; 196 struct task_struct *tsk;
209 struct mm_struct *mm; 197 struct mm_struct *mm;
210 int fault, sig, code; 198 int fault, sig, code;
211 bool write = (esr & ESR_WRITE) && !(esr & ESR_CM); 199 unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
212 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE | 200 unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
213 (write ? FAULT_FLAG_WRITE : 0); 201
202 if (esr & ESR_LNX_EXEC) {
203 vm_flags = VM_EXEC;
204 } else if ((esr & ESR_WRITE) && !(esr & ESR_CM)) {
205 vm_flags = VM_WRITE;
206 mm_flags |= FAULT_FLAG_WRITE;
207 }
214 208
215 tsk = current; 209 tsk = current;
216 mm = tsk->mm; 210 mm = tsk->mm;
@@ -248,7 +242,7 @@ retry:
248#endif 242#endif
249 } 243 }
250 244
251 fault = __do_page_fault(mm, addr, esr, flags, tsk); 245 fault = __do_page_fault(mm, addr, mm_flags, vm_flags, tsk);
252 246
253 /* 247 /*
254 * If we need to retry but a fatal signal is pending, handle the 248 * If we need to retry but a fatal signal is pending, handle the
@@ -265,7 +259,7 @@ retry:
265 */ 259 */
266 260
267 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr); 261 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
268 if (flags & FAULT_FLAG_ALLOW_RETRY) { 262 if (mm_flags & FAULT_FLAG_ALLOW_RETRY) {
269 if (fault & VM_FAULT_MAJOR) { 263 if (fault & VM_FAULT_MAJOR) {
270 tsk->maj_flt++; 264 tsk->maj_flt++;
271 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, 265 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,
@@ -280,7 +274,7 @@ retry:
280 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk of 274 * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk of
281 * starvation. 275 * starvation.
282 */ 276 */
283 flags &= ~FAULT_FLAG_ALLOW_RETRY; 277 mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
284 goto retry; 278 goto retry;
285 } 279 }
286 } 280 }
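
The fault.c rework above drops the separate access_error() helper in favour of two flag words computed once from the ESR: vm_flags says which VMA permissions would satisfy the fault, mm_flags carries the handle_mm_fault() flags. That decision can be modelled in isolation; note ESR_WRITE's bit position is not shown in the hunk, so its value below is an assumption, and the VM_*/FAULT_FLAG_* values are simplified stand-ins rather than the kernel's:

#include <stdio.h>

/* ESR_CM and ESR_LNX_EXEC match the hunk; ESR_WRITE is assumed. */
#define ESR_WRITE    (1U << 6)
#define ESR_CM       (1U << 8)
#define ESR_LNX_EXEC (1U << 24)

/* Simplified stand-ins for the mm flag values (not the kernel's). */
#define VM_READ   0x1U
#define VM_WRITE  0x2U
#define VM_EXEC   0x4U
#define FAULT_FLAG_ALLOW_RETRY 0x1U
#define FAULT_FLAG_KILLABLE    0x2U
#define FAULT_FLAG_WRITE       0x4U

/* Mirror the reworked do_page_fault(): exec faults need VM_EXEC, genuine
 * write faults need VM_WRITE (and set FAULT_FLAG_WRITE), anything else is
 * satisfied by read, write or exec permission on the VMA. */
static void esr_to_flags(unsigned int esr,
			 unsigned long *vm_flags, unsigned int *mm_flags)
{
	*vm_flags = VM_READ | VM_WRITE | VM_EXEC;
	*mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	if (esr & ESR_LNX_EXEC) {
		*vm_flags = VM_EXEC;
	} else if ((esr & ESR_WRITE) && !(esr & ESR_CM)) {
		*vm_flags = VM_WRITE;
		*mm_flags |= FAULT_FLAG_WRITE;
	}
}

int main(void)
{
	unsigned long vm_flags;
	unsigned int mm_flags;
	unsigned long vma_flags = VM_READ | VM_WRITE;   /* a rw, no-exec VMA */

	esr_to_flags(ESR_WRITE, &vm_flags, &mm_flags);
	printf("write fault on rw vma -> %s\n",
	       (vma_flags & vm_flags) ? "handle" : "VM_FAULT_BADACCESS");

	esr_to_flags(ESR_LNX_EXEC, &vm_flags, &mm_flags);
	printf("exec fault on rw vma  -> %s\n",
	       (vma_flags & vm_flags) ? "handle" : "VM_FAULT_BADACCESS");
	return 0;
}
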
diff --git a/arch/blackfin/kernel/perf_event.c b/arch/blackfin/kernel/perf_event.c
index e47d19ae3e06..974e55496db3 100644
--- a/arch/blackfin/kernel/perf_event.c
+++ b/arch/blackfin/kernel/perf_event.c
@@ -468,7 +468,7 @@ static void bfin_pmu_setup(int cpu)
468 memset(cpuhw, 0, sizeof(struct cpu_hw_events)); 468 memset(cpuhw, 0, sizeof(struct cpu_hw_events));
469} 469}
470 470
471static int __cpuinit 471static int
472bfin_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) 472bfin_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
473{ 473{
474 unsigned int cpu = (long)hcpu; 474 unsigned int cpu = (long)hcpu;
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index 107b306b06f1..19ad0637e8ff 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -99,7 +99,7 @@ void __init generate_cplb_tables(void)
99} 99}
100#endif 100#endif
101 101
102void __cpuinit bfin_setup_caches(unsigned int cpu) 102void bfin_setup_caches(unsigned int cpu)
103{ 103{
104#ifdef CONFIG_BFIN_ICACHE 104#ifdef CONFIG_BFIN_ICACHE
105 bfin_icache_init(icplb_tbl[cpu]); 105 bfin_icache_init(icplb_tbl[cpu]);
@@ -165,7 +165,7 @@ void __cpuinit bfin_setup_caches(unsigned int cpu)
165#endif 165#endif
166} 166}
167 167
168void __cpuinit bfin_setup_cpudata(unsigned int cpu) 168void bfin_setup_cpudata(unsigned int cpu)
169{ 169{
170 struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu); 170 struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu);
171 171
diff --git a/arch/blackfin/mach-bf561/smp.c b/arch/blackfin/mach-bf561/smp.c
index c77a23bc9de3..11789beca75a 100644
--- a/arch/blackfin/mach-bf561/smp.c
+++ b/arch/blackfin/mach-bf561/smp.c
@@ -48,7 +48,7 @@ int __init setup_profiling_timer(unsigned int multiplier) /* not supported */
48 return -EINVAL; 48 return -EINVAL;
49} 49}
50 50
51void __cpuinit platform_secondary_init(unsigned int cpu) 51void platform_secondary_init(unsigned int cpu)
52{ 52{
53 /* Clone setup for peripheral interrupt sources from CoreA. */ 53 /* Clone setup for peripheral interrupt sources from CoreA. */
54 bfin_write_SICB_IMASK0(bfin_read_SIC_IMASK0()); 54 bfin_write_SICB_IMASK0(bfin_read_SIC_IMASK0());
@@ -73,7 +73,7 @@ void __cpuinit platform_secondary_init(unsigned int cpu)
73 spin_unlock(&boot_lock); 73 spin_unlock(&boot_lock);
74} 74}
75 75
76int __cpuinit platform_boot_secondary(unsigned int cpu, struct task_struct *idle) 76int platform_boot_secondary(unsigned int cpu, struct task_struct *idle)
77{ 77{
78 unsigned long timeout; 78 unsigned long timeout;
79 79
@@ -154,7 +154,7 @@ void platform_clear_ipi(unsigned int cpu, int irq)
154 * Setup core B's local core timer. 154 * Setup core B's local core timer.
155 * In SMP, core timer is used for clock event device. 155 * In SMP, core timer is used for clock event device.
156 */ 156 */
157void __cpuinit bfin_local_timer_setup(void) 157void bfin_local_timer_setup(void)
158{ 158{
159#if defined(CONFIG_TICKSOURCE_CORETMR) 159#if defined(CONFIG_TICKSOURCE_CORETMR)
160 struct irq_data *data = irq_get_irq_data(IRQ_CORETMR); 160 struct irq_data *data = irq_get_irq_data(IRQ_CORETMR);
diff --git a/arch/blackfin/mach-common/cache-c.c b/arch/blackfin/mach-common/cache-c.c
index a60a24f5035d..0e1e451fd7d8 100644
--- a/arch/blackfin/mach-common/cache-c.c
+++ b/arch/blackfin/mach-common/cache-c.c
@@ -52,7 +52,7 @@ bfin_cache_init(struct cplb_entry *cplb_tbl, unsigned long cplb_addr,
52} 52}
53 53
54#ifdef CONFIG_BFIN_ICACHE 54#ifdef CONFIG_BFIN_ICACHE
55void __cpuinit bfin_icache_init(struct cplb_entry *icplb_tbl) 55void bfin_icache_init(struct cplb_entry *icplb_tbl)
56{ 56{
57 bfin_cache_init(icplb_tbl, ICPLB_ADDR0, ICPLB_DATA0, IMEM_CONTROL, 57 bfin_cache_init(icplb_tbl, ICPLB_ADDR0, ICPLB_DATA0, IMEM_CONTROL,
58 (IMC | ENICPLB)); 58 (IMC | ENICPLB));
@@ -60,7 +60,7 @@ void __cpuinit bfin_icache_init(struct cplb_entry *icplb_tbl)
60#endif 60#endif
61 61
62#ifdef CONFIG_BFIN_DCACHE 62#ifdef CONFIG_BFIN_DCACHE
63void __cpuinit bfin_dcache_init(struct cplb_entry *dcplb_tbl) 63void bfin_dcache_init(struct cplb_entry *dcplb_tbl)
64{ 64{
65 /* 65 /*
66 * Anomaly notes: 66 * Anomaly notes:
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index 6c0c6816a51a..d143fd8d2bc5 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -1281,7 +1281,7 @@ static struct irq_chip bfin_gpio_irqchip = {
1281 .irq_set_wake = bfin_gpio_set_wake, 1281 .irq_set_wake = bfin_gpio_set_wake,
1282}; 1282};
1283 1283
1284void __cpuinit init_exception_vectors(void) 1284void init_exception_vectors(void)
1285{ 1285{
1286 /* cannot program in software: 1286 /* cannot program in software:
1287 * evt0 - emulation (jtag) 1287 * evt0 - emulation (jtag)
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 961d8392e5e3..82f301c117a5 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -46,7 +46,7 @@ struct corelock_slot corelock __attribute__ ((__section__(".l2.bss")));
46unsigned long blackfin_iflush_l1_entry[NR_CPUS]; 46unsigned long blackfin_iflush_l1_entry[NR_CPUS];
47#endif 47#endif
48 48
49struct blackfin_initial_pda __cpuinitdata initial_pda_coreb; 49struct blackfin_initial_pda initial_pda_coreb;
50 50
51enum ipi_message_type { 51enum ipi_message_type {
52 BFIN_IPI_NONE, 52 BFIN_IPI_NONE,
@@ -147,7 +147,7 @@ static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)
147 platform_clear_ipi(cpu, IRQ_SUPPLE_1); 147 platform_clear_ipi(cpu, IRQ_SUPPLE_1);
148 148
149 bfin_ipi_data = &__get_cpu_var(bfin_ipi); 149 bfin_ipi_data = &__get_cpu_var(bfin_ipi);
150 while ((pending = xchg(&bfin_ipi_data->bits, 0)) != 0) { 150 while ((pending = atomic_xchg(&bfin_ipi_data->bits, 0)) != 0) {
151 msg = 0; 151 msg = 0;
152 do { 152 do {
153 msg = find_next_bit(&pending, BITS_PER_LONG, msg + 1); 153 msg = find_next_bit(&pending, BITS_PER_LONG, msg + 1);
@@ -182,8 +182,8 @@ static void bfin_ipi_init(void)
182 struct ipi_data *bfin_ipi_data; 182 struct ipi_data *bfin_ipi_data;
183 for_each_possible_cpu(cpu) { 183 for_each_possible_cpu(cpu) {
184 bfin_ipi_data = &per_cpu(bfin_ipi, cpu); 184 bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
185 bfin_ipi_data->bits = 0; 185 atomic_set(&bfin_ipi_data->bits, 0);
186 bfin_ipi_data->count = 0; 186 atomic_set(&bfin_ipi_data->count, 0);
187 } 187 }
188} 188}
189 189
@@ -246,7 +246,7 @@ void smp_send_stop(void)
246 return; 246 return;
247} 247}
248 248
249int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle) 249int __cpu_up(unsigned int cpu, struct task_struct *idle)
250{ 250{
251 int ret; 251 int ret;
252 252
@@ -259,7 +259,7 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
259 return ret; 259 return ret;
260} 260}
261 261
262static void __cpuinit setup_secondary(unsigned int cpu) 262static void setup_secondary(unsigned int cpu)
263{ 263{
264 unsigned long ilat; 264 unsigned long ilat;
265 265
@@ -277,7 +277,7 @@ static void __cpuinit setup_secondary(unsigned int cpu)
277 IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW; 277 IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
278} 278}
279 279
280void __cpuinit secondary_start_kernel(void) 280void secondary_start_kernel(void)
281{ 281{
282 unsigned int cpu = smp_processor_id(); 282 unsigned int cpu = smp_processor_id();
283 struct mm_struct *mm = &init_mm; 283 struct mm_struct *mm = &init_mm;
@@ -402,7 +402,7 @@ EXPORT_SYMBOL(resync_core_dcache);
402#endif 402#endif
403 403
404#ifdef CONFIG_HOTPLUG_CPU 404#ifdef CONFIG_HOTPLUG_CPU
405int __cpuexit __cpu_disable(void) 405int __cpu_disable(void)
406{ 406{
407 unsigned int cpu = smp_processor_id(); 407 unsigned int cpu = smp_processor_id();
408 408
@@ -415,7 +415,7 @@ int __cpuexit __cpu_disable(void)
415 415
416static DECLARE_COMPLETION(cpu_killed); 416static DECLARE_COMPLETION(cpu_killed);
417 417
418int __cpuexit __cpu_die(unsigned int cpu) 418int __cpu_die(unsigned int cpu)
419{ 419{
420 return wait_for_completion_timeout(&cpu_killed, 5000); 420 return wait_for_completion_timeout(&cpu_killed, 5000);
421} 421}
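
The notable functional change in this file, besides the annotation removal, is switching the per-CPU IPI bits and count fields to atomic_t; the consume-everything pattern stays the same: atomically swap the pending mask to zero, then walk its set bits. A rough userspace equivalent with C11 atomics (message names and numbering are invented for the sketch, not Blackfin's):

#include <stdatomic.h>
#include <stdio.h>

enum { IPI_NONE, IPI_RESCHEDULE, IPI_CALL_FUNC, IPI_CPU_STOP };

static atomic_ulong ipi_bits;          /* one bit per pending message type */

static void send_ipi(int msg)
{
	atomic_fetch_or(&ipi_bits, 1UL << msg);
	/* (real code would now raise the supplemental interrupt) */
}

/* Handler side: grab everything pending in one atomic swap, then process
 * each set bit; concurrent senders just set bits for the next pass. */
static void ipi_handler(void)
{
	unsigned long pending;

	while ((pending = atomic_exchange(&ipi_bits, 0UL)) != 0) {
		while (pending) {
			int msg = __builtin_ctzl(pending);   /* lowest set bit */

			printf("handling IPI message %d\n", msg);
			pending &= pending - 1;              /* clear it */
		}
	}
}

int main(void)
{
	send_ipi(IPI_RESCHEDULE);
	send_ipi(IPI_CALL_FUNC);
	ipi_handler();
	return 0;
}
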
diff --git a/arch/cris/arch-v32/kernel/smp.c b/arch/cris/arch-v32/kernel/smp.c
index cdd12028de0c..fe8e6039db2a 100644
--- a/arch/cris/arch-v32/kernel/smp.c
+++ b/arch/cris/arch-v32/kernel/smp.c
@@ -197,7 +197,7 @@ int setup_profiling_timer(unsigned int multiplier)
197 */ 197 */
198unsigned long cache_decay_ticks = 1; 198unsigned long cache_decay_ticks = 1;
199 199
200int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle) 200int __cpu_up(unsigned int cpu, struct task_struct *tidle)
201{ 201{
202 smp_boot_one_cpu(cpu, tidle); 202 smp_boot_one_cpu(cpu, tidle);
203 return cpu_online(cpu) ? 0 : -ENOSYS; 203 return cpu_online(cpu) ? 0 : -ENOSYS;
diff --git a/arch/frv/kernel/setup.c b/arch/frv/kernel/setup.c
index ae3a6706419b..9f3a7a62d787 100644
--- a/arch/frv/kernel/setup.c
+++ b/arch/frv/kernel/setup.c
@@ -709,7 +709,7 @@ static void __init reserve_dma_coherent(void)
709/* 709/*
710 * calibrate the delay loop 710 * calibrate the delay loop
711 */ 711 */
712void __cpuinit calibrate_delay(void) 712void calibrate_delay(void)
713{ 713{
714 loops_per_jiffy = __delay_loops_MHz * (1000000 / HZ); 714 loops_per_jiffy = __delay_loops_MHz * (1000000 / HZ);
715 715
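
This calibrate_delay() (and the hexagon one in the next hunk) derives loops_per_jiffy directly from a clock rating instead of timing a busy loop: a 200 MHz rating with HZ=100 gives 200 * (1000000/100) = 2,000,000 loops per jiffy. A trivial check of that arithmetic (the 200 MHz figure and HZ value are only examples):

#include <stdio.h>

#define HZ 100                               /* example tick rate */

int main(void)
{
	unsigned long delay_loops_mhz = 200; /* assumed CPU rating in MHz */
	unsigned long loops_per_jiffy = delay_loops_mhz * (1000000 / HZ);

	printf("loops_per_jiffy = %lu\n", loops_per_jiffy);  /* 2000000 */
	return 0;
}
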
diff --git a/arch/hexagon/kernel/setup.c b/arch/hexagon/kernel/setup.c
index bfe13311d70d..29d1f1b00016 100644
--- a/arch/hexagon/kernel/setup.c
+++ b/arch/hexagon/kernel/setup.c
@@ -41,7 +41,7 @@ static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
41 41
42int on_simulator; 42int on_simulator;
43 43
44void __cpuinit calibrate_delay(void) 44void calibrate_delay(void)
45{ 45{
46 loops_per_jiffy = thread_freq_mhz * 1000000 / HZ; 46 loops_per_jiffy = thread_freq_mhz * 1000000 / HZ;
47} 47}
diff --git a/arch/hexagon/kernel/smp.c b/arch/hexagon/kernel/smp.c
index 0e364ca43198..9faaa940452b 100644
--- a/arch/hexagon/kernel/smp.c
+++ b/arch/hexagon/kernel/smp.c
@@ -146,7 +146,7 @@ void __init smp_prepare_boot_cpu(void)
146 * to point to current thread info 146 * to point to current thread info
147 */ 147 */
148 148
149void __cpuinit start_secondary(void) 149void start_secondary(void)
150{ 150{
151 unsigned int cpu; 151 unsigned int cpu;
152 unsigned long thread_ptr; 152 unsigned long thread_ptr;
@@ -194,7 +194,7 @@ void __cpuinit start_secondary(void)
194 * maintains control until "cpu_online(cpu)" is set. 194 * maintains control until "cpu_online(cpu)" is set.
195 */ 195 */
196 196
197int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle) 197int __cpu_up(unsigned int cpu, struct task_struct *idle)
198{ 198{
199 struct thread_info *thread = (struct thread_info *)idle->stack; 199 struct thread_info *thread = (struct thread_info *)idle->stack;
200 void *stack_start; 200 void *stack_start;
diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c
index 0ac558adc605..bb21f4f63170 100644
--- a/arch/m32r/kernel/smpboot.c
+++ b/arch/m32r/kernel/smpboot.c
@@ -343,7 +343,7 @@ static void __init do_boot_cpu(int phys_id)
343 } 343 }
344} 344}
345 345
346int __cpuinit __cpu_up(unsigned int cpu_id, struct task_struct *tidle) 346int __cpu_up(unsigned int cpu_id, struct task_struct *tidle)
347{ 347{
348 int timeout; 348 int timeout;
349 349
diff --git a/arch/metag/kernel/perf/perf_event.c b/arch/metag/kernel/perf/perf_event.c
index 5b18888ee364..5cc4d4dcf3cf 100644
--- a/arch/metag/kernel/perf/perf_event.c
+++ b/arch/metag/kernel/perf/perf_event.c
@@ -813,8 +813,8 @@ static struct metag_pmu _metag_pmu = {
813}; 813};
814 814
815/* PMU CPU hotplug notifier */ 815/* PMU CPU hotplug notifier */
816static int __cpuinit metag_pmu_cpu_notify(struct notifier_block *b, 816static int metag_pmu_cpu_notify(struct notifier_block *b, unsigned long action,
817 unsigned long action, void *hcpu) 817 void *hcpu)
818{ 818{
819 unsigned int cpu = (unsigned int)hcpu; 819 unsigned int cpu = (unsigned int)hcpu;
820 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); 820 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
@@ -828,7 +828,7 @@ static int __cpuinit metag_pmu_cpu_notify(struct notifier_block *b,
828 return NOTIFY_OK; 828 return NOTIFY_OK;
829} 829}
830 830
831static struct notifier_block __cpuinitdata metag_pmu_notifier = { 831static struct notifier_block metag_pmu_notifier = {
832 .notifier_call = metag_pmu_cpu_notify, 832 .notifier_call = metag_pmu_cpu_notify,
833}; 833};
834 834
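
Dropping __cpuinit/__cpuinitdata leaves the usual hotplug notifier shape untouched: a static notifier_block whose callback inspects the action for the CPU in question. A self-contained model of that dispatch (the struct layout, action values and direct call are simplified stand-ins for the kernel's notifier chain):

#include <stdio.h>

#define CPU_STARTING 1
#define CPU_DEAD     2
#define NOTIFY_OK    0

struct notifier_block {
	int (*notifier_call)(struct notifier_block *nb,
			     unsigned long action, void *hcpu);
};

/* Mirrors the shape of metag_pmu_cpu_notify(): reset per-CPU PMU state
 * when a CPU comes up, ignore everything else. */
static int pmu_cpu_notify(struct notifier_block *nb,
			  unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(unsigned long)hcpu;

	if (action == CPU_STARTING)
		printf("reset PMU state for cpu %u\n", cpu);
	return NOTIFY_OK;
}

static struct notifier_block pmu_notifier = {
	.notifier_call = pmu_cpu_notify,
};

int main(void)
{
	/* The kernel walks a notifier chain; call the callback directly here. */
	pmu_notifier.notifier_call(&pmu_notifier, CPU_STARTING, (void *)2UL);
	pmu_notifier.notifier_call(&pmu_notifier, CPU_DEAD, (void *)2UL);
	return 0;
}
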
diff --git a/arch/metag/kernel/smp.c b/arch/metag/kernel/smp.c
index e413875cf6d2..7c0113142981 100644
--- a/arch/metag/kernel/smp.c
+++ b/arch/metag/kernel/smp.c
@@ -68,7 +68,7 @@ static DECLARE_COMPLETION(cpu_running);
68/* 68/*
69 * "thread" is assumed to be a valid Meta hardware thread ID. 69 * "thread" is assumed to be a valid Meta hardware thread ID.
70 */ 70 */
71int __cpuinit boot_secondary(unsigned int thread, struct task_struct *idle) 71int boot_secondary(unsigned int thread, struct task_struct *idle)
72{ 72{
73 u32 val; 73 u32 val;
74 74
@@ -118,11 +118,9 @@ int __cpuinit boot_secondary(unsigned int thread, struct task_struct *idle)
118 * If the cache partition has changed, prints a message to the log describing 118 * If the cache partition has changed, prints a message to the log describing
119 * those changes. 119 * those changes.
120 */ 120 */
121static __cpuinit void describe_cachepart_change(unsigned int thread, 121static void describe_cachepart_change(unsigned int thread, const char *label,
122 const char *label, 122 unsigned int sz, unsigned int old,
123 unsigned int sz, 123 unsigned int new)
124 unsigned int old,
125 unsigned int new)
126{ 124{
127 unsigned int lor1, land1, gor1, gand1; 125 unsigned int lor1, land1, gor1, gand1;
128 unsigned int lor2, land2, gor2, gand2; 126 unsigned int lor2, land2, gor2, gand2;
@@ -170,7 +168,7 @@ static __cpuinit void describe_cachepart_change(unsigned int thread,
170 * Ensures that coherency is enabled and that the threads share the same cache 168 * Ensures that coherency is enabled and that the threads share the same cache
171 * partitions. 169 * partitions.
172 */ 170 */
173static __cpuinit void setup_smp_cache(unsigned int thread) 171static void setup_smp_cache(unsigned int thread)
174{ 172{
175 unsigned int this_thread, lflags; 173 unsigned int this_thread, lflags;
176 unsigned int dcsz, dcpart_this, dcpart_old, dcpart_new; 174 unsigned int dcsz, dcpart_this, dcpart_old, dcpart_new;
@@ -215,7 +213,7 @@ static __cpuinit void setup_smp_cache(unsigned int thread)
215 icpart_old, icpart_new); 213 icpart_old, icpart_new);
216} 214}
217 215
218int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle) 216int __cpu_up(unsigned int cpu, struct task_struct *idle)
219{ 217{
220 unsigned int thread = cpu_2_hwthread_id[cpu]; 218 unsigned int thread = cpu_2_hwthread_id[cpu];
221 int ret; 219 int ret;
@@ -268,7 +266,7 @@ static DECLARE_COMPLETION(cpu_killed);
268/* 266/*
269 * __cpu_disable runs on the processor to be shutdown. 267 * __cpu_disable runs on the processor to be shutdown.
270 */ 268 */
271int __cpuexit __cpu_disable(void) 269int __cpu_disable(void)
272{ 270{
273 unsigned int cpu = smp_processor_id(); 271 unsigned int cpu = smp_processor_id();
274 272
@@ -299,7 +297,7 @@ int __cpuexit __cpu_disable(void)
299 * called on the thread which is asking for a CPU to be shutdown - 297 * called on the thread which is asking for a CPU to be shutdown -
300 * waits until shutdown has completed, or it is timed out. 298 * waits until shutdown has completed, or it is timed out.
301 */ 299 */
302void __cpuexit __cpu_die(unsigned int cpu) 300void __cpu_die(unsigned int cpu)
303{ 301{
304 if (!wait_for_completion_timeout(&cpu_killed, msecs_to_jiffies(1))) 302 if (!wait_for_completion_timeout(&cpu_killed, msecs_to_jiffies(1)))
305 pr_err("CPU%u: unable to kill\n", cpu); 303 pr_err("CPU%u: unable to kill\n", cpu);
@@ -311,7 +309,7 @@ void __cpuexit __cpu_die(unsigned int cpu)
311 * Note that we do not return from this function. If this cpu is 309 * Note that we do not return from this function. If this cpu is
312 * brought online again it will need to run secondary_startup(). 310 * brought online again it will need to run secondary_startup().
313 */ 311 */
314void __cpuexit cpu_die(void) 312void cpu_die(void)
315{ 313{
316 local_irq_disable(); 314 local_irq_disable();
317 idle_task_exit(); 315 idle_task_exit();
@@ -326,7 +324,7 @@ void __cpuexit cpu_die(void)
326 * Called by both boot and secondaries to move global data into 324 * Called by both boot and secondaries to move global data into
327 * per-processor storage. 325 * per-processor storage.
328 */ 326 */
329void __cpuinit smp_store_cpu_info(unsigned int cpuid) 327void smp_store_cpu_info(unsigned int cpuid)
330{ 328{
331 struct cpuinfo_metag *cpu_info = &per_cpu(cpu_data, cpuid); 329 struct cpuinfo_metag *cpu_info = &per_cpu(cpu_data, cpuid);
332 330
diff --git a/arch/metag/kernel/traps.c b/arch/metag/kernel/traps.c
index c00ade0228ef..25f9d1c2ffec 100644
--- a/arch/metag/kernel/traps.c
+++ b/arch/metag/kernel/traps.c
@@ -812,7 +812,7 @@ static void set_trigger_mask(unsigned int mask)
812} 812}
813#endif 813#endif
814 814
815void __cpuinit per_cpu_trap_init(unsigned long cpu) 815void per_cpu_trap_init(unsigned long cpu)
816{ 816{
817 TBIRES int_context; 817 TBIRES int_context;
818 unsigned int thread = cpu_2_hwthread_id[cpu]; 818 unsigned int thread = cpu_2_hwthread_id[cpu];
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 4758a8fd3e99..c3abed332301 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1702,6 +1702,7 @@ endchoice
1702 1702
1703config KVM_GUEST 1703config KVM_GUEST
1704 bool "KVM Guest Kernel" 1704 bool "KVM Guest Kernel"
1705 depends on BROKEN_ON_SMP
1705 help 1706 help
1706 Select this option if building a guest kernel for KVM (Trap & Emulate) mode 1707 Select this option if building a guest kernel for KVM (Trap & Emulate) mode
1707 1708
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index 8be4e856b8b8..80f4ecd42b0d 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -182,7 +182,7 @@ const char *get_system_type(void)
182 return ath79_sys_type; 182 return ath79_sys_type;
183} 183}
184 184
185unsigned int __cpuinit get_c0_compare_int(void) 185unsigned int get_c0_compare_int(void)
186{ 186{
187 return CP0_LEGACY_COMPARE_IRQ; 187 return CP0_LEGACY_COMPARE_IRQ;
188} 188}
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 7181def6037a..9d36774bded1 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -1095,7 +1095,7 @@ static void octeon_irq_ip3_ciu(void)
1095 1095
1096static bool octeon_irq_use_ip4; 1096static bool octeon_irq_use_ip4;
1097 1097
1098static void __cpuinit octeon_irq_local_enable_ip4(void *arg) 1098static void octeon_irq_local_enable_ip4(void *arg)
1099{ 1099{
1100 set_c0_status(STATUSF_IP4); 1100 set_c0_status(STATUSF_IP4);
1101} 1101}
@@ -1110,21 +1110,21 @@ static void (*octeon_irq_ip2)(void);
1110static void (*octeon_irq_ip3)(void); 1110static void (*octeon_irq_ip3)(void);
1111static void (*octeon_irq_ip4)(void); 1111static void (*octeon_irq_ip4)(void);
1112 1112
1113void __cpuinitdata (*octeon_irq_setup_secondary)(void); 1113void (*octeon_irq_setup_secondary)(void);
1114 1114
1115void __cpuinit octeon_irq_set_ip4_handler(octeon_irq_ip4_handler_t h) 1115void octeon_irq_set_ip4_handler(octeon_irq_ip4_handler_t h)
1116{ 1116{
1117 octeon_irq_ip4 = h; 1117 octeon_irq_ip4 = h;
1118 octeon_irq_use_ip4 = true; 1118 octeon_irq_use_ip4 = true;
1119 on_each_cpu(octeon_irq_local_enable_ip4, NULL, 1); 1119 on_each_cpu(octeon_irq_local_enable_ip4, NULL, 1);
1120} 1120}
1121 1121
1122static void __cpuinit octeon_irq_percpu_enable(void) 1122static void octeon_irq_percpu_enable(void)
1123{ 1123{
1124 irq_cpu_online(); 1124 irq_cpu_online();
1125} 1125}
1126 1126
1127static void __cpuinit octeon_irq_init_ciu_percpu(void) 1127static void octeon_irq_init_ciu_percpu(void)
1128{ 1128{
1129 int coreid = cvmx_get_core_num(); 1129 int coreid = cvmx_get_core_num();
1130 1130
@@ -1167,7 +1167,7 @@ static void octeon_irq_init_ciu2_percpu(void)
1167 cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(coreid)); 1167 cvmx_read_csr(CVMX_CIU2_SUM_PPX_IP2(coreid));
1168} 1168}
1169 1169
1170static void __cpuinit octeon_irq_setup_secondary_ciu(void) 1170static void octeon_irq_setup_secondary_ciu(void)
1171{ 1171{
1172 octeon_irq_init_ciu_percpu(); 1172 octeon_irq_init_ciu_percpu();
1173 octeon_irq_percpu_enable(); 1173 octeon_irq_percpu_enable();
diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c
index 7b746e7bf7a1..1830874ff1e2 100644
--- a/arch/mips/cavium-octeon/octeon-platform.c
+++ b/arch/mips/cavium-octeon/octeon-platform.c
@@ -334,9 +334,10 @@ static void __init octeon_fdt_pip_iface(int pip, int idx, u64 *pmac)
334 char name_buffer[20]; 334 char name_buffer[20];
335 int iface; 335 int iface;
336 int p; 336 int p;
337 int count; 337 int count = 0;
338 338
339 count = cvmx_helper_interface_enumerate(idx); 339 if (cvmx_helper_interface_enumerate(idx) == 0)
340 count = cvmx_helper_ports_on_interface(idx);
340 341
341 snprintf(name_buffer, sizeof(name_buffer), "interface@%d", idx); 342 snprintf(name_buffer, sizeof(name_buffer), "interface@%d", idx);
342 iface = fdt_subnode_offset(initial_boot_params, pip, name_buffer); 343 iface = fdt_subnode_offset(initial_boot_params, pip, name_buffer);
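
The octeon_fdt_pip_iface() hunk above is a functional fix rather than an annotation change: count now defaults to 0, the return of cvmx_helper_interface_enumerate() is treated as a status, and the port count is fetched separately only when enumeration succeeds. A stub version just to show the control flow (both helpers here are placeholders, not the cvmx API):

#include <stdio.h>

/* Placeholders standing in for the cvmx_helper_* calls. */
static int enumerate_interface(int idx) { (void)idx; return 0; }  /* 0 == ok */
static int ports_on_interface(int idx)  { (void)idx; return 4; }  /* example */

int main(void)
{
	int idx = 1;
	int count = 0;                  /* default when enumeration fails */

	if (enumerate_interface(idx) == 0)
		count = ports_on_interface(idx);

	printf("interface@%d: %d ports\n", idx, count);
	return 0;
}
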
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index 295137dfdc37..138cc80c5928 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -173,7 +173,7 @@ static void octeon_boot_secondary(int cpu, struct task_struct *idle)
173 * After we've done initial boot, this function is called to allow the 173 * After we've done initial boot, this function is called to allow the
174 * board code to clean up state, if needed 174 * board code to clean up state, if needed
175 */ 175 */
176static void __cpuinit octeon_init_secondary(void) 176static void octeon_init_secondary(void)
177{ 177{
178 unsigned int sr; 178 unsigned int sr;
179 179
@@ -375,7 +375,7 @@ static int octeon_update_boot_vector(unsigned int cpu)
375 return 0; 375 return 0;
376} 376}
377 377
378static int __cpuinit octeon_cpu_callback(struct notifier_block *nfb, 378static int octeon_cpu_callback(struct notifier_block *nfb,
379 unsigned long action, void *hcpu) 379 unsigned long action, void *hcpu)
380{ 380{
381 unsigned int cpu = (unsigned long)hcpu; 381 unsigned int cpu = (unsigned long)hcpu;
@@ -394,7 +394,7 @@ static int __cpuinit octeon_cpu_callback(struct notifier_block *nfb,
394 return NOTIFY_OK; 394 return NOTIFY_OK;
395} 395}
396 396
397static int __cpuinit register_cavium_notifier(void) 397static int register_cavium_notifier(void)
398{ 398{
399 hotcpu_notifier(octeon_cpu_callback, 0); 399 hotcpu_notifier(octeon_cpu_callback, 0);
400 return 0; 400 return 0;
diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h
index 370d967725c2..c33a9564fb41 100644
--- a/arch/mips/include/asm/uasm.h
+++ b/arch/mips/include/asm/uasm.h
@@ -13,12 +13,8 @@
13 13
14#ifdef CONFIG_EXPORT_UASM 14#ifdef CONFIG_EXPORT_UASM
15#include <linux/export.h> 15#include <linux/export.h>
16#define __uasminit
17#define __uasminitdata
18#define UASM_EXPORT_SYMBOL(sym) EXPORT_SYMBOL(sym) 16#define UASM_EXPORT_SYMBOL(sym) EXPORT_SYMBOL(sym)
19#else 17#else
20#define __uasminit __cpuinit
21#define __uasminitdata __cpuinitdata
22#define UASM_EXPORT_SYMBOL(sym) 18#define UASM_EXPORT_SYMBOL(sym)
23#endif 19#endif
24 20
@@ -54,43 +50,36 @@
54#endif 50#endif
55 51
56#define Ip_u1u2u3(op) \ 52#define Ip_u1u2u3(op) \
57void __uasminit \ 53void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
58ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
59 54
60#define Ip_u2u1u3(op) \ 55#define Ip_u2u1u3(op) \
61void __uasminit \ 56void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
62ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
63 57
64#define Ip_u3u1u2(op) \ 58#define Ip_u3u1u2(op) \
65void __uasminit \ 59void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
66ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c)
67 60
68#define Ip_u1u2s3(op) \ 61#define Ip_u1u2s3(op) \
69void __uasminit \ 62void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)
70ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)
71 63
72#define Ip_u2s3u1(op) \ 64#define Ip_u2s3u1(op) \
73void __uasminit \ 65void ISAOPC(op)(u32 **buf, unsigned int a, signed int b, unsigned int c)
74ISAOPC(op)(u32 **buf, unsigned int a, signed int b, unsigned int c)
75 66
76#define Ip_u2u1s3(op) \ 67#define Ip_u2u1s3(op) \
77void __uasminit \ 68void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)
78ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)
79 69
80#define Ip_u2u1msbu3(op) \ 70#define Ip_u2u1msbu3(op) \
81void __uasminit \ 71void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c, \
82ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c, \
83 unsigned int d) 72 unsigned int d)
84 73
85#define Ip_u1u2(op) \ 74#define Ip_u1u2(op) \
86void __uasminit ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b) 75void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b)
87 76
88#define Ip_u1s2(op) \ 77#define Ip_u1s2(op) \
89void __uasminit ISAOPC(op)(u32 **buf, unsigned int a, signed int b) 78void ISAOPC(op)(u32 **buf, unsigned int a, signed int b)
90 79
91#define Ip_u1(op) void __uasminit ISAOPC(op)(u32 **buf, unsigned int a) 80#define Ip_u1(op) void ISAOPC(op)(u32 **buf, unsigned int a)
92 81
93#define Ip_0(op) void __uasminit ISAOPC(op)(u32 **buf) 82#define Ip_0(op) void ISAOPC(op)(u32 **buf)
94 83
95Ip_u2u1s3(_addiu); 84Ip_u2u1s3(_addiu);
96Ip_u3u1u2(_addu); 85Ip_u3u1u2(_addu);
@@ -163,7 +152,7 @@ struct uasm_label {
163 int lab; 152 int lab;
164}; 153};
165 154
166void __uasminit ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr, 155void ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr,
167 int lid); 156 int lid);
168#ifdef CONFIG_64BIT 157#ifdef CONFIG_64BIT
169int ISAFUNC(uasm_in_compat_space_p)(long addr); 158int ISAFUNC(uasm_in_compat_space_p)(long addr);
@@ -174,7 +163,7 @@ void ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr);
174void ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr); 163void ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr);
175 164
176#define UASM_L_LA(lb) \ 165#define UASM_L_LA(lb) \
177static inline void __uasminit ISAFUNC(uasm_l##lb)(struct uasm_label **lab, u32 *addr) \ 166static inline void ISAFUNC(uasm_l##lb)(struct uasm_label **lab, u32 *addr) \
178{ \ 167{ \
179 ISAFUNC(uasm_build_label)(lab, addr, label##lb); \ 168 ISAFUNC(uasm_build_label)(lab, addr, label##lb); \
180} 169}
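
With __uasminit gone, the Ip_* helpers above are pure prototype generators: each macro pastes the operation name through ISAOPC() and emits a function declaration with the given operand signature. A small demo of the expansion, assuming ISAOPC(op) simply prefixes uasm_i as in the non-microMIPS case (the prefix choice and the toy emitter body are assumptions, not the real uasm implementation):

#include <stdio.h>

typedef unsigned int u32;

/* Assumed non-microMIPS spelling of ISAOPC(). */
#define ISAOPC(op) uasm_i##op

/* Same shape as the header's macro after the patch: no section marker. */
#define Ip_u2u1s3(op) \
void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)

Ip_u2u1s3(_addiu);  /* declares: void uasm_i_addiu(u32 **, uint, uint, int) */

/* A toy definition so the program links; the real emitter encodes a
 * MIPS addiu instruction into *buf and advances the pointer. */
Ip_u2u1s3(_addiu)
{
	printf("emit addiu r%u, r%u, %d at %p\n", a, b, c, (void *)*buf);
	(*buf)++;
}

int main(void)
{
	u32 insns[4], *p = insns;

	uasm_i_addiu(&p, 4, 29, -32);   /* e.g. addiu $a0, $sp, -32 */
	return 0;
}
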
diff --git a/arch/mips/kernel/bmips_vec.S b/arch/mips/kernel/bmips_vec.S
index 64c4fd62cf08..f739aedcb509 100644
--- a/arch/mips/kernel/bmips_vec.S
+++ b/arch/mips/kernel/bmips_vec.S
@@ -28,8 +28,6 @@
28 .set mips0 28 .set mips0
29 .endm 29 .endm
30 30
31 __CPUINIT
32
33/*********************************************************************** 31/***********************************************************************
34 * Alternate CPU1 startup vector for BMIPS4350 32 * Alternate CPU1 startup vector for BMIPS4350
35 * 33 *
@@ -216,8 +214,6 @@ END(bmips_smp_int_vec)
216 * Certain CPUs support extending kseg0 to 1024MB. 214 * Certain CPUs support extending kseg0 to 1024MB.
217 ***********************************************************************/ 215 ***********************************************************************/
218 216
219 __CPUINIT
220
221LEAF(bmips_enable_xks01) 217LEAF(bmips_enable_xks01)
222 218
223#if defined(CONFIG_XKS01) 219#if defined(CONFIG_XKS01)
diff --git a/arch/mips/kernel/cevt-bcm1480.c b/arch/mips/kernel/cevt-bcm1480.c
index 15f618b40cf6..7976457184b1 100644
--- a/arch/mips/kernel/cevt-bcm1480.c
+++ b/arch/mips/kernel/cevt-bcm1480.c
@@ -109,7 +109,7 @@ static DEFINE_PER_CPU(struct clock_event_device, sibyte_hpt_clockevent);
109static DEFINE_PER_CPU(struct irqaction, sibyte_hpt_irqaction); 109static DEFINE_PER_CPU(struct irqaction, sibyte_hpt_irqaction);
110static DEFINE_PER_CPU(char [18], sibyte_hpt_name); 110static DEFINE_PER_CPU(char [18], sibyte_hpt_name);
111 111
112void __cpuinit sb1480_clockevent_init(void) 112void sb1480_clockevent_init(void)
113{ 113{
114 unsigned int cpu = smp_processor_id(); 114 unsigned int cpu = smp_processor_id();
115 unsigned int irq = K_BCM1480_INT_TIMER_0 + cpu; 115 unsigned int irq = K_BCM1480_INT_TIMER_0 + cpu;
diff --git a/arch/mips/kernel/cevt-gic.c b/arch/mips/kernel/cevt-gic.c
index 730eaf92c018..594cbbf16d62 100644
--- a/arch/mips/kernel/cevt-gic.c
+++ b/arch/mips/kernel/cevt-gic.c
@@ -59,7 +59,7 @@ void gic_event_handler(struct clock_event_device *dev)
59{ 59{
60} 60}
61 61
62int __cpuinit gic_clockevent_init(void) 62int gic_clockevent_init(void)
63{ 63{
64 unsigned int cpu = smp_processor_id(); 64 unsigned int cpu = smp_processor_id();
65 struct clock_event_device *cd; 65 struct clock_event_device *cd;
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 02033eaf8825..50d3f5a8d6bb 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -171,7 +171,7 @@ int c0_compare_int_usable(void)
171} 171}
172 172
173#ifndef CONFIG_MIPS_MT_SMTC 173#ifndef CONFIG_MIPS_MT_SMTC
174int __cpuinit r4k_clockevent_init(void) 174int r4k_clockevent_init(void)
175{ 175{
176 unsigned int cpu = smp_processor_id(); 176 unsigned int cpu = smp_processor_id();
177 struct clock_event_device *cd; 177 struct clock_event_device *cd;
diff --git a/arch/mips/kernel/cevt-sb1250.c b/arch/mips/kernel/cevt-sb1250.c
index 200f2778bf36..5ea6d6b1de15 100644
--- a/arch/mips/kernel/cevt-sb1250.c
+++ b/arch/mips/kernel/cevt-sb1250.c
@@ -107,7 +107,7 @@ static DEFINE_PER_CPU(struct clock_event_device, sibyte_hpt_clockevent);
107static DEFINE_PER_CPU(struct irqaction, sibyte_hpt_irqaction); 107static DEFINE_PER_CPU(struct irqaction, sibyte_hpt_irqaction);
108static DEFINE_PER_CPU(char [18], sibyte_hpt_name); 108static DEFINE_PER_CPU(char [18], sibyte_hpt_name);
109 109
110void __cpuinit sb1250_clockevent_init(void) 110void sb1250_clockevent_init(void)
111{ 111{
112 unsigned int cpu = smp_processor_id(); 112 unsigned int cpu = smp_processor_id();
113 unsigned int irq = K_INT_TIMER_0 + cpu; 113 unsigned int irq = K_INT_TIMER_0 + cpu;
diff --git a/arch/mips/kernel/cevt-smtc.c b/arch/mips/kernel/cevt-smtc.c
index 9de5ed7ef1a3..b6cf0a60d896 100644
--- a/arch/mips/kernel/cevt-smtc.c
+++ b/arch/mips/kernel/cevt-smtc.c
@@ -248,7 +248,7 @@ irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
248} 248}
249 249
250 250
251int __cpuinit smtc_clockevent_init(void) 251int smtc_clockevent_init(void)
252{ 252{
253 uint64_t mips_freq = mips_hpt_frequency; 253 uint64_t mips_freq = mips_hpt_frequency;
254 unsigned int cpu = smp_processor_id(); 254 unsigned int cpu = smp_processor_id();
diff --git a/arch/mips/kernel/cpu-bugs64.c b/arch/mips/kernel/cpu-bugs64.c
index 0c61df281ce6..2d80b5f1aeae 100644
--- a/arch/mips/kernel/cpu-bugs64.c
+++ b/arch/mips/kernel/cpu-bugs64.c
@@ -168,7 +168,7 @@ static inline void check_mult_sh(void)
168 panic(bug64hit, !R4000_WAR ? r4kwar : nowar); 168 panic(bug64hit, !R4000_WAR ? r4kwar : nowar);
169} 169}
170 170
171static volatile int daddi_ov __cpuinitdata; 171static volatile int daddi_ov;
172 172
173asmlinkage void __init do_daddi_ov(struct pt_regs *regs) 173asmlinkage void __init do_daddi_ov(struct pt_regs *regs)
174{ 174{
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index c7b1b3c5a761..4c6167a17875 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -27,7 +27,7 @@
27#include <asm/spram.h> 27#include <asm/spram.h>
28#include <asm/uaccess.h> 28#include <asm/uaccess.h>
29 29
30static int __cpuinitdata mips_fpu_disabled; 30static int mips_fpu_disabled;
31 31
32static int __init fpu_disable(char *s) 32static int __init fpu_disable(char *s)
33{ 33{
@@ -39,7 +39,7 @@ static int __init fpu_disable(char *s)
39 39
40__setup("nofpu", fpu_disable); 40__setup("nofpu", fpu_disable);
41 41
42int __cpuinitdata mips_dsp_disabled; 42int mips_dsp_disabled;
43 43
44static int __init dsp_disable(char *s) 44static int __init dsp_disable(char *s)
45{ 45{
@@ -134,7 +134,7 @@ static inline void cpu_probe_vmbits(struct cpuinfo_mips *c)
134#endif 134#endif
135} 135}
136 136
137static void __cpuinit set_isa(struct cpuinfo_mips *c, unsigned int isa) 137static void set_isa(struct cpuinfo_mips *c, unsigned int isa)
138{ 138{
139 switch (isa) { 139 switch (isa) {
140 case MIPS_CPU_ISA_M64R2: 140 case MIPS_CPU_ISA_M64R2:
@@ -159,7 +159,7 @@ static void __cpuinit set_isa(struct cpuinfo_mips *c, unsigned int isa)
159 } 159 }
160} 160}
161 161
162static char unknown_isa[] __cpuinitdata = KERN_ERR \ 162static char unknown_isa[] = KERN_ERR \
163 "Unsupported ISA type, c0.config0: %d."; 163 "Unsupported ISA type, c0.config0: %d.";
164 164
165static inline unsigned int decode_config0(struct cpuinfo_mips *c) 165static inline unsigned int decode_config0(struct cpuinfo_mips *c)
@@ -290,7 +290,7 @@ static inline unsigned int decode_config4(struct cpuinfo_mips *c)
290 return config4 & MIPS_CONF_M; 290 return config4 & MIPS_CONF_M;
291} 291}
292 292
293static void __cpuinit decode_configs(struct cpuinfo_mips *c) 293static void decode_configs(struct cpuinfo_mips *c)
294{ 294{
295 int ok; 295 int ok;
296 296
@@ -962,7 +962,7 @@ EXPORT_SYMBOL(__ua_limit);
962const char *__cpu_name[NR_CPUS]; 962const char *__cpu_name[NR_CPUS];
963const char *__elf_platform; 963const char *__elf_platform;
964 964
965__cpuinit void cpu_probe(void) 965void cpu_probe(void)
966{ 966{
967 struct cpuinfo_mips *c = &current_cpu_data; 967 struct cpuinfo_mips *c = &current_cpu_data;
968 unsigned int cpu = smp_processor_id(); 968 unsigned int cpu = smp_processor_id();
@@ -1047,7 +1047,7 @@ __cpuinit void cpu_probe(void)
1047#endif 1047#endif
1048} 1048}
1049 1049
1050__cpuinit void cpu_report(void) 1050void cpu_report(void)
1051{ 1051{
1052 struct cpuinfo_mips *c = &current_cpu_data; 1052 struct cpuinfo_mips *c = &current_cpu_data;
1053 1053
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index 099912324423..7b6a5b3e3acf 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -158,8 +158,6 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
158 j start_kernel 158 j start_kernel
159 END(kernel_entry) 159 END(kernel_entry)
160 160
161 __CPUINIT
162
163#ifdef CONFIG_SMP 161#ifdef CONFIG_SMP
164/* 162/*
165 * SMP slave cpus entry point. Board specific code for bootstrap calls this 163 * SMP slave cpus entry point. Board specific code for bootstrap calls this
@@ -188,5 +186,3 @@ NESTED(smp_bootstrap, 16, sp)
188 j start_secondary 186 j start_secondary
189 END(smp_bootstrap) 187 END(smp_bootstrap)
190#endif /* CONFIG_SMP */ 188#endif /* CONFIG_SMP */
191
192 __FINIT
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index aea6c0885838..c0bb4d59076a 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -173,7 +173,7 @@ static void bmips_boot_secondary(int cpu, struct task_struct *idle)
173 else { 173 else {
174#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380) 174#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380)
175 /* Reset slave TP1 if booting from TP0 */ 175 /* Reset slave TP1 if booting from TP0 */
176 if (cpu_logical_map(cpu) == 0) 176 if (cpu_logical_map(cpu) == 1)
177 set_c0_brcm_cmt_ctrl(0x01); 177 set_c0_brcm_cmt_ctrl(0x01);
178#elif defined(CONFIG_CPU_BMIPS5000) 178#elif defined(CONFIG_CPU_BMIPS5000)
179 if (cpu & 0x01) 179 if (cpu & 0x01)
@@ -398,7 +398,7 @@ struct plat_smp_ops bmips_smp_ops = {
398 * UP BMIPS systems as well. 398 * UP BMIPS systems as well.
399 ***********************************************************************/ 399 ***********************************************************************/
400 400
401static void __cpuinit bmips_wr_vec(unsigned long dst, char *start, char *end) 401static void bmips_wr_vec(unsigned long dst, char *start, char *end)
402{ 402{
403 memcpy((void *)dst, start, end - start); 403 memcpy((void *)dst, start, end - start);
404 dma_cache_wback((unsigned long)start, end - start); 404 dma_cache_wback((unsigned long)start, end - start);
@@ -406,7 +406,7 @@ static void __cpuinit bmips_wr_vec(unsigned long dst, char *start, char *end)
406 instruction_hazard(); 406 instruction_hazard();
407} 407}
408 408
409static inline void __cpuinit bmips_nmi_handler_setup(void) 409static inline void bmips_nmi_handler_setup(void)
410{ 410{
411 bmips_wr_vec(BMIPS_NMI_RESET_VEC, &bmips_reset_nmi_vec, 411 bmips_wr_vec(BMIPS_NMI_RESET_VEC, &bmips_reset_nmi_vec,
412 &bmips_reset_nmi_vec_end); 412 &bmips_reset_nmi_vec_end);
@@ -414,7 +414,7 @@ static inline void __cpuinit bmips_nmi_handler_setup(void)
414 &bmips_smp_int_vec_end); 414 &bmips_smp_int_vec_end);
415} 415}
416 416
417void __cpuinit bmips_ebase_setup(void) 417void bmips_ebase_setup(void)
418{ 418{
419 unsigned long new_ebase = ebase; 419 unsigned long new_ebase = ebase;
420 void __iomem __maybe_unused *cbr; 420 void __iomem __maybe_unused *cbr;
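
Buried in the smp-bmips.c annotation cleanup is one functional fix: the slave-TP1 reset in bmips_boot_secondary() now keys off cpu_logical_map(cpu) == 1, i.e. the physical thread of the CPU being started, which matches the "Reset slave TP1 if booting from TP0" comment. A toy logical-to-physical map makes the condition easy to read (the map contents are invented for the example):

#include <stdio.h>

/* Invented logical->physical thread map: logical CPU1 is physical TP1. */
static const int cpu_logical_map[2] = { 0, 1 };

static void boot_secondary_sketch(int cpu)
{
	/* Reset slave TP1 only when the CPU we are starting is physical
	 * thread 1 (the condition the patch corrects from == 0 to == 1). */
	if (cpu_logical_map[cpu] == 1)
		printf("cpu%d: resetting TP1 via BRCM CMT ctrl\n", cpu);
	else
		printf("cpu%d: no TP1 reset needed\n", cpu);
}

int main(void)
{
	boot_secondary_sketch(1);
	return 0;
}
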
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 3e5164c11cac..57a3f7a2b370 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -149,7 +149,7 @@ static void vsmp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
149 vsmp_send_ipi_single(i, action); 149 vsmp_send_ipi_single(i, action);
150} 150}
151 151
152static void __cpuinit vsmp_init_secondary(void) 152static void vsmp_init_secondary(void)
153{ 153{
154#ifdef CONFIG_IRQ_GIC 154#ifdef CONFIG_IRQ_GIC
155 /* This is Malta specific: IPI,performance and timer interrupts */ 155 /* This is Malta specific: IPI,performance and timer interrupts */
@@ -162,7 +162,7 @@ static void __cpuinit vsmp_init_secondary(void)
162 STATUSF_IP6 | STATUSF_IP7); 162 STATUSF_IP6 | STATUSF_IP7);
163} 163}
164 164
165static void __cpuinit vsmp_smp_finish(void) 165static void vsmp_smp_finish(void)
166{ 166{
167 /* CDFIXME: remove this? */ 167 /* CDFIXME: remove this? */
168 write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ)); 168 write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ));
@@ -188,7 +188,7 @@ static void vsmp_cpus_done(void)
188 * (unsigned long)idle->thread_info the gp 188 * (unsigned long)idle->thread_info the gp
189 * assumes a 1:1 mapping of TC => VPE 189 * assumes a 1:1 mapping of TC => VPE
190 */ 190 */
191static void __cpuinit vsmp_boot_secondary(int cpu, struct task_struct *idle) 191static void vsmp_boot_secondary(int cpu, struct task_struct *idle)
192{ 192{
193 struct thread_info *gp = task_thread_info(idle); 193 struct thread_info *gp = task_thread_info(idle);
194 dvpe(); 194 dvpe();
diff --git a/arch/mips/kernel/smp-up.c b/arch/mips/kernel/smp-up.c
index 00500fea2750..7fde3e4d978f 100644
--- a/arch/mips/kernel/smp-up.c
+++ b/arch/mips/kernel/smp-up.c
@@ -28,11 +28,11 @@ static inline void up_send_ipi_mask(const struct cpumask *mask,
28 * After we've done initial boot, this function is called to allow the 28 * After we've done initial boot, this function is called to allow the
29 * board code to clean up state, if needed 29 * board code to clean up state, if needed
30 */ 30 */
31static void __cpuinit up_init_secondary(void) 31static void up_init_secondary(void)
32{ 32{
33} 33}
34 34
35static void __cpuinit up_smp_finish(void) 35static void up_smp_finish(void)
36{ 36{
37} 37}
38 38
@@ -44,7 +44,7 @@ static void up_cpus_done(void)
44/* 44/*
45 * Firmware CPU startup hook 45 * Firmware CPU startup hook
46 */ 46 */
47static void __cpuinit up_boot_secondary(int cpu, struct task_struct *idle) 47static void up_boot_secondary(int cpu, struct task_struct *idle)
48{ 48{
49} 49}
50 50
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 6e7862ab46cc..5c208ed8f856 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -86,7 +86,7 @@ static inline void set_cpu_sibling_map(int cpu)
86struct plat_smp_ops *mp_ops; 86struct plat_smp_ops *mp_ops;
87EXPORT_SYMBOL(mp_ops); 87EXPORT_SYMBOL(mp_ops);
88 88
89__cpuinit void register_smp_ops(struct plat_smp_ops *ops) 89void register_smp_ops(struct plat_smp_ops *ops)
90{ 90{
91 if (mp_ops) 91 if (mp_ops)
92 printk(KERN_WARNING "Overriding previously set SMP ops\n"); 92 printk(KERN_WARNING "Overriding previously set SMP ops\n");
@@ -98,7 +98,7 @@ __cpuinit void register_smp_ops(struct plat_smp_ops *ops)
98 * First C code run on the secondary CPUs after being started up by 98 * First C code run on the secondary CPUs after being started up by
99 * the master. 99 * the master.
100 */ 100 */
101asmlinkage __cpuinit void start_secondary(void) 101asmlinkage void start_secondary(void)
102{ 102{
103 unsigned int cpu; 103 unsigned int cpu;
104 104
@@ -197,7 +197,7 @@ void smp_prepare_boot_cpu(void)
197 cpu_set(0, cpu_callin_map); 197 cpu_set(0, cpu_callin_map);
198} 198}
199 199
200int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle) 200int __cpu_up(unsigned int cpu, struct task_struct *tidle)
201{ 201{
202 mp_ops->boot_secondary(cpu, tidle); 202 mp_ops->boot_secondary(cpu, tidle);
203 203
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 75a4fd709841..dfc1b911be04 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -645,7 +645,7 @@ void smtc_prepare_cpus(int cpus)
645 * (unsigned long)idle->thread_info the gp 645 * (unsigned long)idle->thread_info the gp
646 * 646 *
647 */ 647 */
648void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle) 648void smtc_boot_secondary(int cpu, struct task_struct *idle)
649{ 649{
650 extern u32 kernelsp[NR_CPUS]; 650 extern u32 kernelsp[NR_CPUS];
651 unsigned long flags; 651 unsigned long flags;
diff --git a/arch/mips/kernel/spram.c b/arch/mips/kernel/spram.c
index 6af08d896e20..93f86817f20a 100644
--- a/arch/mips/kernel/spram.c
+++ b/arch/mips/kernel/spram.c
@@ -37,7 +37,7 @@
37/* 37/*
38 * Different semantics to the set_c0_* function built by __BUILD_SET_C0 38 * Different semantics to the set_c0_* function built by __BUILD_SET_C0
39 */ 39 */
40static __cpuinit unsigned int bis_c0_errctl(unsigned int set) 40static unsigned int bis_c0_errctl(unsigned int set)
41{ 41{
42 unsigned int res; 42 unsigned int res;
43 res = read_c0_errctl(); 43 res = read_c0_errctl();
@@ -45,7 +45,7 @@ static __cpuinit unsigned int bis_c0_errctl(unsigned int set)
45 return res; 45 return res;
46} 46}
47 47
48static __cpuinit void ispram_store_tag(unsigned int offset, unsigned int data) 48static void ispram_store_tag(unsigned int offset, unsigned int data)
49{ 49{
50 unsigned int errctl; 50 unsigned int errctl;
51 51
@@ -64,7 +64,7 @@ static __cpuinit void ispram_store_tag(unsigned int offset, unsigned int data)
64} 64}
65 65
66 66
67static __cpuinit unsigned int ispram_load_tag(unsigned int offset) 67static unsigned int ispram_load_tag(unsigned int offset)
68{ 68{
69 unsigned int data; 69 unsigned int data;
70 unsigned int errctl; 70 unsigned int errctl;
@@ -82,7 +82,7 @@ static __cpuinit unsigned int ispram_load_tag(unsigned int offset)
82 return data; 82 return data;
83} 83}
84 84
85static __cpuinit void dspram_store_tag(unsigned int offset, unsigned int data) 85static void dspram_store_tag(unsigned int offset, unsigned int data)
86{ 86{
87 unsigned int errctl; 87 unsigned int errctl;
88 88
@@ -98,7 +98,7 @@ static __cpuinit void dspram_store_tag(unsigned int offset, unsigned int data)
98} 98}
99 99
100 100
101static __cpuinit unsigned int dspram_load_tag(unsigned int offset) 101static unsigned int dspram_load_tag(unsigned int offset)
102{ 102{
103 unsigned int data; 103 unsigned int data;
104 unsigned int errctl; 104 unsigned int errctl;
@@ -115,7 +115,7 @@ static __cpuinit unsigned int dspram_load_tag(unsigned int offset)
115 return data; 115 return data;
116} 116}
117 117
118static __cpuinit void probe_spram(char *type, 118static void probe_spram(char *type,
119 unsigned int base, 119 unsigned int base,
120 unsigned int (*read)(unsigned int), 120 unsigned int (*read)(unsigned int),
121 void (*write)(unsigned int, unsigned int)) 121 void (*write)(unsigned int, unsigned int))
@@ -196,7 +196,7 @@ static __cpuinit void probe_spram(char *type,
196 offset += 2 * SPRAM_TAG_STRIDE; 196 offset += 2 * SPRAM_TAG_STRIDE;
197 } 197 }
198} 198}
199void __cpuinit spram_config(void) 199void spram_config(void)
200{ 200{
201 struct cpuinfo_mips *c = &current_cpu_data; 201 struct cpuinfo_mips *c = &current_cpu_data;
202 unsigned int config0; 202 unsigned int config0;
diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
index 1ff43d5ac2c4..84536bf4a154 100644
--- a/arch/mips/kernel/sync-r4k.c
+++ b/arch/mips/kernel/sync-r4k.c
@@ -20,15 +20,15 @@
20#include <asm/barrier.h> 20#include <asm/barrier.h>
21#include <asm/mipsregs.h> 21#include <asm/mipsregs.h>
22 22
23static atomic_t __cpuinitdata count_start_flag = ATOMIC_INIT(0); 23static atomic_t count_start_flag = ATOMIC_INIT(0);
24static atomic_t __cpuinitdata count_count_start = ATOMIC_INIT(0); 24static atomic_t count_count_start = ATOMIC_INIT(0);
25static atomic_t __cpuinitdata count_count_stop = ATOMIC_INIT(0); 25static atomic_t count_count_stop = ATOMIC_INIT(0);
26static atomic_t __cpuinitdata count_reference = ATOMIC_INIT(0); 26static atomic_t count_reference = ATOMIC_INIT(0);
27 27
28#define COUNTON 100 28#define COUNTON 100
29#define NR_LOOPS 5 29#define NR_LOOPS 5
30 30
31void __cpuinit synchronise_count_master(int cpu) 31void synchronise_count_master(int cpu)
32{ 32{
33 int i; 33 int i;
34 unsigned long flags; 34 unsigned long flags;
@@ -106,7 +106,7 @@ void __cpuinit synchronise_count_master(int cpu)
106 printk("done.\n"); 106 printk("done.\n");
107} 107}
108 108
109void __cpuinit synchronise_count_slave(int cpu) 109void synchronise_count_slave(int cpu)
110{ 110{
111 int i; 111 int i;
112 unsigned int initcount; 112 unsigned int initcount;
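
sync-r4k.c keeps its count_* variables as plain atomics after losing __cpuinitdata; synchronise_count_master/slave use them to rendezvous the boot CPU and the incoming CPU over NR_LOOPS rounds while the Count value is handed over, with the fixed COUNTON offset added on the slave side. A heavily compressed two-thread model of a single rendezvous round using C11 atomics and pthreads; the shared "count register" is just a variable here and the structure only loosely follows the kernel loop:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define COUNTON 100     /* fixed offset, as in the hunk above */

static atomic_int  start_flag;      /* slave -> master: "I'm ready" */
static atomic_int  ready_count;     /* both sides check in here */
static atomic_uint master_count;    /* stands in for the master's c0 Count */
static unsigned int slave_count;    /* stands in for the slave's c0 Count */

static void *slave(void *arg)
{
	atomic_store(&start_flag, 1);                 /* announce ourselves */
	atomic_fetch_add(&ready_count, 1);
	while (atomic_load(&ready_count) != 2)        /* rendezvous */
		;
	slave_count = atomic_load(&master_count) + COUNTON;
	return arg;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, slave, NULL);
	while (!atomic_load(&start_flag))             /* wait for the slave */
		;
	atomic_store(&master_count, 123456u);         /* publish the count */
	atomic_fetch_add(&ready_count, 1);
	while (atomic_load(&ready_count) != 2)        /* rendezvous */
		;
	pthread_join(t, NULL);
	printf("master=%u slave=%u\n",
	       atomic_load(&master_count), slave_count);
	return 0;
}
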
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 0903d70b2cfe..aec3408edd4b 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -90,7 +90,7 @@ void (*board_nmi_handler_setup)(void);
90void (*board_ejtag_handler_setup)(void); 90void (*board_ejtag_handler_setup)(void);
91void (*board_bind_eic_interrupt)(int irq, int regset); 91void (*board_bind_eic_interrupt)(int irq, int regset);
92void (*board_ebase_setup)(void); 92void (*board_ebase_setup)(void);
93void __cpuinitdata(*board_cache_error_setup)(void); 93void(*board_cache_error_setup)(void);
94 94
95static void show_raw_backtrace(unsigned long reg29) 95static void show_raw_backtrace(unsigned long reg29)
96{ 96{
@@ -1242,7 +1242,6 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
1242 panic("Caught Machine Check exception - %scaused by multiple " 1242 panic("Caught Machine Check exception - %scaused by multiple "
1243 "matching entries in the TLB.", 1243 "matching entries in the TLB.",
1244 (multi_match) ? "" : "not "); 1244 (multi_match) ? "" : "not ");
1245 exception_exit(prev_state);
1246} 1245}
1247 1246
1248asmlinkage void do_mt(struct pt_regs *regs) 1247asmlinkage void do_mt(struct pt_regs *regs)
@@ -1682,7 +1681,7 @@ int cp0_compare_irq_shift;
1682int cp0_perfcount_irq; 1681int cp0_perfcount_irq;
1683EXPORT_SYMBOL_GPL(cp0_perfcount_irq); 1682EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
1684 1683
1685static int __cpuinitdata noulri; 1684static int noulri;
1686 1685
1687static int __init ulri_disable(char *s) 1686static int __init ulri_disable(char *s)
1688{ 1687{
@@ -1693,7 +1692,7 @@ static int __init ulri_disable(char *s)
1693} 1692}
1694__setup("noulri", ulri_disable); 1693__setup("noulri", ulri_disable);
1695 1694
1696void __cpuinit per_cpu_trap_init(bool is_boot_cpu) 1695void per_cpu_trap_init(bool is_boot_cpu)
1697{ 1696{
1698 unsigned int cpu = smp_processor_id(); 1697 unsigned int cpu = smp_processor_id();
1699 unsigned int status_set = ST0_CU0; 1698 unsigned int status_set = ST0_CU0;
@@ -1810,7 +1809,7 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
1810} 1809}
1811 1810
1812/* Install CPU exception handler */ 1811/* Install CPU exception handler */
1813void __cpuinit set_handler(unsigned long offset, void *addr, unsigned long size) 1812void set_handler(unsigned long offset, void *addr, unsigned long size)
1814{ 1813{
1815#ifdef CONFIG_CPU_MICROMIPS 1814#ifdef CONFIG_CPU_MICROMIPS
1816 memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size); 1815 memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
@@ -1820,7 +1819,7 @@ void __cpuinit set_handler(unsigned long offset, void *addr, unsigned long size)
1820 local_flush_icache_range(ebase + offset, ebase + offset + size); 1819 local_flush_icache_range(ebase + offset, ebase + offset + size);
1821} 1820}
1822 1821
1823static char panic_null_cerr[] __cpuinitdata = 1822static char panic_null_cerr[] =
1824 "Trying to set NULL cache error exception handler"; 1823 "Trying to set NULL cache error exception handler";
1825 1824
1826/* 1825/*
@@ -1828,7 +1827,7 @@ static char panic_null_cerr[] __cpuinitdata =
1828 * This is suitable only for the cache error exception which is the only 1827 * This is suitable only for the cache error exception which is the only
1829 * exception handler that is being run uncached. 1828 * exception handler that is being run uncached.
1830 */ 1829 */
1831void __cpuinit set_uncached_handler(unsigned long offset, void *addr, 1830void set_uncached_handler(unsigned long offset, void *addr,
1832 unsigned long size) 1831 unsigned long size)
1833{ 1832{
1834 unsigned long uncached_ebase = CKSEG1ADDR(ebase); 1833 unsigned long uncached_ebase = CKSEG1ADDR(ebase);
diff --git a/arch/mips/kernel/watch.c b/arch/mips/kernel/watch.c
index cbdc4de85bb4..2a03abb5bd2c 100644
--- a/arch/mips/kernel/watch.c
+++ b/arch/mips/kernel/watch.c
@@ -100,7 +100,7 @@ void mips_clear_watch_registers(void)
100 } 100 }
101} 101}
102 102
103__cpuinit void mips_probe_watch_registers(struct cpuinfo_mips *c) 103void mips_probe_watch_registers(struct cpuinfo_mips *c)
104{ 104{
105 unsigned int t; 105 unsigned int t;
106 106
diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig
index 2c15590e55f7..30e334e823bd 100644
--- a/arch/mips/kvm/Kconfig
+++ b/arch/mips/kvm/Kconfig
@@ -5,7 +5,6 @@ source "virt/kvm/Kconfig"
5 5
6menuconfig VIRTUALIZATION 6menuconfig VIRTUALIZATION
7 bool "Virtualization" 7 bool "Virtualization"
8 depends on HAVE_KVM
9 ---help--- 8 ---help---
10 Say Y here to get to see options for using your Linux host to run 9 Say Y here to get to see options for using your Linux host to run
11 other operating systems inside virtual machines (guests). 10 other operating systems inside virtual machines (guests).
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index 51194875f158..eb3e18659630 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -461,7 +461,7 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
461 return 0; 461 return 0;
462} 462}
463 463
464unsigned int __cpuinit get_c0_compare_int(void) 464unsigned int get_c0_compare_int(void)
465{ 465{
466 return MIPS_CPU_TIMER_IRQ; 466 return MIPS_CPU_TIMER_IRQ;
467} 467}
diff --git a/arch/mips/lib/uncached.c b/arch/mips/lib/uncached.c
index 65e3dfc4e585..d8522f8e842a 100644
--- a/arch/mips/lib/uncached.c
+++ b/arch/mips/lib/uncached.c
@@ -36,7 +36,7 @@
36 * values, so we can avoid sharing the same stack area between a cached 36 * values, so we can avoid sharing the same stack area between a cached
37 * and the uncached mode. 37 * and the uncached mode.
38 */ 38 */
39unsigned long __cpuinit run_uncached(void *func) 39unsigned long run_uncached(void *func)
40{ 40{
41 register long sp __asm__("$sp"); 41 register long sp __asm__("$sp");
42 register long ret __asm__("$2"); 42 register long ret __asm__("$2");
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index 8557fb552863..a0bcdbb81d41 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -180,7 +180,7 @@ static void octeon_flush_kernel_vmap_range(unsigned long vaddr, int size)
180 * Probe Octeon's caches 180 * Probe Octeon's caches
181 * 181 *
182 */ 182 */
183static void __cpuinit probe_octeon(void) 183static void probe_octeon(void)
184{ 184{
185 unsigned long icache_size; 185 unsigned long icache_size;
186 unsigned long dcache_size; 186 unsigned long dcache_size;
@@ -251,7 +251,7 @@ static void __cpuinit probe_octeon(void)
251 } 251 }
252} 252}
253 253
254static void __cpuinit octeon_cache_error_setup(void) 254static void octeon_cache_error_setup(void)
255{ 255{
256 extern char except_vec2_octeon; 256 extern char except_vec2_octeon;
257 set_handler(0x100, &except_vec2_octeon, 0x80); 257 set_handler(0x100, &except_vec2_octeon, 0x80);
@@ -261,7 +261,7 @@ static void __cpuinit octeon_cache_error_setup(void)
261 * Setup the Octeon cache flush routines 261 * Setup the Octeon cache flush routines
262 * 262 *
263 */ 263 */
264void __cpuinit octeon_cache_init(void) 264void octeon_cache_init(void)
265{ 265{
266 probe_octeon(); 266 probe_octeon();
267 267
diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c
index 704dc735a59d..2fcde0c8ea02 100644
--- a/arch/mips/mm/c-r3k.c
+++ b/arch/mips/mm/c-r3k.c
@@ -26,7 +26,7 @@
26static unsigned long icache_size, dcache_size; /* Size in bytes */ 26static unsigned long icache_size, dcache_size; /* Size in bytes */
27static unsigned long icache_lsize, dcache_lsize; /* Size in bytes */ 27static unsigned long icache_lsize, dcache_lsize; /* Size in bytes */
28 28
29unsigned long __cpuinit r3k_cache_size(unsigned long ca_flags) 29unsigned long r3k_cache_size(unsigned long ca_flags)
30{ 30{
31 unsigned long flags, status, dummy, size; 31 unsigned long flags, status, dummy, size;
32 volatile unsigned long *p; 32 volatile unsigned long *p;
@@ -61,7 +61,7 @@ unsigned long __cpuinit r3k_cache_size(unsigned long ca_flags)
61 return size * sizeof(*p); 61 return size * sizeof(*p);
62} 62}
63 63
64unsigned long __cpuinit r3k_cache_lsize(unsigned long ca_flags) 64unsigned long r3k_cache_lsize(unsigned long ca_flags)
65{ 65{
66 unsigned long flags, status, lsize, i; 66 unsigned long flags, status, lsize, i;
67 volatile unsigned long *p; 67 volatile unsigned long *p;
@@ -90,7 +90,7 @@ unsigned long __cpuinit r3k_cache_lsize(unsigned long ca_flags)
90 return lsize * sizeof(*p); 90 return lsize * sizeof(*p);
91} 91}
92 92
93static void __cpuinit r3k_probe_cache(void) 93static void r3k_probe_cache(void)
94{ 94{
95 dcache_size = r3k_cache_size(ST0_ISC); 95 dcache_size = r3k_cache_size(ST0_ISC);
96 if (dcache_size) 96 if (dcache_size)
@@ -312,7 +312,7 @@ static void r3k_dma_cache_wback_inv(unsigned long start, unsigned long size)
312 r3k_flush_dcache_range(start, start + size); 312 r3k_flush_dcache_range(start, start + size);
313} 313}
314 314
315void __cpuinit r3k_cache_init(void) 315void r3k_cache_init(void)
316{ 316{
317 extern void build_clear_page(void); 317 extern void build_clear_page(void);
318 extern void build_copy_page(void); 318 extern void build_copy_page(void);
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 21813beec7a5..f749f687ee87 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -107,7 +107,7 @@ static inline void r4k_blast_dcache_page_dc64(unsigned long addr)
107 blast_dcache64_page(addr); 107 blast_dcache64_page(addr);
108} 108}
109 109
110static void __cpuinit r4k_blast_dcache_page_setup(void) 110static void r4k_blast_dcache_page_setup(void)
111{ 111{
112 unsigned long dc_lsize = cpu_dcache_line_size(); 112 unsigned long dc_lsize = cpu_dcache_line_size();
113 113
@@ -123,7 +123,7 @@ static void __cpuinit r4k_blast_dcache_page_setup(void)
123 123
124static void (* r4k_blast_dcache_page_indexed)(unsigned long addr); 124static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);
125 125
126static void __cpuinit r4k_blast_dcache_page_indexed_setup(void) 126static void r4k_blast_dcache_page_indexed_setup(void)
127{ 127{
128 unsigned long dc_lsize = cpu_dcache_line_size(); 128 unsigned long dc_lsize = cpu_dcache_line_size();
129 129
@@ -140,7 +140,7 @@ static void __cpuinit r4k_blast_dcache_page_indexed_setup(void)
140void (* r4k_blast_dcache)(void); 140void (* r4k_blast_dcache)(void);
141EXPORT_SYMBOL(r4k_blast_dcache); 141EXPORT_SYMBOL(r4k_blast_dcache);
142 142
143static void __cpuinit r4k_blast_dcache_setup(void) 143static void r4k_blast_dcache_setup(void)
144{ 144{
145 unsigned long dc_lsize = cpu_dcache_line_size(); 145 unsigned long dc_lsize = cpu_dcache_line_size();
146 146
@@ -227,7 +227,7 @@ static inline void tx49_blast_icache32_page_indexed(unsigned long page)
227 227
228static void (* r4k_blast_icache_page)(unsigned long addr); 228static void (* r4k_blast_icache_page)(unsigned long addr);
229 229
230static void __cpuinit r4k_blast_icache_page_setup(void) 230static void r4k_blast_icache_page_setup(void)
231{ 231{
232 unsigned long ic_lsize = cpu_icache_line_size(); 232 unsigned long ic_lsize = cpu_icache_line_size();
233 233
@@ -244,7 +244,7 @@ static void __cpuinit r4k_blast_icache_page_setup(void)
244 244
245static void (* r4k_blast_icache_page_indexed)(unsigned long addr); 245static void (* r4k_blast_icache_page_indexed)(unsigned long addr);
246 246
247static void __cpuinit r4k_blast_icache_page_indexed_setup(void) 247static void r4k_blast_icache_page_indexed_setup(void)
248{ 248{
249 unsigned long ic_lsize = cpu_icache_line_size(); 249 unsigned long ic_lsize = cpu_icache_line_size();
250 250
@@ -269,7 +269,7 @@ static void __cpuinit r4k_blast_icache_page_indexed_setup(void)
269void (* r4k_blast_icache)(void); 269void (* r4k_blast_icache)(void);
270EXPORT_SYMBOL(r4k_blast_icache); 270EXPORT_SYMBOL(r4k_blast_icache);
271 271
272static void __cpuinit r4k_blast_icache_setup(void) 272static void r4k_blast_icache_setup(void)
273{ 273{
274 unsigned long ic_lsize = cpu_icache_line_size(); 274 unsigned long ic_lsize = cpu_icache_line_size();
275 275
@@ -290,7 +290,7 @@ static void __cpuinit r4k_blast_icache_setup(void)
290 290
291static void (* r4k_blast_scache_page)(unsigned long addr); 291static void (* r4k_blast_scache_page)(unsigned long addr);
292 292
293static void __cpuinit r4k_blast_scache_page_setup(void) 293static void r4k_blast_scache_page_setup(void)
294{ 294{
295 unsigned long sc_lsize = cpu_scache_line_size(); 295 unsigned long sc_lsize = cpu_scache_line_size();
296 296
@@ -308,7 +308,7 @@ static void __cpuinit r4k_blast_scache_page_setup(void)
308 308
309static void (* r4k_blast_scache_page_indexed)(unsigned long addr); 309static void (* r4k_blast_scache_page_indexed)(unsigned long addr);
310 310
311static void __cpuinit r4k_blast_scache_page_indexed_setup(void) 311static void r4k_blast_scache_page_indexed_setup(void)
312{ 312{
313 unsigned long sc_lsize = cpu_scache_line_size(); 313 unsigned long sc_lsize = cpu_scache_line_size();
314 314
@@ -326,7 +326,7 @@ static void __cpuinit r4k_blast_scache_page_indexed_setup(void)
326 326
327static void (* r4k_blast_scache)(void); 327static void (* r4k_blast_scache)(void);
328 328
329static void __cpuinit r4k_blast_scache_setup(void) 329static void r4k_blast_scache_setup(void)
330{ 330{
331 unsigned long sc_lsize = cpu_scache_line_size(); 331 unsigned long sc_lsize = cpu_scache_line_size();
332 332
@@ -797,11 +797,11 @@ static inline void alias_74k_erratum(struct cpuinfo_mips *c)
797 } 797 }
798} 798}
799 799
800static char *way_string[] __cpuinitdata = { NULL, "direct mapped", "2-way", 800static char *way_string[] = { NULL, "direct mapped", "2-way",
801 "3-way", "4-way", "5-way", "6-way", "7-way", "8-way" 801 "3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
802}; 802};
803 803
804static void __cpuinit probe_pcache(void) 804static void probe_pcache(void)
805{ 805{
806 struct cpuinfo_mips *c = &current_cpu_data; 806 struct cpuinfo_mips *c = &current_cpu_data;
807 unsigned int config = read_c0_config(); 807 unsigned int config = read_c0_config();
@@ -1119,7 +1119,7 @@ static void __cpuinit probe_pcache(void)
1119 * executes in KSEG1 space or else you will crash and burn badly. You have 1119 * executes in KSEG1 space or else you will crash and burn badly. You have
1120 * been warned. 1120 * been warned.
1121 */ 1121 */
1122static int __cpuinit probe_scache(void) 1122static int probe_scache(void)
1123{ 1123{
1124 unsigned long flags, addr, begin, end, pow2; 1124 unsigned long flags, addr, begin, end, pow2;
1125 unsigned int config = read_c0_config(); 1125 unsigned int config = read_c0_config();
@@ -1196,7 +1196,7 @@ extern int r5k_sc_init(void);
1196extern int rm7k_sc_init(void); 1196extern int rm7k_sc_init(void);
1197extern int mips_sc_init(void); 1197extern int mips_sc_init(void);
1198 1198
1199static void __cpuinit setup_scache(void) 1199static void setup_scache(void)
1200{ 1200{
1201 struct cpuinfo_mips *c = &current_cpu_data; 1201 struct cpuinfo_mips *c = &current_cpu_data;
1202 unsigned int config = read_c0_config(); 1202 unsigned int config = read_c0_config();
@@ -1329,7 +1329,7 @@ static void nxp_pr4450_fixup_config(void)
1329 NXP_BARRIER(); 1329 NXP_BARRIER();
1330} 1330}
1331 1331
1332static int __cpuinitdata cca = -1; 1332static int cca = -1;
1333 1333
1334static int __init cca_setup(char *str) 1334static int __init cca_setup(char *str)
1335{ 1335{
@@ -1340,7 +1340,7 @@ static int __init cca_setup(char *str)
1340 1340
1341early_param("cca", cca_setup); 1341early_param("cca", cca_setup);
1342 1342
1343static void __cpuinit coherency_setup(void) 1343static void coherency_setup(void)
1344{ 1344{
1345 if (cca < 0 || cca > 7) 1345 if (cca < 0 || cca > 7)
1346 cca = read_c0_config() & CONF_CM_CMASK; 1346 cca = read_c0_config() & CONF_CM_CMASK;
@@ -1380,7 +1380,7 @@ static void __cpuinit coherency_setup(void)
1380 } 1380 }
1381} 1381}
1382 1382
1383static void __cpuinit r4k_cache_error_setup(void) 1383static void r4k_cache_error_setup(void)
1384{ 1384{
1385 extern char __weak except_vec2_generic; 1385 extern char __weak except_vec2_generic;
1386 extern char __weak except_vec2_sb1; 1386 extern char __weak except_vec2_sb1;
@@ -1398,7 +1398,7 @@ static void __cpuinit r4k_cache_error_setup(void)
1398 } 1398 }
1399} 1399}
1400 1400
1401void __cpuinit r4k_cache_init(void) 1401void r4k_cache_init(void)
1402{ 1402{
1403 extern void build_clear_page(void); 1403 extern void build_clear_page(void);
1404 extern void build_copy_page(void); 1404 extern void build_copy_page(void);
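The c-r4k.c hunks above are representative of the whole series: every __cpuinit marker is dropped while the function bodies stay untouched. As a minimal stand-alone sketch (an editorial illustration, not kernel code; __cpuinit is assumed here to be an empty macro, which is what the annotation amounts to once it no longer places code in a discardable section), the old and new spellings compile identically:

#include <stdio.h>

/* Stand-in only: assume the annotation has become a no-op, so removing it
 * from definitions such as r4k_cache_init() is purely mechanical. */
#define __cpuinit

static void __cpuinit cache_init_old_spelling(void)   /* as before the diff */
{
    printf("annotated function\n");
}

static void cache_init_new_spelling(void)             /* as after the diff */
{
    printf("plain function\n");
}

int main(void)
{
    cache_init_old_spelling();
    cache_init_new_spelling();
    return 0;
}

Under that assumption the preprocessor erases the marker, so the only observable change in the hunks above is the shorter declaration.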
diff --git a/arch/mips/mm/c-tx39.c b/arch/mips/mm/c-tx39.c
index ba9da270289f..8d909dbbf37f 100644
--- a/arch/mips/mm/c-tx39.c
+++ b/arch/mips/mm/c-tx39.c
@@ -344,7 +344,7 @@ static __init void tx39_probe_cache(void)
344 } 344 }
345} 345}
346 346
347void __cpuinit tx39_cache_init(void) 347void tx39_cache_init(void)
348{ 348{
349 extern void build_clear_page(void); 349 extern void build_clear_page(void);
350 extern void build_copy_page(void); 350 extern void build_copy_page(void);
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 5aeb3eb0b72f..15f813c303b4 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -182,7 +182,7 @@ static inline void setup_protection_map(void)
182 } 182 }
183} 183}
184 184
185void __cpuinit cpu_cache_init(void) 185void cpu_cache_init(void)
186{ 186{
187 if (cpu_has_3k_cache) { 187 if (cpu_has_3k_cache) {
188 extern void __weak r3k_cache_init(void); 188 extern void __weak r3k_cache_init(void);
diff --git a/arch/mips/mm/cex-sb1.S b/arch/mips/mm/cex-sb1.S
index fe1d887e8d70..191cf6e0c725 100644
--- a/arch/mips/mm/cex-sb1.S
+++ b/arch/mips/mm/cex-sb1.S
@@ -49,8 +49,6 @@
49 * (0x170-0x17f) are used to preserve k0, k1, and ra. 49 * (0x170-0x17f) are used to preserve k0, k1, and ra.
50 */ 50 */
51 51
52 __CPUINIT
53
54LEAF(except_vec2_sb1) 52LEAF(except_vec2_sb1)
55 /* 53 /*
56 * If this error is recoverable, we need to exit the handler 54 * If this error is recoverable, we need to exit the handler
@@ -142,8 +140,6 @@ unrecoverable:
142 140
143END(except_vec2_sb1) 141END(except_vec2_sb1)
144 142
145 __FINIT
146
147 LEAF(handle_vec2_sb1) 143 LEAF(handle_vec2_sb1)
148 mfc0 k0,CP0_CONFIG 144 mfc0 k0,CP0_CONFIG
149 li k1,~CONF_CM_CMASK 145 li k1,~CONF_CM_CMASK
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index 2c0bd580b9da..218c2109a55d 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -66,29 +66,29 @@ UASM_L_LA(_copy_pref_both)
66UASM_L_LA(_copy_pref_store) 66UASM_L_LA(_copy_pref_store)
67 67
68/* We need one branch and therefore one relocation per target label. */ 68/* We need one branch and therefore one relocation per target label. */
69static struct uasm_label __cpuinitdata labels[5]; 69static struct uasm_label labels[5];
70static struct uasm_reloc __cpuinitdata relocs[5]; 70static struct uasm_reloc relocs[5];
71 71
72#define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x00002010) 72#define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x00002010)
73#define cpu_is_r4600_v2_x() ((read_c0_prid() & 0xfffffff0) == 0x00002020) 73#define cpu_is_r4600_v2_x() ((read_c0_prid() & 0xfffffff0) == 0x00002020)
74 74
75static int pref_bias_clear_store __cpuinitdata; 75static int pref_bias_clear_store;
76static int pref_bias_copy_load __cpuinitdata; 76static int pref_bias_copy_load;
77static int pref_bias_copy_store __cpuinitdata; 77static int pref_bias_copy_store;
78 78
79static u32 pref_src_mode __cpuinitdata; 79static u32 pref_src_mode;
80static u32 pref_dst_mode __cpuinitdata; 80static u32 pref_dst_mode;
81 81
82static int clear_word_size __cpuinitdata; 82static int clear_word_size;
83static int copy_word_size __cpuinitdata; 83static int copy_word_size;
84 84
85static int half_clear_loop_size __cpuinitdata; 85static int half_clear_loop_size;
86static int half_copy_loop_size __cpuinitdata; 86static int half_copy_loop_size;
87 87
88static int cache_line_size __cpuinitdata; 88static int cache_line_size;
89#define cache_line_mask() (cache_line_size - 1) 89#define cache_line_mask() (cache_line_size - 1)
90 90
91static inline void __cpuinit 91static inline void
92pg_addiu(u32 **buf, unsigned int reg1, unsigned int reg2, unsigned int off) 92pg_addiu(u32 **buf, unsigned int reg1, unsigned int reg2, unsigned int off)
93{ 93{
94 if (cpu_has_64bit_gp_regs && DADDI_WAR && r4k_daddiu_bug()) { 94 if (cpu_has_64bit_gp_regs && DADDI_WAR && r4k_daddiu_bug()) {
@@ -108,7 +108,7 @@ pg_addiu(u32 **buf, unsigned int reg1, unsigned int reg2, unsigned int off)
108 } 108 }
109} 109}
110 110
111static void __cpuinit set_prefetch_parameters(void) 111static void set_prefetch_parameters(void)
112{ 112{
113 if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg) 113 if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg)
114 clear_word_size = 8; 114 clear_word_size = 8;
@@ -199,7 +199,7 @@ static void __cpuinit set_prefetch_parameters(void)
199 4 * copy_word_size)); 199 4 * copy_word_size));
200} 200}
201 201
202static void __cpuinit build_clear_store(u32 **buf, int off) 202static void build_clear_store(u32 **buf, int off)
203{ 203{
204 if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg) { 204 if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg) {
205 uasm_i_sd(buf, ZERO, off, A0); 205 uasm_i_sd(buf, ZERO, off, A0);
@@ -208,7 +208,7 @@ static void __cpuinit build_clear_store(u32 **buf, int off)
208 } 208 }
209} 209}
210 210
211static inline void __cpuinit build_clear_pref(u32 **buf, int off) 211static inline void build_clear_pref(u32 **buf, int off)
212{ 212{
213 if (off & cache_line_mask()) 213 if (off & cache_line_mask())
214 return; 214 return;
@@ -240,7 +240,7 @@ extern u32 __clear_page_end;
240extern u32 __copy_page_start; 240extern u32 __copy_page_start;
241extern u32 __copy_page_end; 241extern u32 __copy_page_end;
242 242
243void __cpuinit build_clear_page(void) 243void build_clear_page(void)
244{ 244{
245 int off; 245 int off;
246 u32 *buf = &__clear_page_start; 246 u32 *buf = &__clear_page_start;
@@ -333,7 +333,7 @@ void __cpuinit build_clear_page(void)
333 pr_debug("\t.set pop\n"); 333 pr_debug("\t.set pop\n");
334} 334}
335 335
336static void __cpuinit build_copy_load(u32 **buf, int reg, int off) 336static void build_copy_load(u32 **buf, int reg, int off)
337{ 337{
338 if (cpu_has_64bit_gp_regs) { 338 if (cpu_has_64bit_gp_regs) {
339 uasm_i_ld(buf, reg, off, A1); 339 uasm_i_ld(buf, reg, off, A1);
@@ -342,7 +342,7 @@ static void __cpuinit build_copy_load(u32 **buf, int reg, int off)
342 } 342 }
343} 343}
344 344
345static void __cpuinit build_copy_store(u32 **buf, int reg, int off) 345static void build_copy_store(u32 **buf, int reg, int off)
346{ 346{
347 if (cpu_has_64bit_gp_regs) { 347 if (cpu_has_64bit_gp_regs) {
348 uasm_i_sd(buf, reg, off, A0); 348 uasm_i_sd(buf, reg, off, A0);
@@ -387,7 +387,7 @@ static inline void build_copy_store_pref(u32 **buf, int off)
387 } 387 }
388} 388}
389 389
390void __cpuinit build_copy_page(void) 390void build_copy_page(void)
391{ 391{
392 int off; 392 int off;
393 u32 *buf = &__copy_page_start; 393 u32 *buf = &__copy_page_start;
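The page.c hunks show the same cleanup applied to data: statics such as pref_bias_copy_load lose their __cpuinitdata marker. A minimal sketch (illustration only, not kernel code; __cpuinitdata is again assumed to be an empty macro) shows that the two declaration styles are equivalent once the annotation carries no section placement:

#include <stdio.h>

/* Stand-in only: with the annotation reduced to nothing, the "old" and "new"
 * declarations below differ only in spelling. */
#define __cpuinitdata

static int pref_bias_copy_load __cpuinitdata = 0;  /* old spelling */
static int pref_bias_copy_store = 0;               /* new spelling */

int main(void)
{
    printf("%d %d\n", pref_bias_copy_load, pref_bias_copy_store);
    return 0;
}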
diff --git a/arch/mips/mm/sc-ip22.c b/arch/mips/mm/sc-ip22.c
index c6aaed934d53..dc7c5a5214a9 100644
--- a/arch/mips/mm/sc-ip22.c
+++ b/arch/mips/mm/sc-ip22.c
@@ -167,7 +167,7 @@ static struct bcache_ops indy_sc_ops = {
167 .bc_inv = indy_sc_wback_invalidate 167 .bc_inv = indy_sc_wback_invalidate
168}; 168};
169 169
170void __cpuinit indy_sc_init(void) 170void indy_sc_init(void)
171{ 171{
172 if (indy_sc_probe()) { 172 if (indy_sc_probe()) {
173 indy_sc_enable(); 173 indy_sc_enable();
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
index df96da7e939b..5d01392e3518 100644
--- a/arch/mips/mm/sc-mips.c
+++ b/arch/mips/mm/sc-mips.c
@@ -132,7 +132,7 @@ static inline int __init mips_sc_probe(void)
132 return 1; 132 return 1;
133} 133}
134 134
135int __cpuinit mips_sc_init(void) 135int mips_sc_init(void)
136{ 136{
137 int found = mips_sc_probe(); 137 int found = mips_sc_probe();
138 if (found) { 138 if (found) {
diff --git a/arch/mips/mm/sc-r5k.c b/arch/mips/mm/sc-r5k.c
index 8bc67720e145..0216ed6eaa2a 100644
--- a/arch/mips/mm/sc-r5k.c
+++ b/arch/mips/mm/sc-r5k.c
@@ -98,7 +98,7 @@ static struct bcache_ops r5k_sc_ops = {
98 .bc_inv = r5k_dma_cache_inv_sc 98 .bc_inv = r5k_dma_cache_inv_sc
99}; 99};
100 100
101void __cpuinit r5k_sc_init(void) 101void r5k_sc_init(void)
102{ 102{
103 if (r5k_sc_probe()) { 103 if (r5k_sc_probe()) {
104 r5k_sc_enable(); 104 r5k_sc_enable();
diff --git a/arch/mips/mm/sc-rm7k.c b/arch/mips/mm/sc-rm7k.c
index 274af3be1442..aaffbba33706 100644
--- a/arch/mips/mm/sc-rm7k.c
+++ b/arch/mips/mm/sc-rm7k.c
@@ -104,7 +104,7 @@ static void blast_rm7k_tcache(void)
104/* 104/*
105 * This function is executed in uncached address space. 105 * This function is executed in uncached address space.
106 */ 106 */
107static __cpuinit void __rm7k_tc_enable(void) 107static void __rm7k_tc_enable(void)
108{ 108{
109 int i; 109 int i;
110 110
@@ -117,7 +117,7 @@ static __cpuinit void __rm7k_tc_enable(void)
117 cache_op(Index_Store_Tag_T, CKSEG0ADDR(i)); 117 cache_op(Index_Store_Tag_T, CKSEG0ADDR(i));
118} 118}
119 119
120static __cpuinit void rm7k_tc_enable(void) 120static void rm7k_tc_enable(void)
121{ 121{
122 if (read_c0_config() & RM7K_CONF_TE) 122 if (read_c0_config() & RM7K_CONF_TE)
123 return; 123 return;
@@ -130,7 +130,7 @@ static __cpuinit void rm7k_tc_enable(void)
130/* 130/*
131 * This function is executed in uncached address space. 131 * This function is executed in uncached address space.
132 */ 132 */
133static __cpuinit void __rm7k_sc_enable(void) 133static void __rm7k_sc_enable(void)
134{ 134{
135 int i; 135 int i;
136 136
@@ -143,7 +143,7 @@ static __cpuinit void __rm7k_sc_enable(void)
143 cache_op(Index_Store_Tag_SD, CKSEG0ADDR(i)); 143 cache_op(Index_Store_Tag_SD, CKSEG0ADDR(i));
144} 144}
145 145
146static __cpuinit void rm7k_sc_enable(void) 146static void rm7k_sc_enable(void)
147{ 147{
148 if (read_c0_config() & RM7K_CONF_SE) 148 if (read_c0_config() & RM7K_CONF_SE)
149 return; 149 return;
@@ -184,7 +184,7 @@ static struct bcache_ops rm7k_sc_ops = {
184 * This is a probing function like the one found in c-r4k.c, we look for the 184 * This is a probing function like the one found in c-r4k.c, we look for the
185 * wrap around point with different addresses. 185 * wrap around point with different addresses.
186 */ 186 */
187static __cpuinit void __probe_tcache(void) 187static void __probe_tcache(void)
188{ 188{
189 unsigned long flags, addr, begin, end, pow2; 189 unsigned long flags, addr, begin, end, pow2;
190 190
@@ -226,7 +226,7 @@ static __cpuinit void __probe_tcache(void)
226 local_irq_restore(flags); 226 local_irq_restore(flags);
227} 227}
228 228
229void __cpuinit rm7k_sc_init(void) 229void rm7k_sc_init(void)
230{ 230{
231 struct cpuinfo_mips *c = &current_cpu_data; 231 struct cpuinfo_mips *c = &current_cpu_data;
232 unsigned int config = read_c0_config(); 232 unsigned int config = read_c0_config();
diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c
index a63d1ed0827f..9aca10994cd2 100644
--- a/arch/mips/mm/tlb-r3k.c
+++ b/arch/mips/mm/tlb-r3k.c
@@ -276,7 +276,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
276 } 276 }
277} 277}
278 278
279void __cpuinit tlb_init(void) 279void tlb_init(void)
280{ 280{
281 local_flush_tlb_all(); 281 local_flush_tlb_all();
282 282
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index c643de4c473a..00b26a67a06d 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -389,7 +389,7 @@ int __init has_transparent_hugepage(void)
389 389
390#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 390#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
391 391
392static int __cpuinitdata ntlb; 392static int ntlb;
393static int __init set_ntlb(char *str) 393static int __init set_ntlb(char *str)
394{ 394{
395 get_option(&str, &ntlb); 395 get_option(&str, &ntlb);
@@ -398,7 +398,7 @@ static int __init set_ntlb(char *str)
398 398
399__setup("ntlb=", set_ntlb); 399__setup("ntlb=", set_ntlb);
400 400
401void __cpuinit tlb_init(void) 401void tlb_init(void)
402{ 402{
403 /* 403 /*
404 * You should never change this register: 404 * You should never change this register:
diff --git a/arch/mips/mm/tlb-r8k.c b/arch/mips/mm/tlb-r8k.c
index 91c2499f806a..6a99733a4440 100644
--- a/arch/mips/mm/tlb-r8k.c
+++ b/arch/mips/mm/tlb-r8k.c
@@ -213,14 +213,14 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
213 local_irq_restore(flags); 213 local_irq_restore(flags);
214} 214}
215 215
216static void __cpuinit probe_tlb(unsigned long config) 216static void probe_tlb(unsigned long config)
217{ 217{
218 struct cpuinfo_mips *c = &current_cpu_data; 218 struct cpuinfo_mips *c = &current_cpu_data;
219 219
220 c->tlbsize = 3 * 128; /* 3 sets each 128 entries */ 220 c->tlbsize = 3 * 128; /* 3 sets each 128 entries */
221} 221}
222 222
223void __cpuinit tlb_init(void) 223void tlb_init(void)
224{ 224{
225 unsigned int config = read_c0_config(); 225 unsigned int config = read_c0_config();
226 unsigned long status; 226 unsigned long status;
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 9ab0f907a52c..556cb4815770 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -136,7 +136,7 @@ static int scratchpad_offset(int i)
136 * why; it's not an issue caused by the core RTL. 136 * why; it's not an issue caused by the core RTL.
137 * 137 *
138 */ 138 */
139static int __cpuinit m4kc_tlbp_war(void) 139static int m4kc_tlbp_war(void)
140{ 140{
141 return (current_cpu_data.processor_id & 0xffff00) == 141 return (current_cpu_data.processor_id & 0xffff00) ==
142 (PRID_COMP_MIPS | PRID_IMP_4KC); 142 (PRID_COMP_MIPS | PRID_IMP_4KC);
@@ -181,11 +181,9 @@ UASM_L_LA(_large_segbits_fault)
181UASM_L_LA(_tlb_huge_update) 181UASM_L_LA(_tlb_huge_update)
182#endif 182#endif
183 183
184static int __cpuinitdata hazard_instance; 184static int hazard_instance;
185 185
186static void __cpuinit uasm_bgezl_hazard(u32 **p, 186static void uasm_bgezl_hazard(u32 **p, struct uasm_reloc **r, int instance)
187 struct uasm_reloc **r,
188 int instance)
189{ 187{
190 switch (instance) { 188 switch (instance) {
191 case 0 ... 7: 189 case 0 ... 7:
@@ -196,9 +194,7 @@ static void __cpuinit uasm_bgezl_hazard(u32 **p,
196 } 194 }
197} 195}
198 196
199static void __cpuinit uasm_bgezl_label(struct uasm_label **l, 197static void uasm_bgezl_label(struct uasm_label **l, u32 **p, int instance)
200 u32 **p,
201 int instance)
202{ 198{
203 switch (instance) { 199 switch (instance) {
204 case 0 ... 7: 200 case 0 ... 7:
@@ -295,15 +291,15 @@ static inline void dump_handler(const char *symbol, const u32 *handler, int coun
295 * We deliberately chose a buffer size of 128, so we won't scribble 291 * We deliberately chose a buffer size of 128, so we won't scribble
296 * over anything important on overflow before we panic. 292 * over anything important on overflow before we panic.
297 */ 293 */
298static u32 tlb_handler[128] __cpuinitdata; 294static u32 tlb_handler[128];
299 295
300/* simply assume worst case size for labels and relocs */ 296/* simply assume worst case size for labels and relocs */
301static struct uasm_label labels[128] __cpuinitdata; 297static struct uasm_label labels[128];
302static struct uasm_reloc relocs[128] __cpuinitdata; 298static struct uasm_reloc relocs[128];
303 299
304static int check_for_high_segbits __cpuinitdata; 300static int check_for_high_segbits;
305 301
306static unsigned int kscratch_used_mask __cpuinitdata; 302static unsigned int kscratch_used_mask;
307 303
308static inline int __maybe_unused c0_kscratch(void) 304static inline int __maybe_unused c0_kscratch(void)
309{ 305{
@@ -316,7 +312,7 @@ static inline int __maybe_unused c0_kscratch(void)
316 } 312 }
317} 313}
318 314
319static int __cpuinit allocate_kscratch(void) 315static int allocate_kscratch(void)
320{ 316{
321 int r; 317 int r;
322 unsigned int a = cpu_data[0].kscratch_mask & ~kscratch_used_mask; 318 unsigned int a = cpu_data[0].kscratch_mask & ~kscratch_used_mask;
@@ -333,11 +329,11 @@ static int __cpuinit allocate_kscratch(void)
333 return r; 329 return r;
334} 330}
335 331
336static int scratch_reg __cpuinitdata; 332static int scratch_reg;
337static int pgd_reg __cpuinitdata; 333static int pgd_reg;
338enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch}; 334enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch};
339 335
340static struct work_registers __cpuinit build_get_work_registers(u32 **p) 336static struct work_registers build_get_work_registers(u32 **p)
341{ 337{
342 struct work_registers r; 338 struct work_registers r;
343 339
@@ -393,7 +389,7 @@ static struct work_registers __cpuinit build_get_work_registers(u32 **p)
393 return r; 389 return r;
394} 390}
395 391
396static void __cpuinit build_restore_work_registers(u32 **p) 392static void build_restore_work_registers(u32 **p)
397{ 393{
398 if (scratch_reg >= 0) { 394 if (scratch_reg >= 0) {
399 UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg); 395 UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
@@ -418,7 +414,7 @@ extern unsigned long pgd_current[];
418/* 414/*
419 * The R3000 TLB handler is simple. 415 * The R3000 TLB handler is simple.
420 */ 416 */
421static void __cpuinit build_r3000_tlb_refill_handler(void) 417static void build_r3000_tlb_refill_handler(void)
422{ 418{
423 long pgdc = (long)pgd_current; 419 long pgdc = (long)pgd_current;
424 u32 *p; 420 u32 *p;
@@ -463,7 +459,7 @@ static void __cpuinit build_r3000_tlb_refill_handler(void)
463 * other one.To keep things simple, we first assume linear space, 459 * other one.To keep things simple, we first assume linear space,
464 * then we relocate it to the final handler layout as needed. 460 * then we relocate it to the final handler layout as needed.
465 */ 461 */
466static u32 final_handler[64] __cpuinitdata; 462static u32 final_handler[64];
467 463
468/* 464/*
469 * Hazards 465 * Hazards
@@ -487,7 +483,7 @@ static u32 final_handler[64] __cpuinitdata;
487 * 483 *
488 * As if we MIPS hackers wouldn't know how to nop pipelines happy ... 484 * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
489 */ 485 */
490static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p) 486static void __maybe_unused build_tlb_probe_entry(u32 **p)
491{ 487{
492 switch (current_cpu_type()) { 488 switch (current_cpu_type()) {
493 /* Found by experiment: R4600 v2.0/R4700 needs this, too. */ 489 /* Found by experiment: R4600 v2.0/R4700 needs this, too. */
@@ -511,9 +507,9 @@ static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p)
511 */ 507 */
512enum tlb_write_entry { tlb_random, tlb_indexed }; 508enum tlb_write_entry { tlb_random, tlb_indexed };
513 509
514static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l, 510static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
515 struct uasm_reloc **r, 511 struct uasm_reloc **r,
516 enum tlb_write_entry wmode) 512 enum tlb_write_entry wmode)
517{ 513{
518 void(*tlbw)(u32 **) = NULL; 514 void(*tlbw)(u32 **) = NULL;
519 515
@@ -647,8 +643,8 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
647 } 643 }
648} 644}
649 645
650static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p, 646static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
651 unsigned int reg) 647 unsigned int reg)
652{ 648{
653 if (cpu_has_rixi) { 649 if (cpu_has_rixi) {
654 UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL)); 650 UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
@@ -663,11 +659,9 @@ static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
663 659
664#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 660#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
665 661
666static __cpuinit void build_restore_pagemask(u32 **p, 662static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
667 struct uasm_reloc **r, 663 unsigned int tmp, enum label_id lid,
668 unsigned int tmp, 664 int restore_scratch)
669 enum label_id lid,
670 int restore_scratch)
671{ 665{
672 if (restore_scratch) { 666 if (restore_scratch) {
673 /* Reset default page size */ 667 /* Reset default page size */
@@ -706,12 +700,11 @@ static __cpuinit void build_restore_pagemask(u32 **p,
706 } 700 }
707} 701}
708 702
709static __cpuinit void build_huge_tlb_write_entry(u32 **p, 703static void build_huge_tlb_write_entry(u32 **p, struct uasm_label **l,
710 struct uasm_label **l, 704 struct uasm_reloc **r,
711 struct uasm_reloc **r, 705 unsigned int tmp,
712 unsigned int tmp, 706 enum tlb_write_entry wmode,
713 enum tlb_write_entry wmode, 707 int restore_scratch)
714 int restore_scratch)
715{ 708{
716 /* Set huge page tlb entry size */ 709 /* Set huge page tlb entry size */
717 uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16); 710 uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
@@ -726,9 +719,9 @@ static __cpuinit void build_huge_tlb_write_entry(u32 **p,
726/* 719/*
727 * Check if Huge PTE is present, if so then jump to LABEL. 720 * Check if Huge PTE is present, if so then jump to LABEL.
728 */ 721 */
729static void __cpuinit 722static void
730build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp, 723build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
731 unsigned int pmd, int lid) 724 unsigned int pmd, int lid)
732{ 725{
733 UASM_i_LW(p, tmp, 0, pmd); 726 UASM_i_LW(p, tmp, 0, pmd);
734 if (use_bbit_insns()) { 727 if (use_bbit_insns()) {
@@ -739,9 +732,8 @@ build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
739 } 732 }
740} 733}
741 734
742static __cpuinit void build_huge_update_entries(u32 **p, 735static void build_huge_update_entries(u32 **p, unsigned int pte,
743 unsigned int pte, 736 unsigned int tmp)
744 unsigned int tmp)
745{ 737{
746 int small_sequence; 738 int small_sequence;
747 739
@@ -771,11 +763,10 @@ static __cpuinit void build_huge_update_entries(u32 **p,
771 UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */ 763 UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */
772} 764}
773 765
774static __cpuinit void build_huge_handler_tail(u32 **p, 766static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
775 struct uasm_reloc **r, 767 struct uasm_label **l,
776 struct uasm_label **l, 768 unsigned int pte,
777 unsigned int pte, 769 unsigned int ptr)
778 unsigned int ptr)
779{ 770{
780#ifdef CONFIG_SMP 771#ifdef CONFIG_SMP
781 UASM_i_SC(p, pte, 0, ptr); 772 UASM_i_SC(p, pte, 0, ptr);
@@ -794,7 +785,7 @@ static __cpuinit void build_huge_handler_tail(u32 **p,
794 * TMP and PTR are scratch. 785 * TMP and PTR are scratch.
795 * TMP will be clobbered, PTR will hold the pmd entry. 786 * TMP will be clobbered, PTR will hold the pmd entry.
796 */ 787 */
797static void __cpuinit 788static void
798build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, 789build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
799 unsigned int tmp, unsigned int ptr) 790 unsigned int tmp, unsigned int ptr)
800{ 791{
@@ -886,7 +877,7 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
886 * BVADDR is the faulting address, PTR is scratch. 877 * BVADDR is the faulting address, PTR is scratch.
887 * PTR will hold the pgd for vmalloc. 878 * PTR will hold the pgd for vmalloc.
888 */ 879 */
889static void __cpuinit 880static void
890build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, 881build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
891 unsigned int bvaddr, unsigned int ptr, 882 unsigned int bvaddr, unsigned int ptr,
892 enum vmalloc64_mode mode) 883 enum vmalloc64_mode mode)
@@ -956,7 +947,7 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
956 * TMP and PTR are scratch. 947 * TMP and PTR are scratch.
957 * TMP will be clobbered, PTR will hold the pgd entry. 948 * TMP will be clobbered, PTR will hold the pgd entry.
958 */ 949 */
959static void __cpuinit __maybe_unused 950static void __maybe_unused
960build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr) 951build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
961{ 952{
962 long pgdc = (long)pgd_current; 953 long pgdc = (long)pgd_current;
@@ -991,7 +982,7 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
991 982
992#endif /* !CONFIG_64BIT */ 983#endif /* !CONFIG_64BIT */
993 984
994static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx) 985static void build_adjust_context(u32 **p, unsigned int ctx)
995{ 986{
996 unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12; 987 unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
997 unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1); 988 unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);
@@ -1017,7 +1008,7 @@ static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx)
1017 uasm_i_andi(p, ctx, ctx, mask); 1008 uasm_i_andi(p, ctx, ctx, mask);
1018} 1009}
1019 1010
1020static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr) 1011static void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
1021{ 1012{
1022 /* 1013 /*
1023 * Bug workaround for the Nevada. It seems as if under certain 1014 * Bug workaround for the Nevada. It seems as if under certain
@@ -1042,8 +1033,7 @@ static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr
1042 UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */ 1033 UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
1043} 1034}
1044 1035
1045static void __cpuinit build_update_entries(u32 **p, unsigned int tmp, 1036static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
1046 unsigned int ptep)
1047{ 1037{
1048 /* 1038 /*
1049 * 64bit address support (36bit on a 32bit CPU) in a 32bit 1039 * 64bit address support (36bit on a 32bit CPU) in a 32bit
@@ -1104,7 +1094,7 @@ struct mips_huge_tlb_info {
1104 int restore_scratch; 1094 int restore_scratch;
1105}; 1095};
1106 1096
1107static struct mips_huge_tlb_info __cpuinit 1097static struct mips_huge_tlb_info
1108build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l, 1098build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
1109 struct uasm_reloc **r, unsigned int tmp, 1099 struct uasm_reloc **r, unsigned int tmp,
1110 unsigned int ptr, int c0_scratch_reg) 1100 unsigned int ptr, int c0_scratch_reg)
@@ -1282,7 +1272,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
1282 */ 1272 */
1283#define MIPS64_REFILL_INSNS 32 1273#define MIPS64_REFILL_INSNS 32
1284 1274
1285static void __cpuinit build_r4000_tlb_refill_handler(void) 1275static void build_r4000_tlb_refill_handler(void)
1286{ 1276{
1287 u32 *p = tlb_handler; 1277 u32 *p = tlb_handler;
1288 struct uasm_label *l = labels; 1278 struct uasm_label *l = labels;
@@ -1462,11 +1452,11 @@ extern u32 handle_tlbm[], handle_tlbm_end[];
1462#ifdef CONFIG_MIPS_PGD_C0_CONTEXT 1452#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
1463extern u32 tlbmiss_handler_setup_pgd[], tlbmiss_handler_setup_pgd_end[]; 1453extern u32 tlbmiss_handler_setup_pgd[], tlbmiss_handler_setup_pgd_end[];
1464 1454
1465static void __cpuinit build_r4000_setup_pgd(void) 1455static void build_r4000_setup_pgd(void)
1466{ 1456{
1467 const int a0 = 4; 1457 const int a0 = 4;
1468 const int a1 = 5; 1458 const int a1 = 5;
1469 u32 *p = tlbmiss_handler_setup_pgd_array; 1459 u32 *p = tlbmiss_handler_setup_pgd;
1470 const int tlbmiss_handler_setup_pgd_size = 1460 const int tlbmiss_handler_setup_pgd_size =
1471 tlbmiss_handler_setup_pgd_end - tlbmiss_handler_setup_pgd; 1461 tlbmiss_handler_setup_pgd_end - tlbmiss_handler_setup_pgd;
1472 struct uasm_label *l = labels; 1462 struct uasm_label *l = labels;
@@ -1513,7 +1503,7 @@ static void __cpuinit build_r4000_setup_pgd(void)
1513} 1503}
1514#endif 1504#endif
1515 1505
1516static void __cpuinit 1506static void
1517iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr) 1507iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
1518{ 1508{
1519#ifdef CONFIG_SMP 1509#ifdef CONFIG_SMP
@@ -1533,7 +1523,7 @@ iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
1533#endif 1523#endif
1534} 1524}
1535 1525
1536static void __cpuinit 1526static void
1537iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr, 1527iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
1538 unsigned int mode) 1528 unsigned int mode)
1539{ 1529{
@@ -1593,7 +1583,7 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
1593 * the page table where this PTE is located, PTE will be re-loaded 1583 * the page table where this PTE is located, PTE will be re-loaded
1594 * with it's original value. 1584 * with it's original value.
1595 */ 1585 */
1596static void __cpuinit 1586static void
1597build_pte_present(u32 **p, struct uasm_reloc **r, 1587build_pte_present(u32 **p, struct uasm_reloc **r,
1598 int pte, int ptr, int scratch, enum label_id lid) 1588 int pte, int ptr, int scratch, enum label_id lid)
1599{ 1589{
@@ -1621,7 +1611,7 @@ build_pte_present(u32 **p, struct uasm_reloc **r,
1621} 1611}
1622 1612
1623/* Make PTE valid, store result in PTR. */ 1613/* Make PTE valid, store result in PTR. */
1624static void __cpuinit 1614static void
1625build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte, 1615build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
1626 unsigned int ptr) 1616 unsigned int ptr)
1627{ 1617{
@@ -1634,7 +1624,7 @@ build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
1634 * Check if PTE can be written to, if not branch to LABEL. Regardless 1624 * Check if PTE can be written to, if not branch to LABEL. Regardless
1635 * restore PTE with value from PTR when done. 1625 * restore PTE with value from PTR when done.
1636 */ 1626 */
1637static void __cpuinit 1627static void
1638build_pte_writable(u32 **p, struct uasm_reloc **r, 1628build_pte_writable(u32 **p, struct uasm_reloc **r,
1639 unsigned int pte, unsigned int ptr, int scratch, 1629 unsigned int pte, unsigned int ptr, int scratch,
1640 enum label_id lid) 1630 enum label_id lid)
@@ -1654,7 +1644,7 @@ build_pte_writable(u32 **p, struct uasm_reloc **r,
1654/* Make PTE writable, update software status bits as well, then store 1644/* Make PTE writable, update software status bits as well, then store
1655 * at PTR. 1645 * at PTR.
1656 */ 1646 */
1657static void __cpuinit 1647static void
1658build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte, 1648build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
1659 unsigned int ptr) 1649 unsigned int ptr)
1660{ 1650{
@@ -1668,7 +1658,7 @@ build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
1668 * Check if PTE can be modified, if not branch to LABEL. Regardless 1658 * Check if PTE can be modified, if not branch to LABEL. Regardless
1669 * restore PTE with value from PTR when done. 1659 * restore PTE with value from PTR when done.
1670 */ 1660 */
1671static void __cpuinit 1661static void
1672build_pte_modifiable(u32 **p, struct uasm_reloc **r, 1662build_pte_modifiable(u32 **p, struct uasm_reloc **r,
1673 unsigned int pte, unsigned int ptr, int scratch, 1663 unsigned int pte, unsigned int ptr, int scratch,
1674 enum label_id lid) 1664 enum label_id lid)
@@ -1697,7 +1687,7 @@ build_pte_modifiable(u32 **p, struct uasm_reloc **r,
1697 * This places the pte into ENTRYLO0 and writes it with tlbwi. 1687 * This places the pte into ENTRYLO0 and writes it with tlbwi.
1698 * Then it returns. 1688 * Then it returns.
1699 */ 1689 */
1700static void __cpuinit 1690static void
1701build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp) 1691build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
1702{ 1692{
1703 uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */ 1693 uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
@@ -1713,7 +1703,7 @@ build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
1713 * may have the probe fail bit set as a result of a trap on a 1703 * may have the probe fail bit set as a result of a trap on a
1714 * kseg2 access, i.e. without refill. Then it returns. 1704 * kseg2 access, i.e. without refill. Then it returns.
1715 */ 1705 */
1716static void __cpuinit 1706static void
1717build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l, 1707build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
1718 struct uasm_reloc **r, unsigned int pte, 1708 struct uasm_reloc **r, unsigned int pte,
1719 unsigned int tmp) 1709 unsigned int tmp)
@@ -1731,7 +1721,7 @@ build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
1731 uasm_i_rfe(p); /* branch delay */ 1721 uasm_i_rfe(p); /* branch delay */
1732} 1722}
1733 1723
1734static void __cpuinit 1724static void
1735build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte, 1725build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
1736 unsigned int ptr) 1726 unsigned int ptr)
1737{ 1727{
@@ -1751,7 +1741,7 @@ build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
1751 uasm_i_tlbp(p); /* load delay */ 1741 uasm_i_tlbp(p); /* load delay */
1752} 1742}
1753 1743
1754static void __cpuinit build_r3000_tlb_load_handler(void) 1744static void build_r3000_tlb_load_handler(void)
1755{ 1745{
1756 u32 *p = handle_tlbl; 1746 u32 *p = handle_tlbl;
1757 const int handle_tlbl_size = handle_tlbl_end - handle_tlbl; 1747 const int handle_tlbl_size = handle_tlbl_end - handle_tlbl;
@@ -1782,7 +1772,7 @@ static void __cpuinit build_r3000_tlb_load_handler(void)
1782 dump_handler("r3000_tlb_load", handle_tlbl, handle_tlbl_size); 1772 dump_handler("r3000_tlb_load", handle_tlbl, handle_tlbl_size);
1783} 1773}
1784 1774
1785static void __cpuinit build_r3000_tlb_store_handler(void) 1775static void build_r3000_tlb_store_handler(void)
1786{ 1776{
1787 u32 *p = handle_tlbs; 1777 u32 *p = handle_tlbs;
1788 const int handle_tlbs_size = handle_tlbs_end - handle_tlbs; 1778 const int handle_tlbs_size = handle_tlbs_end - handle_tlbs;
@@ -1803,7 +1793,7 @@ static void __cpuinit build_r3000_tlb_store_handler(void)
1803 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); 1793 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
1804 uasm_i_nop(&p); 1794 uasm_i_nop(&p);
1805 1795
1806 if (p >= handle_tlbs) 1796 if (p >= handle_tlbs_end)
1807 panic("TLB store handler fastpath space exceeded"); 1797 panic("TLB store handler fastpath space exceeded");
1808 1798
1809 uasm_resolve_relocs(relocs, labels); 1799 uasm_resolve_relocs(relocs, labels);
@@ -1813,7 +1803,7 @@ static void __cpuinit build_r3000_tlb_store_handler(void)
1813 dump_handler("r3000_tlb_store", handle_tlbs, handle_tlbs_size); 1803 dump_handler("r3000_tlb_store", handle_tlbs, handle_tlbs_size);
1814} 1804}
1815 1805
1816static void __cpuinit build_r3000_tlb_modify_handler(void) 1806static void build_r3000_tlb_modify_handler(void)
1817{ 1807{
1818 u32 *p = handle_tlbm; 1808 u32 *p = handle_tlbm;
1819 const int handle_tlbm_size = handle_tlbm_end - handle_tlbm; 1809 const int handle_tlbm_size = handle_tlbm_end - handle_tlbm;
@@ -1848,7 +1838,7 @@ static void __cpuinit build_r3000_tlb_modify_handler(void)
1848/* 1838/*
1849 * R4000 style TLB load/store/modify handlers. 1839 * R4000 style TLB load/store/modify handlers.
1850 */ 1840 */
1851static struct work_registers __cpuinit 1841static struct work_registers
1852build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l, 1842build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
1853 struct uasm_reloc **r) 1843 struct uasm_reloc **r)
1854{ 1844{
@@ -1884,7 +1874,7 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
1884 return wr; 1874 return wr;
1885} 1875}
1886 1876
1887static void __cpuinit 1877static void
1888build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l, 1878build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
1889 struct uasm_reloc **r, unsigned int tmp, 1879 struct uasm_reloc **r, unsigned int tmp,
1890 unsigned int ptr) 1880 unsigned int ptr)
@@ -1902,7 +1892,7 @@ build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
1902#endif 1892#endif
1903} 1893}
1904 1894
1905static void __cpuinit build_r4000_tlb_load_handler(void) 1895static void build_r4000_tlb_load_handler(void)
1906{ 1896{
1907 u32 *p = handle_tlbl; 1897 u32 *p = handle_tlbl;
1908 const int handle_tlbl_size = handle_tlbl_end - handle_tlbl; 1898 const int handle_tlbl_size = handle_tlbl_end - handle_tlbl;
@@ -2085,7 +2075,7 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
2085 dump_handler("r4000_tlb_load", handle_tlbl, handle_tlbl_size); 2075 dump_handler("r4000_tlb_load", handle_tlbl, handle_tlbl_size);
2086} 2076}
2087 2077
2088static void __cpuinit build_r4000_tlb_store_handler(void) 2078static void build_r4000_tlb_store_handler(void)
2089{ 2079{
2090 u32 *p = handle_tlbs; 2080 u32 *p = handle_tlbs;
2091 const int handle_tlbs_size = handle_tlbs_end - handle_tlbs; 2081 const int handle_tlbs_size = handle_tlbs_end - handle_tlbs;
@@ -2140,7 +2130,7 @@ static void __cpuinit build_r4000_tlb_store_handler(void)
2140 dump_handler("r4000_tlb_store", handle_tlbs, handle_tlbs_size); 2130 dump_handler("r4000_tlb_store", handle_tlbs, handle_tlbs_size);
2141} 2131}
2142 2132
2143static void __cpuinit build_r4000_tlb_modify_handler(void) 2133static void build_r4000_tlb_modify_handler(void)
2144{ 2134{
2145 u32 *p = handle_tlbm; 2135 u32 *p = handle_tlbm;
2146 const int handle_tlbm_size = handle_tlbm_end - handle_tlbm; 2136 const int handle_tlbm_size = handle_tlbm_end - handle_tlbm;
@@ -2196,7 +2186,7 @@ static void __cpuinit build_r4000_tlb_modify_handler(void)
2196 dump_handler("r4000_tlb_modify", handle_tlbm, handle_tlbm_size); 2186 dump_handler("r4000_tlb_modify", handle_tlbm, handle_tlbm_size);
2197} 2187}
2198 2188
2199static void __cpuinit flush_tlb_handlers(void) 2189static void flush_tlb_handlers(void)
2200{ 2190{
2201 local_flush_icache_range((unsigned long)handle_tlbl, 2191 local_flush_icache_range((unsigned long)handle_tlbl,
2202 (unsigned long)handle_tlbl_end); 2192 (unsigned long)handle_tlbl_end);
@@ -2210,7 +2200,7 @@ static void __cpuinit flush_tlb_handlers(void)
2210#endif 2200#endif
2211} 2201}
2212 2202
2213void __cpuinit build_tlb_refill_handler(void) 2203void build_tlb_refill_handler(void)
2214{ 2204{
2215 /* 2205 /*
2216 * The refill handler is generated per-CPU, multi-node systems 2206 * The refill handler is generated per-CPU, multi-node systems
diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c
index 162ee6d62788..060000fa653c 100644
--- a/arch/mips/mm/uasm-micromips.c
+++ b/arch/mips/mm/uasm-micromips.c
@@ -49,7 +49,7 @@
49 49
50#include "uasm.c" 50#include "uasm.c"
51 51
52static struct insn insn_table_MM[] __uasminitdata = { 52static struct insn insn_table_MM[] = {
53 { insn_addu, M(mm_pool32a_op, 0, 0, 0, 0, mm_addu32_op), RT | RS | RD }, 53 { insn_addu, M(mm_pool32a_op, 0, 0, 0, 0, mm_addu32_op), RT | RS | RD },
54 { insn_addiu, M(mm_addiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM }, 54 { insn_addiu, M(mm_addiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
55 { insn_and, M(mm_pool32a_op, 0, 0, 0, 0, mm_and_op), RT | RS | RD }, 55 { insn_and, M(mm_pool32a_op, 0, 0, 0, 0, mm_and_op), RT | RS | RD },
@@ -118,7 +118,7 @@ static struct insn insn_table_MM[] __uasminitdata = {
118 118
119#undef M 119#undef M
120 120
121static inline __uasminit u32 build_bimm(s32 arg) 121static inline u32 build_bimm(s32 arg)
122{ 122{
123 WARN(arg > 0xffff || arg < -0x10000, 123 WARN(arg > 0xffff || arg < -0x10000,
124 KERN_WARNING "Micro-assembler field overflow\n"); 124 KERN_WARNING "Micro-assembler field overflow\n");
@@ -128,7 +128,7 @@ static inline __uasminit u32 build_bimm(s32 arg)
128 return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 1) & 0x7fff); 128 return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 1) & 0x7fff);
129} 129}
130 130
131static inline __uasminit u32 build_jimm(u32 arg) 131static inline u32 build_jimm(u32 arg)
132{ 132{
133 133
134 WARN(arg & ~((JIMM_MASK << 2) | 1), 134 WARN(arg & ~((JIMM_MASK << 2) | 1),
@@ -141,7 +141,7 @@ static inline __uasminit u32 build_jimm(u32 arg)
141 * The order of opcode arguments is implicitly left to right, 141 * The order of opcode arguments is implicitly left to right,
142 * starting with RS and ending with FUNC or IMM. 142 * starting with RS and ending with FUNC or IMM.
143 */ 143 */
144static void __uasminit build_insn(u32 **buf, enum opcode opc, ...) 144static void build_insn(u32 **buf, enum opcode opc, ...)
145{ 145{
146 struct insn *ip = NULL; 146 struct insn *ip = NULL;
147 unsigned int i; 147 unsigned int i;
@@ -199,7 +199,7 @@ static void __uasminit build_insn(u32 **buf, enum opcode opc, ...)
199 (*buf)++; 199 (*buf)++;
200} 200}
201 201
202static inline void __uasminit 202static inline void
203__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab) 203__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
204{ 204{
205 long laddr = (long)lab->addr; 205 long laddr = (long)lab->addr;
diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c
index 5fcdd8fe3e83..0c724589854e 100644
--- a/arch/mips/mm/uasm-mips.c
+++ b/arch/mips/mm/uasm-mips.c
@@ -49,7 +49,7 @@
49 49
50#include "uasm.c" 50#include "uasm.c"
51 51
52static struct insn insn_table[] __uasminitdata = { 52static struct insn insn_table[] = {
53 { insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 53 { insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
54 { insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD }, 54 { insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD },
55 { insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, 55 { insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
@@ -119,7 +119,7 @@ static struct insn insn_table[] __uasminitdata = {
119 119
120#undef M 120#undef M
121 121
122static inline __uasminit u32 build_bimm(s32 arg) 122static inline u32 build_bimm(s32 arg)
123{ 123{
124 WARN(arg > 0x1ffff || arg < -0x20000, 124 WARN(arg > 0x1ffff || arg < -0x20000,
125 KERN_WARNING "Micro-assembler field overflow\n"); 125 KERN_WARNING "Micro-assembler field overflow\n");
@@ -129,7 +129,7 @@ static inline __uasminit u32 build_bimm(s32 arg)
129 return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff); 129 return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
130} 130}
131 131
132static inline __uasminit u32 build_jimm(u32 arg) 132static inline u32 build_jimm(u32 arg)
133{ 133{
134 WARN(arg & ~(JIMM_MASK << 2), 134 WARN(arg & ~(JIMM_MASK << 2),
135 KERN_WARNING "Micro-assembler field overflow\n"); 135 KERN_WARNING "Micro-assembler field overflow\n");
@@ -141,7 +141,7 @@ static inline __uasminit u32 build_jimm(u32 arg)
141 * The order of opcode arguments is implicitly left to right, 141 * The order of opcode arguments is implicitly left to right,
142 * starting with RS and ending with FUNC or IMM. 142 * starting with RS and ending with FUNC or IMM.
143 */ 143 */
144static void __uasminit build_insn(u32 **buf, enum opcode opc, ...) 144static void build_insn(u32 **buf, enum opcode opc, ...)
145{ 145{
146 struct insn *ip = NULL; 146 struct insn *ip = NULL;
147 unsigned int i; 147 unsigned int i;
@@ -187,7 +187,7 @@ static void __uasminit build_insn(u32 **buf, enum opcode opc, ...)
187 (*buf)++; 187 (*buf)++;
188} 188}
189 189
190static inline void __uasminit 190static inline void
191__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab) 191__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
192{ 192{
193 long laddr = (long)lab->addr; 193 long laddr = (long)lab->addr;
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index 7eb5e4355d25..b9d14b6c7f58 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -63,35 +63,35 @@ struct insn {
63 enum fields fields; 63 enum fields fields;
64}; 64};
65 65
66static inline __uasminit u32 build_rs(u32 arg) 66static inline u32 build_rs(u32 arg)
67{ 67{
68 WARN(arg & ~RS_MASK, KERN_WARNING "Micro-assembler field overflow\n"); 68 WARN(arg & ~RS_MASK, KERN_WARNING "Micro-assembler field overflow\n");
69 69
70 return (arg & RS_MASK) << RS_SH; 70 return (arg & RS_MASK) << RS_SH;
71} 71}
72 72
73static inline __uasminit u32 build_rt(u32 arg) 73static inline u32 build_rt(u32 arg)
74{ 74{
75 WARN(arg & ~RT_MASK, KERN_WARNING "Micro-assembler field overflow\n"); 75 WARN(arg & ~RT_MASK, KERN_WARNING "Micro-assembler field overflow\n");
76 76
77 return (arg & RT_MASK) << RT_SH; 77 return (arg & RT_MASK) << RT_SH;
78} 78}
79 79
80static inline __uasminit u32 build_rd(u32 arg) 80static inline u32 build_rd(u32 arg)
81{ 81{
82 WARN(arg & ~RD_MASK, KERN_WARNING "Micro-assembler field overflow\n"); 82 WARN(arg & ~RD_MASK, KERN_WARNING "Micro-assembler field overflow\n");
83 83
84 return (arg & RD_MASK) << RD_SH; 84 return (arg & RD_MASK) << RD_SH;
85} 85}
86 86
87static inline __uasminit u32 build_re(u32 arg) 87static inline u32 build_re(u32 arg)
88{ 88{
89 WARN(arg & ~RE_MASK, KERN_WARNING "Micro-assembler field overflow\n"); 89 WARN(arg & ~RE_MASK, KERN_WARNING "Micro-assembler field overflow\n");
90 90
91 return (arg & RE_MASK) << RE_SH; 91 return (arg & RE_MASK) << RE_SH;
92} 92}
93 93
94static inline __uasminit u32 build_simm(s32 arg) 94static inline u32 build_simm(s32 arg)
95{ 95{
96 WARN(arg > 0x7fff || arg < -0x8000, 96 WARN(arg > 0x7fff || arg < -0x8000,
97 KERN_WARNING "Micro-assembler field overflow\n"); 97 KERN_WARNING "Micro-assembler field overflow\n");
@@ -99,14 +99,14 @@ static inline __uasminit u32 build_simm(s32 arg)
99 return arg & 0xffff; 99 return arg & 0xffff;
100} 100}
101 101
102static inline __uasminit u32 build_uimm(u32 arg) 102static inline u32 build_uimm(u32 arg)
103{ 103{
104 WARN(arg & ~IMM_MASK, KERN_WARNING "Micro-assembler field overflow\n"); 104 WARN(arg & ~IMM_MASK, KERN_WARNING "Micro-assembler field overflow\n");
105 105
106 return arg & IMM_MASK; 106 return arg & IMM_MASK;
107} 107}
108 108
109static inline __uasminit u32 build_scimm(u32 arg) 109static inline u32 build_scimm(u32 arg)
110{ 110{
111 WARN(arg & ~SCIMM_MASK, 111 WARN(arg & ~SCIMM_MASK,
112 KERN_WARNING "Micro-assembler field overflow\n"); 112 KERN_WARNING "Micro-assembler field overflow\n");
@@ -114,21 +114,21 @@ static inline __uasminit u32 build_scimm(u32 arg)
114 return (arg & SCIMM_MASK) << SCIMM_SH; 114 return (arg & SCIMM_MASK) << SCIMM_SH;
115} 115}
116 116
117static inline __uasminit u32 build_func(u32 arg) 117static inline u32 build_func(u32 arg)
118{ 118{
119 WARN(arg & ~FUNC_MASK, KERN_WARNING "Micro-assembler field overflow\n"); 119 WARN(arg & ~FUNC_MASK, KERN_WARNING "Micro-assembler field overflow\n");
120 120
121 return arg & FUNC_MASK; 121 return arg & FUNC_MASK;
122} 122}
123 123
124static inline __uasminit u32 build_set(u32 arg) 124static inline u32 build_set(u32 arg)
125{ 125{
126 WARN(arg & ~SET_MASK, KERN_WARNING "Micro-assembler field overflow\n"); 126 WARN(arg & ~SET_MASK, KERN_WARNING "Micro-assembler field overflow\n");
127 127
128 return arg & SET_MASK; 128 return arg & SET_MASK;
129} 129}
130 130
131static void __uasminit build_insn(u32 **buf, enum opcode opc, ...); 131static void build_insn(u32 **buf, enum opcode opc, ...);
132 132
133#define I_u1u2u3(op) \ 133#define I_u1u2u3(op) \
134Ip_u1u2u3(op) \ 134Ip_u1u2u3(op) \
@@ -286,7 +286,7 @@ I_u3u1u2(_ldx)
286 286
287#ifdef CONFIG_CPU_CAVIUM_OCTEON 287#ifdef CONFIG_CPU_CAVIUM_OCTEON
288#include <asm/octeon/octeon.h> 288#include <asm/octeon/octeon.h>
289void __uasminit ISAFUNC(uasm_i_pref)(u32 **buf, unsigned int a, signed int b, 289void ISAFUNC(uasm_i_pref)(u32 **buf, unsigned int a, signed int b,
290 unsigned int c) 290 unsigned int c)
291{ 291{
292 if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) && a <= 24 && a != 5) 292 if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) && a <= 24 && a != 5)
@@ -304,7 +304,7 @@ I_u2s3u1(_pref)
304#endif 304#endif
305 305
306/* Handle labels. */ 306/* Handle labels. */
307void __uasminit ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr, int lid) 307void ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr, int lid)
308{ 308{
309 (*lab)->addr = addr; 309 (*lab)->addr = addr;
310 (*lab)->lab = lid; 310 (*lab)->lab = lid;
@@ -312,7 +312,7 @@ void __uasminit ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr, in
312} 312}
313UASM_EXPORT_SYMBOL(ISAFUNC(uasm_build_label)); 313UASM_EXPORT_SYMBOL(ISAFUNC(uasm_build_label));
314 314
315int __uasminit ISAFUNC(uasm_in_compat_space_p)(long addr) 315int ISAFUNC(uasm_in_compat_space_p)(long addr)
316{ 316{
317 /* Is this address in 32bit compat space? */ 317 /* Is this address in 32bit compat space? */
318#ifdef CONFIG_64BIT 318#ifdef CONFIG_64BIT
@@ -323,7 +323,7 @@ int __uasminit ISAFUNC(uasm_in_compat_space_p)(long addr)
323} 323}
324UASM_EXPORT_SYMBOL(ISAFUNC(uasm_in_compat_space_p)); 324UASM_EXPORT_SYMBOL(ISAFUNC(uasm_in_compat_space_p));
325 325
326static int __uasminit uasm_rel_highest(long val) 326static int uasm_rel_highest(long val)
327{ 327{
328#ifdef CONFIG_64BIT 328#ifdef CONFIG_64BIT
329 return ((((val + 0x800080008000L) >> 48) & 0xffff) ^ 0x8000) - 0x8000; 329 return ((((val + 0x800080008000L) >> 48) & 0xffff) ^ 0x8000) - 0x8000;
@@ -332,7 +332,7 @@ static int __uasminit uasm_rel_highest(long val)
332#endif 332#endif
333} 333}
334 334
335static int __uasminit uasm_rel_higher(long val) 335static int uasm_rel_higher(long val)
336{ 336{
337#ifdef CONFIG_64BIT 337#ifdef CONFIG_64BIT
338 return ((((val + 0x80008000L) >> 32) & 0xffff) ^ 0x8000) - 0x8000; 338 return ((((val + 0x80008000L) >> 32) & 0xffff) ^ 0x8000) - 0x8000;
@@ -341,19 +341,19 @@ static int __uasminit uasm_rel_higher(long val)
341#endif 341#endif
342} 342}
343 343
344int __uasminit ISAFUNC(uasm_rel_hi)(long val) 344int ISAFUNC(uasm_rel_hi)(long val)
345{ 345{
346 return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000; 346 return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
347} 347}
348UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_hi)); 348UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_hi));
349 349
350int __uasminit ISAFUNC(uasm_rel_lo)(long val) 350int ISAFUNC(uasm_rel_lo)(long val)
351{ 351{
352 return ((val & 0xffff) ^ 0x8000) - 0x8000; 352 return ((val & 0xffff) ^ 0x8000) - 0x8000;
353} 353}
354UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_lo)); 354UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_lo));
355 355
356void __uasminit ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr) 356void ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr)
357{ 357{
358 if (!ISAFUNC(uasm_in_compat_space_p)(addr)) { 358 if (!ISAFUNC(uasm_in_compat_space_p)(addr)) {
359 ISAFUNC(uasm_i_lui)(buf, rs, uasm_rel_highest(addr)); 359 ISAFUNC(uasm_i_lui)(buf, rs, uasm_rel_highest(addr));
@@ -371,7 +371,7 @@ void __uasminit ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr)
371} 371}
372UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA_mostly)); 372UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA_mostly));
373 373
374void __uasminit ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr) 374void ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr)
375{ 375{
376 ISAFUNC(UASM_i_LA_mostly)(buf, rs, addr); 376 ISAFUNC(UASM_i_LA_mostly)(buf, rs, addr);
377 if (ISAFUNC(uasm_rel_lo(addr))) { 377 if (ISAFUNC(uasm_rel_lo(addr))) {
@@ -386,8 +386,7 @@ void __uasminit ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr)
386UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA)); 386UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA));
387 387
388/* Handle relocations. */ 388/* Handle relocations. */
389void __uasminit 389void ISAFUNC(uasm_r_mips_pc16)(struct uasm_reloc **rel, u32 *addr, int lid)
390ISAFUNC(uasm_r_mips_pc16)(struct uasm_reloc **rel, u32 *addr, int lid)
391{ 390{
392 (*rel)->addr = addr; 391 (*rel)->addr = addr;
393 (*rel)->type = R_MIPS_PC16; 392 (*rel)->type = R_MIPS_PC16;
@@ -396,11 +395,11 @@ ISAFUNC(uasm_r_mips_pc16)(struct uasm_reloc **rel, u32 *addr, int lid)
396} 395}
397UASM_EXPORT_SYMBOL(ISAFUNC(uasm_r_mips_pc16)); 396UASM_EXPORT_SYMBOL(ISAFUNC(uasm_r_mips_pc16));
398 397
399static inline void __uasminit 398static inline void __resolve_relocs(struct uasm_reloc *rel,
400__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab); 399 struct uasm_label *lab);
401 400
402void __uasminit 401void ISAFUNC(uasm_resolve_relocs)(struct uasm_reloc *rel,
403ISAFUNC(uasm_resolve_relocs)(struct uasm_reloc *rel, struct uasm_label *lab) 402 struct uasm_label *lab)
404{ 403{
405 struct uasm_label *l; 404 struct uasm_label *l;
406 405
@@ -411,8 +410,8 @@ ISAFUNC(uasm_resolve_relocs)(struct uasm_reloc *rel, struct uasm_label *lab)
411} 410}
412UASM_EXPORT_SYMBOL(ISAFUNC(uasm_resolve_relocs)); 411UASM_EXPORT_SYMBOL(ISAFUNC(uasm_resolve_relocs));
413 412
414void __uasminit 413void ISAFUNC(uasm_move_relocs)(struct uasm_reloc *rel, u32 *first, u32 *end,
415ISAFUNC(uasm_move_relocs)(struct uasm_reloc *rel, u32 *first, u32 *end, long off) 414 long off)
416{ 415{
417 for (; rel->lab != UASM_LABEL_INVALID; rel++) 416 for (; rel->lab != UASM_LABEL_INVALID; rel++)
418 if (rel->addr >= first && rel->addr < end) 417 if (rel->addr >= first && rel->addr < end)
@@ -420,8 +419,8 @@ ISAFUNC(uasm_move_relocs)(struct uasm_reloc *rel, u32 *first, u32 *end, long off
420} 419}
421UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_relocs)); 420UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_relocs));
422 421
423void __uasminit 422void ISAFUNC(uasm_move_labels)(struct uasm_label *lab, u32 *first, u32 *end,
424ISAFUNC(uasm_move_labels)(struct uasm_label *lab, u32 *first, u32 *end, long off) 423 long off)
425{ 424{
426 for (; lab->lab != UASM_LABEL_INVALID; lab++) 425 for (; lab->lab != UASM_LABEL_INVALID; lab++)
427 if (lab->addr >= first && lab->addr < end) 426 if (lab->addr >= first && lab->addr < end)
@@ -429,9 +428,8 @@ ISAFUNC(uasm_move_labels)(struct uasm_label *lab, u32 *first, u32 *end, long off
429} 428}
430UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_labels)); 429UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_labels));
431 430
432void __uasminit 431void ISAFUNC(uasm_copy_handler)(struct uasm_reloc *rel, struct uasm_label *lab,
433ISAFUNC(uasm_copy_handler)(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first, 432 u32 *first, u32 *end, u32 *target)
434 u32 *end, u32 *target)
435{ 433{
436 long off = (long)(target - first); 434 long off = (long)(target - first);
437 435
@@ -442,7 +440,7 @@ ISAFUNC(uasm_copy_handler)(struct uasm_reloc *rel, struct uasm_label *lab, u32 *
442} 440}
443UASM_EXPORT_SYMBOL(ISAFUNC(uasm_copy_handler)); 441UASM_EXPORT_SYMBOL(ISAFUNC(uasm_copy_handler));
444 442
445int __uasminit ISAFUNC(uasm_insn_has_bdelay)(struct uasm_reloc *rel, u32 *addr) 443int ISAFUNC(uasm_insn_has_bdelay)(struct uasm_reloc *rel, u32 *addr)
446{ 444{
447 for (; rel->lab != UASM_LABEL_INVALID; rel++) { 445 for (; rel->lab != UASM_LABEL_INVALID; rel++) {
448 if (rel->addr == addr 446 if (rel->addr == addr
@@ -456,83 +454,79 @@ int __uasminit ISAFUNC(uasm_insn_has_bdelay)(struct uasm_reloc *rel, u32 *addr)
456UASM_EXPORT_SYMBOL(ISAFUNC(uasm_insn_has_bdelay)); 454UASM_EXPORT_SYMBOL(ISAFUNC(uasm_insn_has_bdelay));
457 455
458/* Convenience functions for labeled branches. */ 456/* Convenience functions for labeled branches. */
459void __uasminit 457void ISAFUNC(uasm_il_bltz)(u32 **p, struct uasm_reloc **r, unsigned int reg,
460ISAFUNC(uasm_il_bltz)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) 458 int lid)
461{ 459{
462 uasm_r_mips_pc16(r, *p, lid); 460 uasm_r_mips_pc16(r, *p, lid);
463 ISAFUNC(uasm_i_bltz)(p, reg, 0); 461 ISAFUNC(uasm_i_bltz)(p, reg, 0);
464} 462}
465UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bltz)); 463UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bltz));
466 464
467void __uasminit 465void ISAFUNC(uasm_il_b)(u32 **p, struct uasm_reloc **r, int lid)
468ISAFUNC(uasm_il_b)(u32 **p, struct uasm_reloc **r, int lid)
469{ 466{
470 uasm_r_mips_pc16(r, *p, lid); 467 uasm_r_mips_pc16(r, *p, lid);
471 ISAFUNC(uasm_i_b)(p, 0); 468 ISAFUNC(uasm_i_b)(p, 0);
472} 469}
473UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_b)); 470UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_b));
474 471
475void __uasminit 472void ISAFUNC(uasm_il_beqz)(u32 **p, struct uasm_reloc **r, unsigned int reg,
476ISAFUNC(uasm_il_beqz)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) 473 int lid)
477{ 474{
478 uasm_r_mips_pc16(r, *p, lid); 475 uasm_r_mips_pc16(r, *p, lid);
479 ISAFUNC(uasm_i_beqz)(p, reg, 0); 476 ISAFUNC(uasm_i_beqz)(p, reg, 0);
480} 477}
481UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqz)); 478UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqz));
482 479
483void __uasminit 480void ISAFUNC(uasm_il_beqzl)(u32 **p, struct uasm_reloc **r, unsigned int reg,
484ISAFUNC(uasm_il_beqzl)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) 481 int lid)
485{ 482{
486 uasm_r_mips_pc16(r, *p, lid); 483 uasm_r_mips_pc16(r, *p, lid);
487 ISAFUNC(uasm_i_beqzl)(p, reg, 0); 484 ISAFUNC(uasm_i_beqzl)(p, reg, 0);
488} 485}
489UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqzl)); 486UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqzl));
490 487
491void __uasminit 488void ISAFUNC(uasm_il_bne)(u32 **p, struct uasm_reloc **r, unsigned int reg1,
492ISAFUNC(uasm_il_bne)(u32 **p, struct uasm_reloc **r, unsigned int reg1, 489 unsigned int reg2, int lid)
493 unsigned int reg2, int lid)
494{ 490{
495 uasm_r_mips_pc16(r, *p, lid); 491 uasm_r_mips_pc16(r, *p, lid);
496 ISAFUNC(uasm_i_bne)(p, reg1, reg2, 0); 492 ISAFUNC(uasm_i_bne)(p, reg1, reg2, 0);
497} 493}
498UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bne)); 494UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bne));
499 495
500void __uasminit 496void ISAFUNC(uasm_il_bnez)(u32 **p, struct uasm_reloc **r, unsigned int reg,
501ISAFUNC(uasm_il_bnez)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) 497 int lid)
502{ 498{
503 uasm_r_mips_pc16(r, *p, lid); 499 uasm_r_mips_pc16(r, *p, lid);
504 ISAFUNC(uasm_i_bnez)(p, reg, 0); 500 ISAFUNC(uasm_i_bnez)(p, reg, 0);
505} 501}
506UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bnez)); 502UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bnez));
507 503
508void __uasminit 504void ISAFUNC(uasm_il_bgezl)(u32 **p, struct uasm_reloc **r, unsigned int reg,
509ISAFUNC(uasm_il_bgezl)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) 505 int lid)
510{ 506{
511 uasm_r_mips_pc16(r, *p, lid); 507 uasm_r_mips_pc16(r, *p, lid);
512 ISAFUNC(uasm_i_bgezl)(p, reg, 0); 508 ISAFUNC(uasm_i_bgezl)(p, reg, 0);
513} 509}
514UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgezl)); 510UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgezl));
515 511
516void __uasminit 512void ISAFUNC(uasm_il_bgez)(u32 **p, struct uasm_reloc **r, unsigned int reg,
517ISAFUNC(uasm_il_bgez)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) 513 int lid)
518{ 514{
519 uasm_r_mips_pc16(r, *p, lid); 515 uasm_r_mips_pc16(r, *p, lid);
520 ISAFUNC(uasm_i_bgez)(p, reg, 0); 516 ISAFUNC(uasm_i_bgez)(p, reg, 0);
521} 517}
522UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgez)); 518UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgez));
523 519
524void __uasminit 520void ISAFUNC(uasm_il_bbit0)(u32 **p, struct uasm_reloc **r, unsigned int reg,
525ISAFUNC(uasm_il_bbit0)(u32 **p, struct uasm_reloc **r, unsigned int reg, 521 unsigned int bit, int lid)
526 unsigned int bit, int lid)
527{ 522{
528 uasm_r_mips_pc16(r, *p, lid); 523 uasm_r_mips_pc16(r, *p, lid);
529 ISAFUNC(uasm_i_bbit0)(p, reg, bit, 0); 524 ISAFUNC(uasm_i_bbit0)(p, reg, bit, 0);
530} 525}
531UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bbit0)); 526UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bbit0));
532 527
533void __uasminit 528void ISAFUNC(uasm_il_bbit1)(u32 **p, struct uasm_reloc **r, unsigned int reg,
534ISAFUNC(uasm_il_bbit1)(u32 **p, struct uasm_reloc **r, unsigned int reg, 529 unsigned int bit, int lid)
535 unsigned int bit, int lid)
536{ 530{
537 uasm_r_mips_pc16(r, *p, lid); 531 uasm_r_mips_pc16(r, *p, lid);
538 ISAFUNC(uasm_i_bbit1)(p, reg, bit, 0); 532 ISAFUNC(uasm_i_bbit1)(p, reg, bit, 0);
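
The uasm_rel_highest/higher/hi/lo helpers kept (minus __uasminit) in the hunk above split an address into sign-adjusted 16-bit pieces so that a lui plus sign-extending addiu/daddiu sequence reconstructs it exactly. As a minimal standalone sketch of that arithmetic (plain userspace C, not kernel code; it only exercises the 32-bit hi/lo pair, the example address is arbitrary):

#include <assert.h>
#include <stdio.h>

/* Same formulas as uasm_rel_hi()/uasm_rel_lo(): each 16-bit piece is
 * biased so it stays correct once the lower piece is added back as a
 * sign-extended immediate, the way addiu/daddiu treat it. */
static int rel_hi(long val)
{
	return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
}

static int rel_lo(long val)
{
	return ((val & 0xffff) ^ 0x8000) - 0x8000;
}

int main(void)
{
	long addr = 0x12349abcL;	/* low half has bit 15 set */
	int hi = rel_hi(addr);		/* 0x1235: bumped to absorb the carry */
	int lo = rel_lo(addr);		/* -0x6544: low half, sign-extended */

	/* "lui rs, hi; addiu rs, rs, lo" rebuilds the original address */
	long rebuilt = ((long)hi << 16) + lo;

	printf("hi=%#x lo=%d rebuilt=%#lx\n", hi, lo, rebuilt);
	assert(rebuilt == addr);
	return 0;
}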
diff --git a/arch/mips/mti-malta/malta-smtc.c b/arch/mips/mti-malta/malta-smtc.c
index becbf47506a5..c4849904f013 100644
--- a/arch/mips/mti-malta/malta-smtc.c
+++ b/arch/mips/mti-malta/malta-smtc.c
@@ -32,7 +32,7 @@ static void msmtc_send_ipi_mask(const struct cpumask *mask, unsigned int action)
32/* 32/*
33 * Post-config but pre-boot cleanup entry point 33 * Post-config but pre-boot cleanup entry point
34 */ 34 */
35static void __cpuinit msmtc_init_secondary(void) 35static void msmtc_init_secondary(void)
36{ 36{
37 int myvpe; 37 int myvpe;
38 38
@@ -53,7 +53,7 @@ static void __cpuinit msmtc_init_secondary(void)
53/* 53/*
54 * Platform "CPU" startup hook 54 * Platform "CPU" startup hook
55 */ 55 */
56static void __cpuinit msmtc_boot_secondary(int cpu, struct task_struct *idle) 56static void msmtc_boot_secondary(int cpu, struct task_struct *idle)
57{ 57{
58 smtc_boot_secondary(cpu, idle); 58 smtc_boot_secondary(cpu, idle);
59} 59}
@@ -61,7 +61,7 @@ static void __cpuinit msmtc_boot_secondary(int cpu, struct task_struct *idle)
61/* 61/*
62 * SMP initialization finalization entry point 62 * SMP initialization finalization entry point
63 */ 63 */
64static void __cpuinit msmtc_smp_finish(void) 64static void msmtc_smp_finish(void)
65{ 65{
66 smtc_smp_finish(); 66 smtc_smp_finish();
67} 67}
diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c
index 0ad305f75802..53aad4a35375 100644
--- a/arch/mips/mti-malta/malta-time.c
+++ b/arch/mips/mti-malta/malta-time.c
@@ -150,7 +150,7 @@ static void __init plat_perf_setup(void)
150 } 150 }
151} 151}
152 152
153unsigned int __cpuinit get_c0_compare_int(void) 153unsigned int get_c0_compare_int(void)
154{ 154{
155#ifdef MSC01E_INT_BASE 155#ifdef MSC01E_INT_BASE
156 if (cpu_has_veic) { 156 if (cpu_has_veic) {
diff --git a/arch/mips/mti-sead3/sead3-time.c b/arch/mips/mti-sead3/sead3-time.c
index 96b42eb9b5e2..a43ea3cc0a3b 100644
--- a/arch/mips/mti-sead3/sead3-time.c
+++ b/arch/mips/mti-sead3/sead3-time.c
@@ -91,7 +91,7 @@ static void __init plat_perf_setup(void)
91 } 91 }
92} 92}
93 93
94unsigned int __cpuinit get_c0_compare_int(void) 94unsigned int get_c0_compare_int(void)
95{ 95{
96 if (cpu_has_vint) 96 if (cpu_has_vint)
97 set_vi_handler(cp0_compare_irq, mips_timer_dispatch); 97 set_vi_handler(cp0_compare_irq, mips_timer_dispatch);
diff --git a/arch/mips/netlogic/common/irq.c b/arch/mips/netlogic/common/irq.c
index 73facb2b33bb..1c7e3a1b81ab 100644
--- a/arch/mips/netlogic/common/irq.c
+++ b/arch/mips/netlogic/common/irq.c
@@ -40,6 +40,10 @@
40#include <linux/slab.h> 40#include <linux/slab.h>
41#include <linux/irq.h> 41#include <linux/irq.h>
42 42
43#include <linux/irqdomain.h>
44#include <linux/of_address.h>
45#include <linux/of_irq.h>
46
43#include <asm/errno.h> 47#include <asm/errno.h>
44#include <asm/signal.h> 48#include <asm/signal.h>
45#include <asm/ptrace.h> 49#include <asm/ptrace.h>
@@ -223,17 +227,6 @@ static void nlm_init_node_irqs(int node)
223 nodep->irqmask = irqmask; 227 nodep->irqmask = irqmask;
224} 228}
225 229
226void __init arch_init_irq(void)
227{
228 /* Initialize the irq descriptors */
229 nlm_init_percpu_irqs();
230 nlm_init_node_irqs(0);
231 write_c0_eimr(nlm_current_node()->irqmask);
232#if defined(CONFIG_CPU_XLR)
233 nlm_setup_fmn_irq();
234#endif
235}
236
237void nlm_smp_irq_init(int hwcpuid) 230void nlm_smp_irq_init(int hwcpuid)
238{ 231{
239 int node, cpu; 232 int node, cpu;
@@ -266,3 +259,56 @@ asmlinkage void plat_irq_dispatch(void)
266 /* top level irq handling */ 259 /* top level irq handling */
267 do_IRQ(nlm_irq_to_xirq(node, i)); 260 do_IRQ(nlm_irq_to_xirq(node, i));
268} 261}
262
263#ifdef CONFIG_OF
264static struct irq_domain *xlp_pic_domain;
265
266static const struct irq_domain_ops xlp_pic_irq_domain_ops = {
267 .xlate = irq_domain_xlate_onetwocell,
268};
269
270static int __init xlp_of_pic_init(struct device_node *node,
271 struct device_node *parent)
272{
273 const int n_picirqs = PIC_IRT_LAST_IRQ - PIC_IRQ_BASE + 1;
274 struct resource res;
275 int socid, ret;
276
277 /* we need a hack to get the PIC's SoC chip id */
278 ret = of_address_to_resource(node, 0, &res);
279 if (ret < 0) {
280 pr_err("PIC %s: reg property not found!\n", node->name);
281 return -EINVAL;
282 }
283 socid = (res.start >> 18) & 0x3;
284 xlp_pic_domain = irq_domain_add_legacy(node, n_picirqs,
285 nlm_irq_to_xirq(socid, PIC_IRQ_BASE), PIC_IRQ_BASE,
286 &xlp_pic_irq_domain_ops, NULL);
287 if (xlp_pic_domain == NULL) {
288 pr_err("PIC %s: Creating legacy domain failed!\n", node->name);
289 return -EINVAL;
290 }
291 pr_info("Node %d: IRQ domain created for PIC@%pa\n", socid,
292 &res.start);
293 return 0;
294}
295
296static struct of_device_id __initdata xlp_pic_irq_ids[] = {
297 { .compatible = "netlogic,xlp-pic", .data = xlp_of_pic_init },
298 {},
299};
300#endif
301
302void __init arch_init_irq(void)
303{
304 /* Initialize the irq descriptors */
305 nlm_init_percpu_irqs();
306 nlm_init_node_irqs(0);
307 write_c0_eimr(nlm_current_node()->irqmask);
308#if defined(CONFIG_CPU_XLR)
309 nlm_setup_fmn_irq();
310#endif
311#if defined(CONFIG_OF)
312 of_irq_init(xlp_pic_irq_ids);
313#endif
314}
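
The new xlp_of_pic_init() above asks irq_domain_add_legacy() to cover the PIC's hardware interrupt range so that a PIC hwirq on SoC socid resolves to the per-node "xirq" number the platform already uses. A rough, non-authoritative sketch of that mapping follows; the constants and the linear nlm_irq_to_xirq() below are illustrative placeholders, not the real NetLogic values:

#include <stdio.h>

/* Illustrative placeholders -- the real values live in the NetLogic headers. */
#define NR_IRQS_PER_NODE	64
#define PIC_IRQ_BASE		8

/* Stand-in for nlm_irq_to_xirq(): per-node linear IRQ numbering. */
static int nlm_irq_to_xirq(int node, int irq)
{
	return node * NR_IRQS_PER_NODE + irq;
}

/*
 * irq_domain_add_legacy(node, n, first_irq, first_hwirq, ...) sets up a
 * linear window: hwirq H maps to first_irq + (H - first_hwirq).  With
 * first_irq = nlm_irq_to_xirq(socid, PIC_IRQ_BASE) and
 * first_hwirq = PIC_IRQ_BASE, a PIC hwirq H lands on
 * nlm_irq_to_xirq(socid, H).
 */
static int legacy_map(int socid, int hwirq)
{
	int first_irq = nlm_irq_to_xirq(socid, PIC_IRQ_BASE);

	return first_irq + (hwirq - PIC_IRQ_BASE);
}

int main(void)
{
	int socid, hwirq;

	for (socid = 0; socid < 2; socid++)
		for (hwirq = PIC_IRQ_BASE; hwirq < PIC_IRQ_BASE + 4; hwirq++)
			printf("soc %d hwirq %d -> linux irq %d\n",
			       socid, hwirq, legacy_map(socid, hwirq));
	return 0;
}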
diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c
index 885d293b61da..4e35d9c453e2 100644
--- a/arch/mips/netlogic/common/smp.c
+++ b/arch/mips/netlogic/common/smp.c
@@ -116,7 +116,7 @@ void nlm_early_init_secondary(int cpu)
116/* 116/*
117 * Code to run on secondary just after probing the CPU 117 * Code to run on secondary just after probing the CPU
118 */ 118 */
119static void __cpuinit nlm_init_secondary(void) 119static void nlm_init_secondary(void)
120{ 120{
121 int hwtid; 121 int hwtid;
122 122
@@ -252,7 +252,7 @@ unsupp:
252 return 0; 252 return 0;
253} 253}
254 254
255int __cpuinit nlm_wakeup_secondary_cpus(void) 255int nlm_wakeup_secondary_cpus(void)
256{ 256{
257 u32 *reset_data; 257 u32 *reset_data;
258 int threadmode; 258 int threadmode;
diff --git a/arch/mips/netlogic/common/smpboot.S b/arch/mips/netlogic/common/smpboot.S
index 528c46c5a170..aa6cff0a229b 100644
--- a/arch/mips/netlogic/common/smpboot.S
+++ b/arch/mips/netlogic/common/smpboot.S
@@ -70,7 +70,6 @@ FEXPORT(xlp_boot_core0_siblings) /* "Master" cpu starts from here */
70 nop 70 nop
71 /* not reached */ 71 /* not reached */
72 72
73 __CPUINIT
74NESTED(nlm_boot_secondary_cpus, 16, sp) 73NESTED(nlm_boot_secondary_cpus, 16, sp)
75 /* Initialize CP0 Status */ 74 /* Initialize CP0 Status */
76 move t1, zero 75 move t1, zero
@@ -94,7 +93,6 @@ NESTED(nlm_boot_secondary_cpus, 16, sp)
94 jr t0 93 jr t0
95 nop 94 nop
96END(nlm_boot_secondary_cpus) 95END(nlm_boot_secondary_cpus)
97 __FINIT
98 96
99/* 97/*
100 * In case of RMIboot bootloader which is used on XLR boards, the CPUs 98 * In case of RMIboot bootloader which is used on XLR boards, the CPUs
@@ -102,7 +100,6 @@ END(nlm_boot_secondary_cpus)
102 * This will get them out of the bootloader code and into linux. Needed 100 * This will get them out of the bootloader code and into linux. Needed
103 * because the bootloader area will be taken and initialized by linux. 101 * because the bootloader area will be taken and initialized by linux.
104 */ 102 */
105 __CPUINIT
106NESTED(nlm_rmiboot_preboot, 16, sp) 103NESTED(nlm_rmiboot_preboot, 16, sp)
107 mfc0 t0, $15, 1 /* read ebase */ 104 mfc0 t0, $15, 1 /* read ebase */
108 andi t0, 0x1f /* t0 has the processor_id() */ 105 andi t0, 0x1f /* t0 has the processor_id() */
@@ -140,4 +137,3 @@ NESTED(nlm_rmiboot_preboot, 16, sp)
140 b 1b 137 b 1b
141 nop 138 nop
142END(nlm_rmiboot_preboot) 139END(nlm_rmiboot_preboot)
143 __FINIT
diff --git a/arch/mips/netlogic/common/time.c b/arch/mips/netlogic/common/time.c
index 5c56555380bb..045a396c57ce 100644
--- a/arch/mips/netlogic/common/time.c
+++ b/arch/mips/netlogic/common/time.c
@@ -54,7 +54,7 @@
54#error "Unknown CPU" 54#error "Unknown CPU"
55#endif 55#endif
56 56
57unsigned int __cpuinit get_c0_compare_int(void) 57unsigned int get_c0_compare_int(void)
58{ 58{
59 return IRQ_TIMER; 59 return IRQ_TIMER;
60} 60}
diff --git a/arch/mips/netlogic/dts/xlp_evp.dts b/arch/mips/netlogic/dts/xlp_evp.dts
index e14f42308064..06407033678e 100644
--- a/arch/mips/netlogic/dts/xlp_evp.dts
+++ b/arch/mips/netlogic/dts/xlp_evp.dts
@@ -76,10 +76,11 @@
76 }; 76 };
77 }; 77 };
78 pic: pic@4000 { 78 pic: pic@4000 {
79 interrupt-controller; 79 compatible = "netlogic,xlp-pic";
80 #address-cells = <0>; 80 #address-cells = <0>;
81 #interrupt-cells = <1>; 81 #interrupt-cells = <1>;
82 reg = <0 0x4000 0x200>; 82 reg = <0 0x4000 0x200>;
83 interrupt-controller;
83 }; 84 };
84 85
85 nor_flash@1,0 { 86 nor_flash@1,0 {
diff --git a/arch/mips/netlogic/dts/xlp_svp.dts b/arch/mips/netlogic/dts/xlp_svp.dts
index 8af4bdbe5d99..9c5db102df53 100644
--- a/arch/mips/netlogic/dts/xlp_svp.dts
+++ b/arch/mips/netlogic/dts/xlp_svp.dts
@@ -76,10 +76,11 @@
76 }; 76 };
77 }; 77 };
78 pic: pic@4000 { 78 pic: pic@4000 {
79 interrupt-controller; 79 compatible = "netlogic,xlp-pic";
80 #address-cells = <0>; 80 #address-cells = <0>;
81 #interrupt-cells = <1>; 81 #interrupt-cells = <1>;
82 reg = <0 0x4000 0x200>; 82 reg = <0 0x4000 0x200>;
83 interrupt-controller;
83 }; 84 };
84 85
85 nor_flash@1,0 { 86 nor_flash@1,0 {
diff --git a/arch/mips/netlogic/xlp/usb-init.c b/arch/mips/netlogic/xlp/usb-init.c
index 9c401dd78337..ef3897ef0dc7 100644
--- a/arch/mips/netlogic/xlp/usb-init.c
+++ b/arch/mips/netlogic/xlp/usb-init.c
@@ -119,7 +119,7 @@ static u64 xlp_usb_dmamask = ~(u32)0;
119static void nlm_usb_fixup_final(struct pci_dev *dev) 119static void nlm_usb_fixup_final(struct pci_dev *dev)
120{ 120{
121 dev->dev.dma_mask = &xlp_usb_dmamask; 121 dev->dev.dma_mask = &xlp_usb_dmamask;
122 dev->dev.coherent_dma_mask = DMA_BIT_MASK(64); 122 dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
123 switch (dev->devfn) { 123 switch (dev->devfn) {
124 case 0x10: 124 case 0x10:
125 dev->irq = PIC_EHCI_0_IRQ; 125 dev->irq = PIC_EHCI_0_IRQ;
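
The fixup above narrows the coherent DMA mask of the on-chip USB functions from 64 to 32 bits. DMA_BIT_MASK(n) is simply an n-bit all-ones mask; the quick standalone check below reproduces the well-known macro shape rather than including the kernel header, so treat it as a sketch:

#include <stdio.h>

/* Same shape as the kernel's DMA_BIT_MASK(): n low bits set, with the
 * n == 64 case special-cased to avoid an undefined 64-bit shift. */
#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	printf("DMA_BIT_MASK(32) = %#llx\n", DMA_BIT_MASK(32)); /* 0xffffffff */
	printf("DMA_BIT_MASK(64) = %#llx\n", DMA_BIT_MASK(64)); /* all ones   */
	return 0;
}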
diff --git a/arch/mips/netlogic/xlr/wakeup.c b/arch/mips/netlogic/xlr/wakeup.c
index c06e4c9f0478..9fb81fa6272a 100644
--- a/arch/mips/netlogic/xlr/wakeup.c
+++ b/arch/mips/netlogic/xlr/wakeup.c
@@ -49,7 +49,7 @@
49#include <asm/netlogic/xlr/iomap.h> 49#include <asm/netlogic/xlr/iomap.h>
50#include <asm/netlogic/xlr/pic.h> 50#include <asm/netlogic/xlr/pic.h>
51 51
52int __cpuinit xlr_wakeup_secondary_cpus(void) 52int xlr_wakeup_secondary_cpus(void)
53{ 53{
54 struct nlm_soc_info *nodep; 54 struct nlm_soc_info *nodep;
55 unsigned int i, j, boot_cpu; 55 unsigned int i, j, boot_cpu;
diff --git a/arch/mips/pci/pci-ip27.c b/arch/mips/pci/pci-ip27.c
index 7b2ac81e1f59..162b4cb29dba 100644
--- a/arch/mips/pci/pci-ip27.c
+++ b/arch/mips/pci/pci-ip27.c
@@ -42,7 +42,7 @@ int irq_to_slot[MAX_PCI_BUSSES * MAX_DEVICES_PER_PCIBUS];
42 42
43extern struct pci_ops bridge_pci_ops; 43extern struct pci_ops bridge_pci_ops;
44 44
45int __cpuinit bridge_probe(nasid_t nasid, int widget_id, int masterwid) 45int bridge_probe(nasid_t nasid, int widget_id, int masterwid)
46{ 46{
47 unsigned long offset = NODE_OFFSET(nasid); 47 unsigned long offset = NODE_OFFSET(nasid);
48 struct bridge_controller *bc; 48 struct bridge_controller *bc;
diff --git a/arch/mips/pmcs-msp71xx/msp_smtc.c b/arch/mips/pmcs-msp71xx/msp_smtc.c
index c8dcc1c01e18..6b5607fce279 100644
--- a/arch/mips/pmcs-msp71xx/msp_smtc.c
+++ b/arch/mips/pmcs-msp71xx/msp_smtc.c
@@ -33,7 +33,7 @@ static void msp_smtc_send_ipi_mask(const struct cpumask *mask,
33/* 33/*
34 * Post-config but pre-boot cleanup entry point 34 * Post-config but pre-boot cleanup entry point
35 */ 35 */
36static void __cpuinit msp_smtc_init_secondary(void) 36static void msp_smtc_init_secondary(void)
37{ 37{
38 int myvpe; 38 int myvpe;
39 39
@@ -48,8 +48,7 @@ static void __cpuinit msp_smtc_init_secondary(void)
48/* 48/*
49 * Platform "CPU" startup hook 49 * Platform "CPU" startup hook
50 */ 50 */
51static void __cpuinit msp_smtc_boot_secondary(int cpu, 51static void msp_smtc_boot_secondary(int cpu, struct task_struct *idle)
52 struct task_struct *idle)
53{ 52{
54 smtc_boot_secondary(cpu, idle); 53 smtc_boot_secondary(cpu, idle);
55} 54}
@@ -57,7 +56,7 @@ static void __cpuinit msp_smtc_boot_secondary(int cpu,
57/* 56/*
58 * SMP initialization finalization entry point 57 * SMP initialization finalization entry point
59 */ 58 */
60static void __cpuinit msp_smtc_smp_finish(void) 59static void msp_smtc_smp_finish(void)
61{ 60{
62 smtc_smp_finish(); 61 smtc_smp_finish();
63} 62}
diff --git a/arch/mips/pmcs-msp71xx/msp_time.c b/arch/mips/pmcs-msp71xx/msp_time.c
index 8f12ecc55ace..fea917be0ff1 100644
--- a/arch/mips/pmcs-msp71xx/msp_time.c
+++ b/arch/mips/pmcs-msp71xx/msp_time.c
@@ -88,7 +88,7 @@ void __init plat_time_init(void)
88 mips_hpt_frequency = cpu_rate/2; 88 mips_hpt_frequency = cpu_rate/2;
89} 89}
90 90
91unsigned int __cpuinit get_c0_compare_int(void) 91unsigned int get_c0_compare_int(void)
92{ 92{
93 /* MIPS_MT modes may want timer for second VPE */ 93 /* MIPS_MT modes may want timer for second VPE */
94 if ((get_current_vpe()) && !tim_installed) { 94 if ((get_current_vpe()) && !tim_installed) {
diff --git a/arch/mips/pnx833x/common/interrupts.c b/arch/mips/pnx833x/common/interrupts.c
index a4a90596c0ad..e460865873c1 100644
--- a/arch/mips/pnx833x/common/interrupts.c
+++ b/arch/mips/pnx833x/common/interrupts.c
@@ -281,7 +281,7 @@ void __init arch_init_irq(void)
281 write_c0_status(read_c0_status() | IE_IRQ2); 281 write_c0_status(read_c0_status() | IE_IRQ2);
282} 282}
283 283
284unsigned int __cpuinit get_c0_compare_int(void) 284unsigned int get_c0_compare_int(void)
285{ 285{
286 if (cpu_has_vint) 286 if (cpu_has_vint)
287 set_vi_handler(cp0_compare_irq, pnx833x_timer_dispatch); 287 set_vi_handler(cp0_compare_irq, pnx833x_timer_dispatch);
diff --git a/arch/mips/powertv/time.c b/arch/mips/powertv/time.c
index 9fd7b67f2af7..f38b0d45eca9 100644
--- a/arch/mips/powertv/time.c
+++ b/arch/mips/powertv/time.c
@@ -25,7 +25,7 @@
25 25
26#include "powertv-clock.h" 26#include "powertv-clock.h"
27 27
28unsigned int __cpuinit get_c0_compare_int(void) 28unsigned int get_c0_compare_int(void)
29{ 29{
30 return irq_mips_timer; 30 return irq_mips_timer;
31} 31}
diff --git a/arch/mips/ralink/irq.c b/arch/mips/ralink/irq.c
index 320b1f1043ff..781b3d14a489 100644
--- a/arch/mips/ralink/irq.c
+++ b/arch/mips/ralink/irq.c
@@ -73,7 +73,7 @@ static struct irq_chip ralink_intc_irq_chip = {
73 .irq_mask_ack = ralink_intc_irq_mask, 73 .irq_mask_ack = ralink_intc_irq_mask,
74}; 74};
75 75
76unsigned int __cpuinit get_c0_compare_int(void) 76unsigned int get_c0_compare_int(void)
77{ 77{
78 return CP0_LEGACY_COMPARE_IRQ; 78 return CP0_LEGACY_COMPARE_IRQ;
79} 79}
diff --git a/arch/mips/sgi-ip27/ip27-init.c b/arch/mips/sgi-ip27/ip27-init.c
index d41b1c6fb032..ee736bd103f8 100644
--- a/arch/mips/sgi-ip27/ip27-init.c
+++ b/arch/mips/sgi-ip27/ip27-init.c
@@ -54,7 +54,7 @@ extern void pcibr_setup(cnodeid_t);
54 54
55extern void xtalk_probe_node(cnodeid_t nid); 55extern void xtalk_probe_node(cnodeid_t nid);
56 56
57static void __cpuinit per_hub_init(cnodeid_t cnode) 57static void per_hub_init(cnodeid_t cnode)
58{ 58{
59 struct hub_data *hub = hub_data(cnode); 59 struct hub_data *hub = hub_data(cnode);
60 nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode); 60 nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
@@ -110,7 +110,7 @@ static void __cpuinit per_hub_init(cnodeid_t cnode)
110 } 110 }
111} 111}
112 112
113void __cpuinit per_cpu_init(void) 113void per_cpu_init(void)
114{ 114{
115 int cpu = smp_processor_id(); 115 int cpu = smp_processor_id();
116 int slice = LOCAL_HUB_L(PI_CPU_NUM); 116 int slice = LOCAL_HUB_L(PI_CPU_NUM);
diff --git a/arch/mips/sgi-ip27/ip27-smp.c b/arch/mips/sgi-ip27/ip27-smp.c
index f94638141b20..f4ea8aa79ba2 100644
--- a/arch/mips/sgi-ip27/ip27-smp.c
+++ b/arch/mips/sgi-ip27/ip27-smp.c
@@ -173,12 +173,12 @@ static void ip27_send_ipi_mask(const struct cpumask *mask, unsigned int action)
173 ip27_send_ipi_single(i, action); 173 ip27_send_ipi_single(i, action);
174} 174}
175 175
176static void __cpuinit ip27_init_secondary(void) 176static void ip27_init_secondary(void)
177{ 177{
178 per_cpu_init(); 178 per_cpu_init();
179} 179}
180 180
181static void __cpuinit ip27_smp_finish(void) 181static void ip27_smp_finish(void)
182{ 182{
183 extern void hub_rt_clock_event_init(void); 183 extern void hub_rt_clock_event_init(void);
184 184
@@ -195,7 +195,7 @@ static void __init ip27_cpus_done(void)
195 * set sp to the kernel stack of the newly created idle process, gp to the proc 195 * set sp to the kernel stack of the newly created idle process, gp to the proc
196 * struct so that current_thread_info() will work. 196 * struct so that current_thread_info() will work.
197 */ 197 */
198static void __cpuinit ip27_boot_secondary(int cpu, struct task_struct *idle) 198static void ip27_boot_secondary(int cpu, struct task_struct *idle)
199{ 199{
200 unsigned long gp = (unsigned long)task_thread_info(idle); 200 unsigned long gp = (unsigned long)task_thread_info(idle);
201 unsigned long sp = __KSTK_TOS(idle); 201 unsigned long sp = __KSTK_TOS(idle);
diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c
index 2e21b761cb9c..1d97eaba0c5f 100644
--- a/arch/mips/sgi-ip27/ip27-timer.c
+++ b/arch/mips/sgi-ip27/ip27-timer.c
@@ -106,7 +106,7 @@ struct irqaction hub_rt_irqaction = {
106#define NSEC_PER_CYCLE 800 106#define NSEC_PER_CYCLE 800
107#define CYCLES_PER_SEC (NSEC_PER_SEC / NSEC_PER_CYCLE) 107#define CYCLES_PER_SEC (NSEC_PER_SEC / NSEC_PER_CYCLE)
108 108
109void __cpuinit hub_rt_clock_event_init(void) 109void hub_rt_clock_event_init(void)
110{ 110{
111 unsigned int cpu = smp_processor_id(); 111 unsigned int cpu = smp_processor_id();
112 struct clock_event_device *cd = &per_cpu(hub_rt_clockevent, cpu); 112 struct clock_event_device *cd = &per_cpu(hub_rt_clockevent, cpu);
@@ -173,7 +173,7 @@ void __init plat_time_init(void)
173 hub_rt_clock_event_init(); 173 hub_rt_clock_event_init();
174} 174}
175 175
176void __cpuinit cpu_time_init(void) 176void cpu_time_init(void)
177{ 177{
178 lboard_t *board; 178 lboard_t *board;
179 klcpu_t *cpu; 179 klcpu_t *cpu;
@@ -194,7 +194,7 @@ void __cpuinit cpu_time_init(void)
194 set_c0_status(SRB_TIMOCLK); 194 set_c0_status(SRB_TIMOCLK);
195} 195}
196 196
197void __cpuinit hub_rtc_init(cnodeid_t cnode) 197void hub_rtc_init(cnodeid_t cnode)
198{ 198{
199 199
200 /* 200 /*
diff --git a/arch/mips/sgi-ip27/ip27-xtalk.c b/arch/mips/sgi-ip27/ip27-xtalk.c
index a4df7d0f6f12..d59b820f528d 100644
--- a/arch/mips/sgi-ip27/ip27-xtalk.c
+++ b/arch/mips/sgi-ip27/ip27-xtalk.c
@@ -23,7 +23,7 @@
23 23
24extern int bridge_probe(nasid_t nasid, int widget, int masterwid); 24extern int bridge_probe(nasid_t nasid, int widget, int masterwid);
25 25
26static int __cpuinit probe_one_port(nasid_t nasid, int widget, int masterwid) 26static int probe_one_port(nasid_t nasid, int widget, int masterwid)
27{ 27{
28 widgetreg_t widget_id; 28 widgetreg_t widget_id;
29 xwidget_part_num_t partnum; 29 xwidget_part_num_t partnum;
@@ -47,7 +47,7 @@ static int __cpuinit probe_one_port(nasid_t nasid, int widget, int masterwid)
47 return 0; 47 return 0;
48} 48}
49 49
50static int __cpuinit xbow_probe(nasid_t nasid) 50static int xbow_probe(nasid_t nasid)
51{ 51{
52 lboard_t *brd; 52 lboard_t *brd;
53 klxbow_t *xbow_p; 53 klxbow_t *xbow_p;
@@ -100,7 +100,7 @@ static int __cpuinit xbow_probe(nasid_t nasid)
100 return 0; 100 return 0;
101} 101}
102 102
103void __cpuinit xtalk_probe_node(cnodeid_t nid) 103void xtalk_probe_node(cnodeid_t nid)
104{ 104{
105 volatile u64 hubreg; 105 volatile u64 hubreg;
106 nasid_t nasid; 106 nasid_t nasid;
diff --git a/arch/mips/sibyte/bcm1480/smp.c b/arch/mips/sibyte/bcm1480/smp.c
index de88e22694a0..54e2c4de15c1 100644
--- a/arch/mips/sibyte/bcm1480/smp.c
+++ b/arch/mips/sibyte/bcm1480/smp.c
@@ -60,7 +60,7 @@ static void *mailbox_0_regs[] = {
60/* 60/*
61 * SMP init and finish on secondary CPUs 61 * SMP init and finish on secondary CPUs
62 */ 62 */
63void __cpuinit bcm1480_smp_init(void) 63void bcm1480_smp_init(void)
64{ 64{
65 unsigned int imask = STATUSF_IP4 | STATUSF_IP3 | STATUSF_IP2 | 65 unsigned int imask = STATUSF_IP4 | STATUSF_IP3 | STATUSF_IP2 |
66 STATUSF_IP1 | STATUSF_IP0; 66 STATUSF_IP1 | STATUSF_IP0;
@@ -95,7 +95,7 @@ static void bcm1480_send_ipi_mask(const struct cpumask *mask,
95/* 95/*
96 * Code to run on secondary just after probing the CPU 96 * Code to run on secondary just after probing the CPU
97 */ 97 */
98static void __cpuinit bcm1480_init_secondary(void) 98static void bcm1480_init_secondary(void)
99{ 99{
100 extern void bcm1480_smp_init(void); 100 extern void bcm1480_smp_init(void);
101 101
@@ -106,7 +106,7 @@ static void __cpuinit bcm1480_init_secondary(void)
106 * Do any tidying up before marking online and running the idle 106 * Do any tidying up before marking online and running the idle
107 * loop 107 * loop
108 */ 108 */
109static void __cpuinit bcm1480_smp_finish(void) 109static void bcm1480_smp_finish(void)
110{ 110{
111 extern void sb1480_clockevent_init(void); 111 extern void sb1480_clockevent_init(void);
112 112
@@ -125,7 +125,7 @@ static void bcm1480_cpus_done(void)
125 * Setup the PC, SP, and GP of a secondary processor and start it 125 * Setup the PC, SP, and GP of a secondary processor and start it
126 * running! 126 * running!
127 */ 127 */
128static void __cpuinit bcm1480_boot_secondary(int cpu, struct task_struct *idle) 128static void bcm1480_boot_secondary(int cpu, struct task_struct *idle)
129{ 129{
130 int retval; 130 int retval;
131 131
diff --git a/arch/mips/sibyte/sb1250/smp.c b/arch/mips/sibyte/sb1250/smp.c
index 285cfef4ebc0..d7b942db0ea5 100644
--- a/arch/mips/sibyte/sb1250/smp.c
+++ b/arch/mips/sibyte/sb1250/smp.c
@@ -48,7 +48,7 @@ static void *mailbox_regs[] = {
48/* 48/*
49 * SMP init and finish on secondary CPUs 49 * SMP init and finish on secondary CPUs
50 */ 50 */
51void __cpuinit sb1250_smp_init(void) 51void sb1250_smp_init(void)
52{ 52{
53 unsigned int imask = STATUSF_IP4 | STATUSF_IP3 | STATUSF_IP2 | 53 unsigned int imask = STATUSF_IP4 | STATUSF_IP3 | STATUSF_IP2 |
54 STATUSF_IP1 | STATUSF_IP0; 54 STATUSF_IP1 | STATUSF_IP0;
@@ -83,7 +83,7 @@ static inline void sb1250_send_ipi_mask(const struct cpumask *mask,
83/* 83/*
84 * Code to run on secondary just after probing the CPU 84 * Code to run on secondary just after probing the CPU
85 */ 85 */
86static void __cpuinit sb1250_init_secondary(void) 86static void sb1250_init_secondary(void)
87{ 87{
88 extern void sb1250_smp_init(void); 88 extern void sb1250_smp_init(void);
89 89
@@ -94,7 +94,7 @@ static void __cpuinit sb1250_init_secondary(void)
94 * Do any tidying up before marking online and running the idle 94 * Do any tidying up before marking online and running the idle
95 * loop 95 * loop
96 */ 96 */
97static void __cpuinit sb1250_smp_finish(void) 97static void sb1250_smp_finish(void)
98{ 98{
99 extern void sb1250_clockevent_init(void); 99 extern void sb1250_clockevent_init(void);
100 100
@@ -113,7 +113,7 @@ static void sb1250_cpus_done(void)
113 * Setup the PC, SP, and GP of a secondary processor and start it 113 * Setup the PC, SP, and GP of a secondary processor and start it
114 * running! 114 * running!
115 */ 115 */
116static void __cpuinit sb1250_boot_secondary(int cpu, struct task_struct *idle) 116static void sb1250_boot_secondary(int cpu, struct task_struct *idle)
117{ 117{
118 int retval; 118 int retval;
119 119
diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c
index f4d5bedc3b4f..d7359ffbcbdd 100644
--- a/arch/openrisc/kernel/setup.c
+++ b/arch/openrisc/kernel/setup.c
@@ -267,7 +267,7 @@ void __init detect_unit_config(unsigned long upr, unsigned long mask,
267 * 267 *
268 */ 268 */
269 269
270void __cpuinit calibrate_delay(void) 270void calibrate_delay(void)
271{ 271{
272 const int *val; 272 const int *val;
273 struct device_node *cpu = NULL; 273 struct device_node *cpu = NULL;
diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c
index f65fa480c905..22395901d47b 100644
--- a/arch/parisc/kernel/firmware.c
+++ b/arch/parisc/kernel/firmware.c
@@ -150,7 +150,7 @@ static void convert_to_wide(unsigned long *addr)
150} 150}
151 151
152#ifdef CONFIG_64BIT 152#ifdef CONFIG_64BIT
153void __cpuinit set_firmware_width_unlocked(void) 153void set_firmware_width_unlocked(void)
154{ 154{
155 int ret; 155 int ret;
156 156
@@ -167,7 +167,7 @@ void __cpuinit set_firmware_width_unlocked(void)
167 * This function must be called before any pdc_* function that uses the 167 * This function must be called before any pdc_* function that uses the
168 * convert_to_wide function. 168 * convert_to_wide function.
169 */ 169 */
170void __cpuinit set_firmware_width(void) 170void set_firmware_width(void)
171{ 171{
172 unsigned long flags; 172 unsigned long flags;
173 spin_lock_irqsave(&pdc_lock, flags); 173 spin_lock_irqsave(&pdc_lock, flags);
@@ -175,11 +175,13 @@ void __cpuinit set_firmware_width(void)
175 spin_unlock_irqrestore(&pdc_lock, flags); 175 spin_unlock_irqrestore(&pdc_lock, flags);
176} 176}
177#else 177#else
178void __cpuinit set_firmware_width_unlocked(void) { 178void set_firmware_width_unlocked(void)
179{
179 return; 180 return;
180} 181}
181 182
182void __cpuinit set_firmware_width(void) { 183void set_firmware_width(void)
184{
183 return; 185 return;
184} 186}
185#endif /*CONFIG_64BIT*/ 187#endif /*CONFIG_64BIT*/
@@ -301,7 +303,7 @@ int pdc_chassis_warn(unsigned long *warn)
301 return retval; 303 return retval;
302} 304}
303 305
304int __cpuinit pdc_coproc_cfg_unlocked(struct pdc_coproc_cfg *pdc_coproc_info) 306int pdc_coproc_cfg_unlocked(struct pdc_coproc_cfg *pdc_coproc_info)
305{ 307{
306 int ret; 308 int ret;
307 309
@@ -322,7 +324,7 @@ int __cpuinit pdc_coproc_cfg_unlocked(struct pdc_coproc_cfg *pdc_coproc_info)
322 * This PDC call returns the presence and status of all the coprocessors 324 * This PDC call returns the presence and status of all the coprocessors
323 * attached to the processor. 325 * attached to the processor.
324 */ 326 */
325int __cpuinit pdc_coproc_cfg(struct pdc_coproc_cfg *pdc_coproc_info) 327int pdc_coproc_cfg(struct pdc_coproc_cfg *pdc_coproc_info)
326{ 328{
327 int ret; 329 int ret;
328 unsigned long flags; 330 unsigned long flags;
diff --git a/arch/parisc/kernel/hardware.c b/arch/parisc/kernel/hardware.c
index 872275659d98..06cb3992907e 100644
--- a/arch/parisc/kernel/hardware.c
+++ b/arch/parisc/kernel/hardware.c
@@ -1367,7 +1367,7 @@ const char *parisc_hardware_description(struct parisc_device_id *id)
1367 1367
1368 1368
1369/* Interpret hversion (ret[0]) from PDC_MODEL(4)/PDC_MODEL_INFO(0) */ 1369/* Interpret hversion (ret[0]) from PDC_MODEL(4)/PDC_MODEL_INFO(0) */
1370enum cpu_type __cpuinit 1370enum cpu_type
1371parisc_get_cpu_type(unsigned long hversion) 1371parisc_get_cpu_type(unsigned long hversion)
1372{ 1372{
1373 struct hp_cpu_type_mask *ptr; 1373 struct hp_cpu_type_mask *ptr;
diff --git a/arch/parisc/kernel/processor.c b/arch/parisc/kernel/processor.c
index 8a96c8ab9fe6..b68d977ce30f 100644
--- a/arch/parisc/kernel/processor.c
+++ b/arch/parisc/kernel/processor.c
@@ -73,7 +73,7 @@ extern int update_cr16_clocksource(void); /* from time.c */
73 * 73 *
74 * FIXME: doesn't do much yet... 74 * FIXME: doesn't do much yet...
75 */ 75 */
76static void __cpuinit 76static void
77init_percpu_prof(unsigned long cpunum) 77init_percpu_prof(unsigned long cpunum)
78{ 78{
79 struct cpuinfo_parisc *p; 79 struct cpuinfo_parisc *p;
@@ -92,7 +92,7 @@ init_percpu_prof(unsigned long cpunum)
92 * (return 1). If so, initialize the chip and tell other partners in crime 92 * (return 1). If so, initialize the chip and tell other partners in crime
93 * they have work to do. 93 * they have work to do.
94 */ 94 */
95static int __cpuinit processor_probe(struct parisc_device *dev) 95static int processor_probe(struct parisc_device *dev)
96{ 96{
97 unsigned long txn_addr; 97 unsigned long txn_addr;
98 unsigned long cpuid; 98 unsigned long cpuid;
@@ -299,7 +299,7 @@ void __init collect_boot_cpu_data(void)
299 * 299 *
300 * o Enable CPU profiling hooks. 300 * o Enable CPU profiling hooks.
301 */ 301 */
302int __cpuinit init_per_cpu(int cpunum) 302int init_per_cpu(int cpunum)
303{ 303{
304 int ret; 304 int ret;
305 struct pdc_coproc_cfg coproc_cfg; 305 struct pdc_coproc_cfg coproc_cfg;
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index e3614fb343e5..8a252f2d6c08 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -62,9 +62,9 @@ static int smp_debug_lvl = 0;
62volatile struct task_struct *smp_init_current_idle_task; 62volatile struct task_struct *smp_init_current_idle_task;
63 63
64/* track which CPU is booting */ 64/* track which CPU is booting */
65static volatile int cpu_now_booting __cpuinitdata; 65static volatile int cpu_now_booting;
66 66
67static int parisc_max_cpus __cpuinitdata = 1; 67static int parisc_max_cpus = 1;
68 68
69static DEFINE_PER_CPU(spinlock_t, ipi_lock); 69static DEFINE_PER_CPU(spinlock_t, ipi_lock);
70 70
@@ -328,7 +328,7 @@ void __init smp_callin(void)
328/* 328/*
329 * Bring one cpu online. 329 * Bring one cpu online.
330 */ 330 */
331int __cpuinit smp_boot_one_cpu(int cpuid, struct task_struct *idle) 331int smp_boot_one_cpu(int cpuid, struct task_struct *idle)
332{ 332{
333 const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid); 333 const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid);
334 long timeout; 334 long timeout;
@@ -424,7 +424,7 @@ void smp_cpus_done(unsigned int cpu_max)
424} 424}
425 425
426 426
427int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle) 427int __cpu_up(unsigned int cpu, struct task_struct *tidle)
428{ 428{
429 if (cpu != 0 && cpu < parisc_max_cpus) 429 if (cpu != 0 && cpu < parisc_max_cpus)
430 smp_boot_one_cpu(cpu, tidle); 430 smp_boot_one_cpu(cpu, tidle);
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index 09a8743143f3..d3e5e9bc8f94 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -55,6 +55,8 @@ struct device_node;
55#define EEH_PE_RECOVERING (1 << 1) /* Recovering PE */ 55#define EEH_PE_RECOVERING (1 << 1) /* Recovering PE */
56#define EEH_PE_PHB_DEAD (1 << 2) /* Dead PHB */ 56#define EEH_PE_PHB_DEAD (1 << 2) /* Dead PHB */
57 57
58#define EEH_PE_KEEP (1 << 8) /* Keep PE on hotplug */
59
58struct eeh_pe { 60struct eeh_pe {
59 int type; /* PE type: PHB/Bus/Device */ 61 int type; /* PE type: PHB/Bus/Device */
60 int state; /* PE EEH dependent mode */ 62 int state; /* PE EEH dependent mode */
@@ -72,8 +74,8 @@ struct eeh_pe {
72 struct list_head child; /* Child PEs */ 74 struct list_head child; /* Child PEs */
73}; 75};
74 76
75#define eeh_pe_for_each_dev(pe, edev) \ 77#define eeh_pe_for_each_dev(pe, edev, tmp) \
76 list_for_each_entry(edev, &pe->edevs, list) 78 list_for_each_entry_safe(edev, tmp, &pe->edevs, list)
77 79
78/* 80/*
79 * The struct is used to trace EEH state for the associated 81 * The struct is used to trace EEH state for the associated
@@ -82,7 +84,13 @@ struct eeh_pe {
82 * another tree except the currently existing tree of PCI 84 * another tree except the currently existing tree of PCI
83 * buses and PCI devices 85 * buses and PCI devices
84 */ 86 */
85#define EEH_DEV_IRQ_DISABLED (1<<0) /* Interrupt disabled */ 87#define EEH_DEV_BRIDGE (1 << 0) /* PCI bridge */
88#define EEH_DEV_ROOT_PORT (1 << 1) /* PCIe root port */
89#define EEH_DEV_DS_PORT (1 << 2) /* Downstream port */
90#define EEH_DEV_IRQ_DISABLED (1 << 3) /* Interrupt disabled */
91#define EEH_DEV_DISCONNECTED (1 << 4) /* Removing from PE */
92
93#define EEH_DEV_SYSFS (1 << 8) /* Sysfs created */
86 94
87struct eeh_dev { 95struct eeh_dev {
88 int mode; /* EEH mode */ 96 int mode; /* EEH mode */
@@ -90,11 +98,13 @@ struct eeh_dev {
90 int config_addr; /* Config address */ 98 int config_addr; /* Config address */
91 int pe_config_addr; /* PE config address */ 99 int pe_config_addr; /* PE config address */
92 u32 config_space[16]; /* Saved PCI config space */ 100 u32 config_space[16]; /* Saved PCI config space */
101 u8 pcie_cap; /* Saved PCIe capability */
93 struct eeh_pe *pe; /* Associated PE */ 102 struct eeh_pe *pe; /* Associated PE */
94 struct list_head list; /* Form link list in the PE */ 103 struct list_head list; /* Form link list in the PE */
95 struct pci_controller *phb; /* Associated PHB */ 104 struct pci_controller *phb; /* Associated PHB */
96 struct device_node *dn; /* Associated device node */ 105 struct device_node *dn; /* Associated device node */
97 struct pci_dev *pdev; /* Associated PCI device */ 106 struct pci_dev *pdev; /* Associated PCI device */
107 struct pci_bus *bus; /* PCI bus for partial hotplug */
98}; 108};
99 109
100static inline struct device_node *eeh_dev_to_of_node(struct eeh_dev *edev) 110static inline struct device_node *eeh_dev_to_of_node(struct eeh_dev *edev)
@@ -193,8 +203,10 @@ int eeh_phb_pe_create(struct pci_controller *phb);
193struct eeh_pe *eeh_phb_pe_get(struct pci_controller *phb); 203struct eeh_pe *eeh_phb_pe_get(struct pci_controller *phb);
194struct eeh_pe *eeh_pe_get(struct eeh_dev *edev); 204struct eeh_pe *eeh_pe_get(struct eeh_dev *edev);
195int eeh_add_to_parent_pe(struct eeh_dev *edev); 205int eeh_add_to_parent_pe(struct eeh_dev *edev);
196int eeh_rmv_from_parent_pe(struct eeh_dev *edev, int purge_pe); 206int eeh_rmv_from_parent_pe(struct eeh_dev *edev);
197void eeh_pe_update_time_stamp(struct eeh_pe *pe); 207void eeh_pe_update_time_stamp(struct eeh_pe *pe);
208void *eeh_pe_traverse(struct eeh_pe *root,
209 eeh_traverse_func fn, void *flag);
198void *eeh_pe_dev_traverse(struct eeh_pe *root, 210void *eeh_pe_dev_traverse(struct eeh_pe *root,
199 eeh_traverse_func fn, void *flag); 211 eeh_traverse_func fn, void *flag);
200void eeh_pe_restore_bars(struct eeh_pe *pe); 212void eeh_pe_restore_bars(struct eeh_pe *pe);
@@ -209,10 +221,12 @@ unsigned long eeh_check_failure(const volatile void __iomem *token,
209 unsigned long val); 221 unsigned long val);
210int eeh_dev_check_failure(struct eeh_dev *edev); 222int eeh_dev_check_failure(struct eeh_dev *edev);
211void eeh_addr_cache_build(void); 223void eeh_addr_cache_build(void);
224void eeh_add_device_early(struct device_node *);
212void eeh_add_device_tree_early(struct device_node *); 225void eeh_add_device_tree_early(struct device_node *);
226void eeh_add_device_late(struct pci_dev *);
213void eeh_add_device_tree_late(struct pci_bus *); 227void eeh_add_device_tree_late(struct pci_bus *);
214void eeh_add_sysfs_files(struct pci_bus *); 228void eeh_add_sysfs_files(struct pci_bus *);
215void eeh_remove_bus_device(struct pci_dev *, int); 229void eeh_remove_device(struct pci_dev *);
216 230
217/** 231/**
218 * EEH_POSSIBLE_ERROR() -- test for possible MMIO failure. 232 * EEH_POSSIBLE_ERROR() -- test for possible MMIO failure.
@@ -252,13 +266,17 @@ static inline unsigned long eeh_check_failure(const volatile void __iomem *token
252 266
253static inline void eeh_addr_cache_build(void) { } 267static inline void eeh_addr_cache_build(void) { }
254 268
269static inline void eeh_add_device_early(struct device_node *dn) { }
270
255static inline void eeh_add_device_tree_early(struct device_node *dn) { } 271static inline void eeh_add_device_tree_early(struct device_node *dn) { }
256 272
273static inline void eeh_add_device_late(struct pci_dev *dev) { }
274
257static inline void eeh_add_device_tree_late(struct pci_bus *bus) { } 275static inline void eeh_add_device_tree_late(struct pci_bus *bus) { }
258 276
259static inline void eeh_add_sysfs_files(struct pci_bus *bus) { } 277static inline void eeh_add_sysfs_files(struct pci_bus *bus) { }
260 278
261static inline void eeh_remove_bus_device(struct pci_dev *dev, int purge_pe) { } 279static inline void eeh_remove_device(struct pci_dev *dev) { }
262 280
263#define EEH_POSSIBLE_ERROR(val, type) (0) 281#define EEH_POSSIBLE_ERROR(val, type) (0)
264#define EEH_IO_ERROR_VALUE(size) (-1UL) 282#define EEH_IO_ERROR_VALUE(size) (-1UL)
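
eeh_pe_for_each_dev() now carries a scratch iterator because it expands to list_for_each_entry_safe(), which caches the next element before the loop body runs, so a caller may detach or free the current eeh_dev mid-walk (as the reworked removal paths do). The same idea, reduced to a toy singly linked list in standalone C rather than the kernel list API:

#include <stdio.h>
#include <stdlib.h>

struct node {
	int val;
	struct node *next;
};

/*
 * "Safe" traversal: grab the next pointer before the body runs, so
 * freeing the current node cannot break the walk -- exactly what the
 * extra iterator in list_for_each_entry_safe() is for.
 */
static void free_odd(struct node **head)
{
	struct node **link = head;
	struct node *cur, *next;

	for (cur = *head; cur; cur = next) {
		next = cur->next;		/* cached before a possible free */
		if (cur->val & 1) {
			*link = next;		/* unlink */
			free(cur);
		} else {
			link = &cur->next;
		}
	}
}

int main(void)
{
	struct node *head = NULL, **tail = &head, *n;
	int i;

	for (i = 1; i <= 5; i++) {
		n = malloc(sizeof(*n));
		n->val = i;
		n->next = NULL;
		*tail = n;
		tail = &n->next;
	}

	free_odd(&head);
	for (n = head; n; n = n->next)
		printf("%d ", n->val);		/* prints: 2 4 */
	printf("\n");

	while (head) {
		n = head->next;
		free(head);
		head = n;
	}
	return 0;
}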
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index ba713f166fa5..10be1dd01c6b 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -96,10 +96,11 @@ static inline bool arch_irqs_disabled(void)
96#endif 96#endif
97 97
98#define hard_irq_disable() do { \ 98#define hard_irq_disable() do { \
99 u8 _was_enabled = get_paca()->soft_enabled; \ 99 u8 _was_enabled; \
100 __hard_irq_disable(); \ 100 __hard_irq_disable(); \
101 get_paca()->soft_enabled = 0; \ 101 _was_enabled = local_paca->soft_enabled; \
102 get_paca()->irq_happened |= PACA_IRQ_HARD_DIS; \ 102 local_paca->soft_enabled = 0; \
103 local_paca->irq_happened |= PACA_IRQ_HARD_DIS; \
103 if (_was_enabled) \ 104 if (_was_enabled) \
104 trace_hardirqs_off(); \ 105 trace_hardirqs_off(); \
105} while(0) 106} while(0)
diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h
index c1df590ec444..49fa55bfbac4 100644
--- a/arch/powerpc/include/asm/module.h
+++ b/arch/powerpc/include/asm/module.h
@@ -82,10 +82,9 @@ struct exception_table_entry;
82void sort_ex_table(struct exception_table_entry *start, 82void sort_ex_table(struct exception_table_entry *start,
83 struct exception_table_entry *finish); 83 struct exception_table_entry *finish);
84 84
85#ifdef CONFIG_MODVERSIONS 85#if defined(CONFIG_MODVERSIONS) && defined(CONFIG_PPC64)
86#define ARCH_RELOCATES_KCRCTAB 86#define ARCH_RELOCATES_KCRCTAB
87 87#define reloc_start PHYSICAL_START
88extern const unsigned long reloc_start[];
89#endif 88#endif
90#endif /* __KERNEL__ */ 89#endif /* __KERNEL__ */
91#endif /* _ASM_POWERPC_MODULE_H */ 90#endif /* _ASM_POWERPC_MODULE_H */
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index 2c1d8cb9b265..32d0d2018faf 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -209,7 +209,6 @@ static inline struct eeh_dev *of_node_to_eeh_dev(struct device_node *dn)
209extern struct pci_bus *pcibios_find_pci_bus(struct device_node *dn); 209extern struct pci_bus *pcibios_find_pci_bus(struct device_node *dn);
210 210
211/** Remove all of the PCI devices under this bus */ 211/** Remove all of the PCI devices under this bus */
212extern void __pcibios_remove_pci_devices(struct pci_bus *bus, int purge_pe);
213extern void pcibios_remove_pci_devices(struct pci_bus *bus); 212extern void pcibios_remove_pci_devices(struct pci_bus *bus);
214 213
215/** Discover new pci devices under this bus, and add them */ 214/** Discover new pci devices under this bus, and add them */
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 5d7d9c2a5473..a6840e4e24f7 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -1088,7 +1088,8 @@
1088#define PVR_970MP 0x0044 1088#define PVR_970MP 0x0044
1089#define PVR_970GX 0x0045 1089#define PVR_970GX 0x0045
1090#define PVR_POWER7p 0x004A 1090#define PVR_POWER7p 0x004A
1091#define PVR_POWER8 0x004B 1091#define PVR_POWER8E 0x004B
1092#define PVR_POWER8 0x004D
1092#define PVR_BE 0x0070 1093#define PVR_BE 0x0070
1093#define PVR_PA6T 0x0090 1094#define PVR_PA6T 0x0090
1094 1095
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 2a45d0f04385..22973a74df73 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -494,9 +494,27 @@ static struct cpu_spec __initdata cpu_specs[] = {
494 .cpu_restore = __restore_cpu_power7, 494 .cpu_restore = __restore_cpu_power7,
495 .platform = "power7+", 495 .platform = "power7+",
496 }, 496 },
497 { /* Power8 */ 497 { /* Power8E */
498 .pvr_mask = 0xffff0000, 498 .pvr_mask = 0xffff0000,
499 .pvr_value = 0x004b0000, 499 .pvr_value = 0x004b0000,
500 .cpu_name = "POWER8E (raw)",
501 .cpu_features = CPU_FTRS_POWER8,
502 .cpu_user_features = COMMON_USER_POWER8,
503 .cpu_user_features2 = COMMON_USER2_POWER8,
504 .mmu_features = MMU_FTRS_POWER8,
505 .icache_bsize = 128,
506 .dcache_bsize = 128,
507 .num_pmcs = 6,
508 .pmc_type = PPC_PMC_IBM,
509 .oprofile_cpu_type = "ppc64/power8",
510 .oprofile_type = PPC_OPROFILE_INVALID,
511 .cpu_setup = __setup_cpu_power8,
512 .cpu_restore = __restore_cpu_power8,
513 .platform = "power8",
514 },
515 { /* Power8 */
516 .pvr_mask = 0xffff0000,
517 .pvr_value = 0x004d0000,
500 .cpu_name = "POWER8 (raw)", 518 .cpu_name = "POWER8 (raw)",
501 .cpu_features = CPU_FTRS_POWER8, 519 .cpu_features = CPU_FTRS_POWER8,
502 .cpu_user_features = COMMON_USER_POWER8, 520 .cpu_user_features = COMMON_USER_POWER8,
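
The split into separate POWER8E and POWER8 entries above matters because cpu_specs[] is matched by masking the PVR: the first entry whose (pvr & pvr_mask) equals pvr_value wins, so the 0x004b and 0x004d families each need their own raw entry. A hedged sketch of that selection rule (identify_cpu() in the real kernel does considerably more, e.g. feature fixups; the PVR revisions below are made up):

#include <stdio.h>

struct cpu_spec {
	unsigned int pvr_mask;
	unsigned int pvr_value;
	const char *cpu_name;
};

/* Trimmed-down view of the cputable entries touched by this patch. */
static const struct cpu_spec cpu_specs[] = {
	{ 0xffff0000, 0x004b0000, "POWER8E (raw)" },
	{ 0xffff0000, 0x004d0000, "POWER8 (raw)"  },
	{ 0x00000000, 0x00000000, "unknown"       },	/* catch-all */
};

/* First mask/value match wins -- same rule identify_cpu() applies. */
static const struct cpu_spec *lookup_cpu(unsigned int pvr)
{
	const struct cpu_spec *s;

	for (s = cpu_specs; ; s++)
		if ((pvr & s->pvr_mask) == s->pvr_value)
			return s;
}

int main(void)
{
	unsigned int pvrs[] = { 0x004b0201, 0x004d0100 };	/* made-up revisions */
	unsigned int i;

	for (i = 0; i < 2; i++)
		printf("PVR %#x -> %s\n", pvrs[i], lookup_cpu(pvrs[i])->cpu_name);
	return 0;
}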
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 39954fe941b8..ea9414c8088d 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -231,7 +231,7 @@ static size_t eeh_gather_pci_data(struct eeh_dev *edev, char * buf, size_t len)
231void eeh_slot_error_detail(struct eeh_pe *pe, int severity) 231void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
232{ 232{
233 size_t loglen = 0; 233 size_t loglen = 0;
234 struct eeh_dev *edev; 234 struct eeh_dev *edev, *tmp;
235 bool valid_cfg_log = true; 235 bool valid_cfg_log = true;
236 236
237 /* 237 /*
@@ -251,7 +251,7 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
251 eeh_pe_restore_bars(pe); 251 eeh_pe_restore_bars(pe);
252 252
253 pci_regs_buf[0] = 0; 253 pci_regs_buf[0] = 0;
254 eeh_pe_for_each_dev(pe, edev) { 254 eeh_pe_for_each_dev(pe, edev, tmp) {
255 loglen += eeh_gather_pci_data(edev, pci_regs_buf + loglen, 255 loglen += eeh_gather_pci_data(edev, pci_regs_buf + loglen,
256 EEH_PCI_REGS_LOG_LEN - loglen); 256 EEH_PCI_REGS_LOG_LEN - loglen);
257 } 257 }
@@ -499,8 +499,6 @@ unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned lon
499 } 499 }
500 500
501 eeh_dev_check_failure(edev); 501 eeh_dev_check_failure(edev);
502
503 pci_dev_put(eeh_dev_to_pci_dev(edev));
504 return val; 502 return val;
505} 503}
506 504
@@ -838,7 +836,7 @@ core_initcall_sync(eeh_init);
838 * on the CEC architecture, type of the device, on earlier boot 836 * on the CEC architecture, type of the device, on earlier boot
839 * command-line arguments & etc. 837 * command-line arguments & etc.
840 */ 838 */
841static void eeh_add_device_early(struct device_node *dn) 839void eeh_add_device_early(struct device_node *dn)
842{ 840{
843 struct pci_controller *phb; 841 struct pci_controller *phb;
844 842
@@ -886,7 +884,7 @@ EXPORT_SYMBOL_GPL(eeh_add_device_tree_early);
886 * This routine must be used to complete EEH initialization for PCI 884 * This routine must be used to complete EEH initialization for PCI
887 * devices that were added after system boot (e.g. hotplug, dlpar). 885 * devices that were added after system boot (e.g. hotplug, dlpar).
888 */ 886 */
889static void eeh_add_device_late(struct pci_dev *dev) 887void eeh_add_device_late(struct pci_dev *dev)
890{ 888{
891 struct device_node *dn; 889 struct device_node *dn;
892 struct eeh_dev *edev; 890 struct eeh_dev *edev;
@@ -902,9 +900,23 @@ static void eeh_add_device_late(struct pci_dev *dev)
902 pr_debug("EEH: Already referenced !\n"); 900 pr_debug("EEH: Already referenced !\n");
903 return; 901 return;
904 } 902 }
905 WARN_ON(edev->pdev);
906 903
907 pci_dev_get(dev); 904 /*
905 * The EEH cache might not be removed correctly because of
 906 * an unbalanced kref on the device at unplug time, which
 907 * relies on pcibios_release_device(). So remove the stale
 908 * binding here explicitly.
909 */
910 if (edev->pdev) {
911 eeh_rmv_from_parent_pe(edev);
912 eeh_addr_cache_rmv_dev(edev->pdev);
913 eeh_sysfs_remove_device(edev->pdev);
914 edev->mode &= ~EEH_DEV_SYSFS;
915
916 edev->pdev = NULL;
917 dev->dev.archdata.edev = NULL;
918 }
919
908 edev->pdev = dev; 920 edev->pdev = dev;
909 dev->dev.archdata.edev = edev; 921 dev->dev.archdata.edev = edev;
910 922
@@ -967,7 +979,6 @@ EXPORT_SYMBOL_GPL(eeh_add_sysfs_files);
967/** 979/**
968 * eeh_remove_device - Undo EEH setup for the indicated pci device 980 * eeh_remove_device - Undo EEH setup for the indicated pci device
969 * @dev: pci device to be removed 981 * @dev: pci device to be removed
970 * @purge_pe: remove the PE or not
971 * 982 *
972 * This routine should be called when a device is removed from 983 * This routine should be called when a device is removed from
973 * a running system (e.g. by hotplug or dlpar). It unregisters 984 * a running system (e.g. by hotplug or dlpar). It unregisters
@@ -975,7 +986,7 @@ EXPORT_SYMBOL_GPL(eeh_add_sysfs_files);
975 * this device will no longer be detected after this call; thus, 986 * this device will no longer be detected after this call; thus,
976 * i/o errors affecting this slot may leave this device unusable. 987 * i/o errors affecting this slot may leave this device unusable.
977 */ 988 */
978static void eeh_remove_device(struct pci_dev *dev, int purge_pe) 989void eeh_remove_device(struct pci_dev *dev)
979{ 990{
980 struct eeh_dev *edev; 991 struct eeh_dev *edev;
981 992
@@ -986,42 +997,29 @@ static void eeh_remove_device(struct pci_dev *dev, int purge_pe)
986 /* Unregister the device with the EEH/PCI address search system */ 997 /* Unregister the device with the EEH/PCI address search system */
987 pr_debug("EEH: Removing device %s\n", pci_name(dev)); 998 pr_debug("EEH: Removing device %s\n", pci_name(dev));
988 999
989 if (!edev || !edev->pdev) { 1000 if (!edev || !edev->pdev || !edev->pe) {
990 pr_debug("EEH: Not referenced !\n"); 1001 pr_debug("EEH: Not referenced !\n");
991 return; 1002 return;
992 } 1003 }
1004
1005 /*
1006 * During the hotplug for EEH error recovery, we need the EEH
1007 * device attached to the parent PE in order for BAR restore
1008 * a bit later. So we keep it for BAR restore and remove it
 1009 * from the parent PE during the BAR restore.
1010 */
993 edev->pdev = NULL; 1011 edev->pdev = NULL;
994 dev->dev.archdata.edev = NULL; 1012 dev->dev.archdata.edev = NULL;
995 pci_dev_put(dev); 1013 if (!(edev->pe->state & EEH_PE_KEEP))
1014 eeh_rmv_from_parent_pe(edev);
1015 else
1016 edev->mode |= EEH_DEV_DISCONNECTED;
996 1017
997 eeh_rmv_from_parent_pe(edev, purge_pe);
998 eeh_addr_cache_rmv_dev(dev); 1018 eeh_addr_cache_rmv_dev(dev);
999 eeh_sysfs_remove_device(dev); 1019 eeh_sysfs_remove_device(dev);
1020 edev->mode &= ~EEH_DEV_SYSFS;
1000} 1021}
1001 1022
1002/**
1003 * eeh_remove_bus_device - Undo EEH setup for the indicated PCI device
1004 * @dev: PCI device
1005 * @purge_pe: remove the corresponding PE or not
1006 *
1007 * This routine must be called when a device is removed from the
1008 * running system through hotplug or dlpar. The corresponding
1009 * PCI address cache will be removed.
1010 */
1011void eeh_remove_bus_device(struct pci_dev *dev, int purge_pe)
1012{
1013 struct pci_bus *bus = dev->subordinate;
1014 struct pci_dev *child, *tmp;
1015
1016 eeh_remove_device(dev, purge_pe);
1017
1018 if (bus && dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
1019 list_for_each_entry_safe(child, tmp, &bus->devices, bus_list)
1020 eeh_remove_bus_device(child, purge_pe);
1021 }
1022}
1023EXPORT_SYMBOL_GPL(eeh_remove_bus_device);
1024
1025static int proc_eeh_show(struct seq_file *m, void *v) 1023static int proc_eeh_show(struct seq_file *m, void *v)
1026{ 1024{
1027 if (0 == eeh_subsystem_enabled) { 1025 if (0 == eeh_subsystem_enabled) {
diff --git a/arch/powerpc/kernel/eeh_cache.c b/arch/powerpc/kernel/eeh_cache.c
index f9ac1232a746..e8c9fd546a5c 100644
--- a/arch/powerpc/kernel/eeh_cache.c
+++ b/arch/powerpc/kernel/eeh_cache.c
@@ -68,16 +68,12 @@ static inline struct eeh_dev *__eeh_addr_cache_get_device(unsigned long addr)
68 struct pci_io_addr_range *piar; 68 struct pci_io_addr_range *piar;
69 piar = rb_entry(n, struct pci_io_addr_range, rb_node); 69 piar = rb_entry(n, struct pci_io_addr_range, rb_node);
70 70
71 if (addr < piar->addr_lo) { 71 if (addr < piar->addr_lo)
72 n = n->rb_left; 72 n = n->rb_left;
73 } else { 73 else if (addr > piar->addr_hi)
74 if (addr > piar->addr_hi) { 74 n = n->rb_right;
75 n = n->rb_right; 75 else
76 } else { 76 return piar->edev;
77 pci_dev_get(piar->pcidev);
78 return piar->edev;
79 }
80 }
81 } 77 }
82 78
83 return NULL; 79 return NULL;
@@ -156,7 +152,6 @@ eeh_addr_cache_insert(struct pci_dev *dev, unsigned long alo,
156 if (!piar) 152 if (!piar)
157 return NULL; 153 return NULL;
158 154
159 pci_dev_get(dev);
160 piar->addr_lo = alo; 155 piar->addr_lo = alo;
161 piar->addr_hi = ahi; 156 piar->addr_hi = ahi;
162 piar->edev = pci_dev_to_eeh_dev(dev); 157 piar->edev = pci_dev_to_eeh_dev(dev);
@@ -250,7 +245,6 @@ restart:
250 245
251 if (piar->pcidev == dev) { 246 if (piar->pcidev == dev) {
252 rb_erase(n, &pci_io_addr_cache_root.rb_root); 247 rb_erase(n, &pci_io_addr_cache_root.rb_root);
253 pci_dev_put(piar->pcidev);
254 kfree(piar); 248 kfree(piar);
255 goto restart; 249 goto restart;
256 } 250 }
@@ -302,12 +296,10 @@ void eeh_addr_cache_build(void)
302 if (!edev) 296 if (!edev)
303 continue; 297 continue;
304 298
305 pci_dev_get(dev); /* matching put is in eeh_remove_device() */
306 dev->dev.archdata.edev = edev; 299 dev->dev.archdata.edev = edev;
307 edev->pdev = dev; 300 edev->pdev = dev;
308 301
309 eeh_addr_cache_insert_dev(dev); 302 eeh_addr_cache_insert_dev(dev);
310
311 eeh_sysfs_add_device(dev); 303 eeh_sysfs_add_device(dev);
312 } 304 }
313 305
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 2b1ce17cae50..36bed5a12750 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -143,10 +143,14 @@ static void eeh_disable_irq(struct pci_dev *dev)
143static void eeh_enable_irq(struct pci_dev *dev) 143static void eeh_enable_irq(struct pci_dev *dev)
144{ 144{
145 struct eeh_dev *edev = pci_dev_to_eeh_dev(dev); 145 struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);
146 struct irq_desc *desc;
146 147
147 if ((edev->mode) & EEH_DEV_IRQ_DISABLED) { 148 if ((edev->mode) & EEH_DEV_IRQ_DISABLED) {
148 edev->mode &= ~EEH_DEV_IRQ_DISABLED; 149 edev->mode &= ~EEH_DEV_IRQ_DISABLED;
149 enable_irq(dev->irq); 150
151 desc = irq_to_desc(dev->irq);
152 if (desc && desc->depth > 0)
153 enable_irq(dev->irq);
150 } 154 }
151} 155}
152 156
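Annotation: the hunk above only calls enable_irq() when the descriptor still shows a positive disable depth, so the recovery path never triggers an "unbalanced enable" for an interrupt that somebody else has already re-enabled. A minimal userspace sketch of that depth-counted model follows; the struct and function names are made up, the real state lives in struct irq_desc.

	#include <stdio.h>

	struct fake_irq_desc { int depth; };	/* depth > 0 means "disabled" */

	static void fake_disable_irq(struct fake_irq_desc *d) { d->depth++; }

	static void fake_enable_irq(struct fake_irq_desc *d)
	{
		if (d->depth == 0) {
			fprintf(stderr, "Unbalanced enable\n");	/* the kernel WARNs here */
			return;
		}
		d->depth--;
	}

	int main(void)
	{
		struct fake_irq_desc desc = { .depth = 0 };

		fake_disable_irq(&desc);	/* what eeh_disable_irq() did earlier */
		fake_enable_irq(&desc);		/* ...and the driver may re-enable it itself */

		/* Recovery path: only re-enable while the depth is still positive,
		 * mirroring the desc->depth > 0 check added above. */
		if (desc.depth > 0)
			fake_enable_irq(&desc);
		else
			printf("already enabled, skipping enable_irq()\n");
		return 0;
	}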
@@ -338,6 +342,54 @@ static void *eeh_report_failure(void *data, void *userdata)
338 return NULL; 342 return NULL;
339} 343}
340 344
345static void *eeh_rmv_device(void *data, void *userdata)
346{
347 struct pci_driver *driver;
348 struct eeh_dev *edev = (struct eeh_dev *)data;
349 struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
350 int *removed = (int *)userdata;
351
352 /*
353 * Actually, we should remove the PCI bridges as well.
 354 * However, that would add a lot of complexity,
 355 * particularly since some of the devices under the bridge
 356 * might support EEH. So we only care about plain PCI
 357 * devices here for simplicity.
358 */
359 if (!dev || (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE))
360 return NULL;
361 driver = eeh_pcid_get(dev);
362 if (driver && driver->err_handler)
363 return NULL;
364
365 /* Remove it from PCI subsystem */
366 pr_debug("EEH: Removing %s without EEH sensitive driver\n",
367 pci_name(dev));
368 edev->bus = dev->bus;
369 edev->mode |= EEH_DEV_DISCONNECTED;
370 (*removed)++;
371
372 pci_stop_and_remove_bus_device(dev);
373
374 return NULL;
375}
376
377static void *eeh_pe_detach_dev(void *data, void *userdata)
378{
379 struct eeh_pe *pe = (struct eeh_pe *)data;
380 struct eeh_dev *edev, *tmp;
381
382 eeh_pe_for_each_dev(pe, edev, tmp) {
383 if (!(edev->mode & EEH_DEV_DISCONNECTED))
384 continue;
385
386 edev->mode &= ~(EEH_DEV_DISCONNECTED | EEH_DEV_IRQ_DISABLED);
387 eeh_rmv_from_parent_pe(edev);
388 }
389
390 return NULL;
391}
392
341/** 393/**
342 * eeh_reset_device - Perform actual reset of a pci slot 394 * eeh_reset_device - Perform actual reset of a pci slot
343 * @pe: EEH PE 395 * @pe: EEH PE
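Annotation: eeh_rmv_device() above hot-removes only endpoints whose bound driver carries no err_handler; bridges and EEH-aware drivers stay put so recovery can drive them through the error callbacks. A small standalone sketch of that filter predicate, with hypothetical fields standing in for the real pci_dev/pci_driver state:

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical, simplified view of the state the predicate looks at. */
	struct fake_dev {
		const char *name;
		bool is_bridge;		/* dev->hdr_type & PCI_HEADER_TYPE_BRIDGE */
		bool has_err_handler;	/* driver && driver->err_handler */
	};

	/* Mirrors the decision in eeh_rmv_device(): remove only plain endpoints
	 * that have no error-handler-equipped driver bound. */
	static bool should_hot_remove(const struct fake_dev *d)
	{
		if (d->is_bridge)
			return false;
		if (d->has_err_handler)
			return false;
		return true;
	}

	int main(void)
	{
		struct fake_dev devs[] = {
			{ "endpoint, dumb driver", false, false },
			{ "endpoint, EEH-aware",   false, true  },
			{ "PCI-PCI bridge",        true,  false },
		};

		for (unsigned i = 0; i < sizeof(devs) / sizeof(devs[0]); i++)
			printf("%-22s -> %s\n", devs[i].name,
			       should_hot_remove(&devs[i]) ? "remove" : "keep");
		return 0;
	}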
@@ -349,8 +401,9 @@ static void *eeh_report_failure(void *data, void *userdata)
349 */ 401 */
350static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus) 402static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
351{ 403{
404 struct pci_bus *frozen_bus = eeh_pe_bus_get(pe);
352 struct timeval tstamp; 405 struct timeval tstamp;
353 int cnt, rc; 406 int cnt, rc, removed = 0;
354 407
355 /* pcibios will clear the counter; save the value */ 408 /* pcibios will clear the counter; save the value */
356 cnt = pe->freeze_count; 409 cnt = pe->freeze_count;
@@ -362,8 +415,11 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
362 * devices are expected to be attached soon when calling 415 * devices are expected to be attached soon when calling
363 * into pcibios_add_pci_devices(). 416 * into pcibios_add_pci_devices().
364 */ 417 */
418 eeh_pe_state_mark(pe, EEH_PE_KEEP);
365 if (bus) 419 if (bus)
366 __pcibios_remove_pci_devices(bus, 0); 420 pcibios_remove_pci_devices(bus);
421 else if (frozen_bus)
422 eeh_pe_dev_traverse(pe, eeh_rmv_device, &removed);
367 423
368 /* Reset the pci controller. (Asserts RST#; resets config space). 424 /* Reset the pci controller. (Asserts RST#; resets config space).
369 * Reconfigure bridges and devices. Don't try to bring the system 425 * Reconfigure bridges and devices. Don't try to bring the system
@@ -384,9 +440,24 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
384 * potentially weird things happen. 440 * potentially weird things happen.
385 */ 441 */
386 if (bus) { 442 if (bus) {
443 pr_info("EEH: Sleep 5s ahead of complete hotplug\n");
387 ssleep(5); 444 ssleep(5);
445
446 /*
447 * The EEH device is still connected with its parent
448 * PE. We should disconnect it so the binding can be
449 * rebuilt when adding PCI devices.
450 */
451 eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
388 pcibios_add_pci_devices(bus); 452 pcibios_add_pci_devices(bus);
453 } else if (frozen_bus && removed) {
454 pr_info("EEH: Sleep 5s ahead of partial hotplug\n");
455 ssleep(5);
456
457 eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
458 pcibios_add_pci_devices(frozen_bus);
389 } 459 }
460 eeh_pe_state_clear(pe, EEH_PE_KEEP);
390 461
391 pe->tstamp = tstamp; 462 pe->tstamp = tstamp;
392 pe->freeze_count = cnt; 463 pe->freeze_count = cnt;
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
index 016588a6f5ed..f9450537e335 100644
--- a/arch/powerpc/kernel/eeh_pe.c
+++ b/arch/powerpc/kernel/eeh_pe.c
@@ -149,8 +149,8 @@ static struct eeh_pe *eeh_pe_next(struct eeh_pe *pe,
149 * callback returns something other than NULL, or no more PEs 149 * callback returns something other than NULL, or no more PEs
150 * to be traversed. 150 * to be traversed.
151 */ 151 */
152static void *eeh_pe_traverse(struct eeh_pe *root, 152void *eeh_pe_traverse(struct eeh_pe *root,
153 eeh_traverse_func fn, void *flag) 153 eeh_traverse_func fn, void *flag)
154{ 154{
155 struct eeh_pe *pe; 155 struct eeh_pe *pe;
156 void *ret; 156 void *ret;
@@ -176,7 +176,7 @@ void *eeh_pe_dev_traverse(struct eeh_pe *root,
176 eeh_traverse_func fn, void *flag) 176 eeh_traverse_func fn, void *flag)
177{ 177{
178 struct eeh_pe *pe; 178 struct eeh_pe *pe;
179 struct eeh_dev *edev; 179 struct eeh_dev *edev, *tmp;
180 void *ret; 180 void *ret;
181 181
182 if (!root) { 182 if (!root) {
@@ -186,7 +186,7 @@ void *eeh_pe_dev_traverse(struct eeh_pe *root,
186 186
187 /* Traverse root PE */ 187 /* Traverse root PE */
188 for (pe = root; pe; pe = eeh_pe_next(pe, root)) { 188 for (pe = root; pe; pe = eeh_pe_next(pe, root)) {
189 eeh_pe_for_each_dev(pe, edev) { 189 eeh_pe_for_each_dev(pe, edev, tmp) {
190 ret = fn(edev, flag); 190 ret = fn(edev, flag);
191 if (ret) 191 if (ret)
192 return ret; 192 return ret;
@@ -333,7 +333,7 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
333 while (parent) { 333 while (parent) {
334 if (!(parent->type & EEH_PE_INVALID)) 334 if (!(parent->type & EEH_PE_INVALID))
335 break; 335 break;
336 parent->type &= ~EEH_PE_INVALID; 336 parent->type &= ~(EEH_PE_INVALID | EEH_PE_KEEP);
337 parent = parent->parent; 337 parent = parent->parent;
338 } 338 }
339 pr_debug("EEH: Add %s to Device PE#%x, Parent PE#%x\n", 339 pr_debug("EEH: Add %s to Device PE#%x, Parent PE#%x\n",
@@ -397,21 +397,20 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
397/** 397/**
398 * eeh_rmv_from_parent_pe - Remove one EEH device from the associated PE 398 * eeh_rmv_from_parent_pe - Remove one EEH device from the associated PE
399 * @edev: EEH device 399 * @edev: EEH device
400 * @purge_pe: remove PE or not
401 * 400 *
402 * The PE hierarchy tree might be changed when doing PCI hotplug. 401 * The PE hierarchy tree might be changed when doing PCI hotplug.
403 * Also, the PCI devices or buses could be removed from the system 402 * Also, the PCI devices or buses could be removed from the system
404 * during EEH recovery. So we have to call the function remove the 403 * during EEH recovery. So we have to call the function remove the
405 * corresponding PE accordingly if necessary. 404 * corresponding PE accordingly if necessary.
406 */ 405 */
407int eeh_rmv_from_parent_pe(struct eeh_dev *edev, int purge_pe) 406int eeh_rmv_from_parent_pe(struct eeh_dev *edev)
408{ 407{
409 struct eeh_pe *pe, *parent, *child; 408 struct eeh_pe *pe, *parent, *child;
410 int cnt; 409 int cnt;
411 410
412 if (!edev->pe) { 411 if (!edev->pe) {
413 pr_warning("%s: No PE found for EEH device %s\n", 412 pr_debug("%s: No PE found for EEH device %s\n",
414 __func__, edev->dn->full_name); 413 __func__, edev->dn->full_name);
415 return -EEXIST; 414 return -EEXIST;
416 } 415 }
417 416
@@ -431,7 +430,7 @@ int eeh_rmv_from_parent_pe(struct eeh_dev *edev, int purge_pe)
431 if (pe->type & EEH_PE_PHB) 430 if (pe->type & EEH_PE_PHB)
432 break; 431 break;
433 432
434 if (purge_pe) { 433 if (!(pe->state & EEH_PE_KEEP)) {
435 if (list_empty(&pe->edevs) && 434 if (list_empty(&pe->edevs) &&
436 list_empty(&pe->child_list)) { 435 list_empty(&pe->child_list)) {
437 list_del(&pe->child); 436 list_del(&pe->child);
@@ -502,7 +501,7 @@ static void *__eeh_pe_state_mark(void *data, void *flag)
502{ 501{
503 struct eeh_pe *pe = (struct eeh_pe *)data; 502 struct eeh_pe *pe = (struct eeh_pe *)data;
504 int state = *((int *)flag); 503 int state = *((int *)flag);
505 struct eeh_dev *tmp; 504 struct eeh_dev *edev, *tmp;
506 struct pci_dev *pdev; 505 struct pci_dev *pdev;
507 506
508 /* 507 /*
@@ -512,8 +511,8 @@ static void *__eeh_pe_state_mark(void *data, void *flag)
512 * the PCI device driver. 511 * the PCI device driver.
513 */ 512 */
514 pe->state |= state; 513 pe->state |= state;
515 eeh_pe_for_each_dev(pe, tmp) { 514 eeh_pe_for_each_dev(pe, edev, tmp) {
516 pdev = eeh_dev_to_pci_dev(tmp); 515 pdev = eeh_dev_to_pci_dev(edev);
517 if (pdev) 516 if (pdev)
518 pdev->error_state = pci_channel_io_frozen; 517 pdev->error_state = pci_channel_io_frozen;
519 } 518 }
@@ -579,7 +578,7 @@ void eeh_pe_state_clear(struct eeh_pe *pe, int state)
579 * blocked on normal path during the stage. So we need utilize 578 * blocked on normal path during the stage. So we need utilize
580 * eeh operations, which is always permitted. 579 * eeh operations, which is always permitted.
581 */ 580 */
582static void eeh_bridge_check_link(struct pci_dev *pdev, 581static void eeh_bridge_check_link(struct eeh_dev *edev,
583 struct device_node *dn) 582 struct device_node *dn)
584{ 583{
585 int cap; 584 int cap;
@@ -590,16 +589,17 @@ static void eeh_bridge_check_link(struct pci_dev *pdev,
590 * We only check root port and downstream ports of 589 * We only check root port and downstream ports of
591 * PCIe switches 590 * PCIe switches
592 */ 591 */
593 if (!pci_is_pcie(pdev) || 592 if (!(edev->mode & (EEH_DEV_ROOT_PORT | EEH_DEV_DS_PORT)))
594 (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT &&
595 pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM))
596 return; 593 return;
597 594
598 pr_debug("%s: Check PCIe link for %s ...\n", 595 pr_debug("%s: Check PCIe link for %04x:%02x:%02x.%01x ...\n",
599 __func__, pci_name(pdev)); 596 __func__, edev->phb->global_number,
597 edev->config_addr >> 8,
598 PCI_SLOT(edev->config_addr & 0xFF),
599 PCI_FUNC(edev->config_addr & 0xFF));
600 600
601 /* Check slot status */ 601 /* Check slot status */
602 cap = pdev->pcie_cap; 602 cap = edev->pcie_cap;
603 eeh_ops->read_config(dn, cap + PCI_EXP_SLTSTA, 2, &val); 603 eeh_ops->read_config(dn, cap + PCI_EXP_SLTSTA, 2, &val);
604 if (!(val & PCI_EXP_SLTSTA_PDS)) { 604 if (!(val & PCI_EXP_SLTSTA_PDS)) {
605 pr_debug(" No card in the slot (0x%04x) !\n", val); 605 pr_debug(" No card in the slot (0x%04x) !\n", val);
@@ -653,8 +653,7 @@ static void eeh_bridge_check_link(struct pci_dev *pdev,
653#define BYTE_SWAP(OFF) (8*((OFF)/4)+3-(OFF)) 653#define BYTE_SWAP(OFF) (8*((OFF)/4)+3-(OFF))
654#define SAVED_BYTE(OFF) (((u8 *)(edev->config_space))[BYTE_SWAP(OFF)]) 654#define SAVED_BYTE(OFF) (((u8 *)(edev->config_space))[BYTE_SWAP(OFF)])
655 655
656static void eeh_restore_bridge_bars(struct pci_dev *pdev, 656static void eeh_restore_bridge_bars(struct eeh_dev *edev,
657 struct eeh_dev *edev,
658 struct device_node *dn) 657 struct device_node *dn)
659{ 658{
660 int i; 659 int i;
@@ -680,7 +679,7 @@ static void eeh_restore_bridge_bars(struct pci_dev *pdev,
680 eeh_ops->write_config(dn, PCI_COMMAND, 4, edev->config_space[1]); 679 eeh_ops->write_config(dn, PCI_COMMAND, 4, edev->config_space[1]);
681 680
682 /* Check the PCIe link is ready */ 681 /* Check the PCIe link is ready */
683 eeh_bridge_check_link(pdev, dn); 682 eeh_bridge_check_link(edev, dn);
684} 683}
685 684
686static void eeh_restore_device_bars(struct eeh_dev *edev, 685static void eeh_restore_device_bars(struct eeh_dev *edev,
@@ -729,19 +728,12 @@ static void eeh_restore_device_bars(struct eeh_dev *edev,
729 */ 728 */
730static void *eeh_restore_one_device_bars(void *data, void *flag) 729static void *eeh_restore_one_device_bars(void *data, void *flag)
731{ 730{
732 struct pci_dev *pdev = NULL;
733 struct eeh_dev *edev = (struct eeh_dev *)data; 731 struct eeh_dev *edev = (struct eeh_dev *)data;
734 struct device_node *dn = eeh_dev_to_of_node(edev); 732 struct device_node *dn = eeh_dev_to_of_node(edev);
735 733
736 /* Trace the PCI bridge */ 734 /* Do special restore for bridges */
737 if (eeh_probe_mode_dev()) { 735 if (edev->mode & EEH_DEV_BRIDGE)
738 pdev = eeh_dev_to_pci_dev(edev); 736 eeh_restore_bridge_bars(edev, dn);
739 if (pdev->hdr_type != PCI_HEADER_TYPE_BRIDGE)
740 pdev = NULL;
741 }
742
743 if (pdev)
744 eeh_restore_bridge_bars(pdev, edev, dn);
745 else 737 else
746 eeh_restore_device_bars(edev, dn); 738 eeh_restore_device_bars(edev, dn);
747 739
diff --git a/arch/powerpc/kernel/eeh_sysfs.c b/arch/powerpc/kernel/eeh_sysfs.c
index e7ae3484918c..5d753d4f2c75 100644
--- a/arch/powerpc/kernel/eeh_sysfs.c
+++ b/arch/powerpc/kernel/eeh_sysfs.c
@@ -56,19 +56,40 @@ EEH_SHOW_ATTR(eeh_pe_config_addr, pe_config_addr, "0x%x");
56 56
57void eeh_sysfs_add_device(struct pci_dev *pdev) 57void eeh_sysfs_add_device(struct pci_dev *pdev)
58{ 58{
59 struct eeh_dev *edev = pci_dev_to_eeh_dev(pdev);
59 int rc=0; 60 int rc=0;
60 61
62 if (edev && (edev->mode & EEH_DEV_SYSFS))
63 return;
64
61 rc += device_create_file(&pdev->dev, &dev_attr_eeh_mode); 65 rc += device_create_file(&pdev->dev, &dev_attr_eeh_mode);
62 rc += device_create_file(&pdev->dev, &dev_attr_eeh_config_addr); 66 rc += device_create_file(&pdev->dev, &dev_attr_eeh_config_addr);
63 rc += device_create_file(&pdev->dev, &dev_attr_eeh_pe_config_addr); 67 rc += device_create_file(&pdev->dev, &dev_attr_eeh_pe_config_addr);
64 68
65 if (rc) 69 if (rc)
66 printk(KERN_WARNING "EEH: Unable to create sysfs entries\n"); 70 printk(KERN_WARNING "EEH: Unable to create sysfs entries\n");
71 else if (edev)
72 edev->mode |= EEH_DEV_SYSFS;
67} 73}
68 74
69void eeh_sysfs_remove_device(struct pci_dev *pdev) 75void eeh_sysfs_remove_device(struct pci_dev *pdev)
70{ 76{
77 struct eeh_dev *edev = pci_dev_to_eeh_dev(pdev);
78
79 /*
80 * The parent directory might have been removed. We needn't
81 * continue for that case.
82 */
83 if (!pdev->dev.kobj.sd) {
84 if (edev)
85 edev->mode &= ~EEH_DEV_SYSFS;
86 return;
87 }
88
71 device_remove_file(&pdev->dev, &dev_attr_eeh_mode); 89 device_remove_file(&pdev->dev, &dev_attr_eeh_mode);
72 device_remove_file(&pdev->dev, &dev_attr_eeh_config_addr); 90 device_remove_file(&pdev->dev, &dev_attr_eeh_config_addr);
73 device_remove_file(&pdev->dev, &dev_attr_eeh_pe_config_addr); 91 device_remove_file(&pdev->dev, &dev_attr_eeh_pe_config_addr);
92
93 if (edev)
94 edev->mode &= ~EEH_DEV_SYSFS;
74} 95}
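Annotation: with the EEH_DEV_SYSFS bit, the add/remove pair above becomes idempotent: a second add is a no-op and a remove against an already-dismantled kobject just clears the flag. A toy model of that guard; the flag name is reused from the diff, its value and everything else here are assumptions for illustration only.

	#include <stdbool.h>
	#include <stdio.h>

	#define EEH_DEV_SYSFS 0x10	/* assumed value; only the idea matters */

	struct fake_edev { unsigned int mode; };

	static void sysfs_add(struct fake_edev *e)
	{
		if (e->mode & EEH_DEV_SYSFS)	/* already there, don't duplicate */
			return;
		printf("creating attributes\n");
		e->mode |= EEH_DEV_SYSFS;
	}

	static void sysfs_remove(struct fake_edev *e, bool parent_gone)
	{
		if (parent_gone) {		/* kobject already torn down */
			e->mode &= ~EEH_DEV_SYSFS;
			return;
		}
		printf("removing attributes\n");
		e->mode &= ~EEH_DEV_SYSFS;
	}

	int main(void)
	{
		struct fake_edev e = { 0 };

		sysfs_add(&e);
		sysfs_add(&e);			/* second call is silently skipped */
		sysfs_remove(&e, false);
		return 0;
	}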
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index f46914a0f33e..7d22a675fe1a 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -1462,6 +1462,8 @@ void pcibios_finish_adding_to_bus(struct pci_bus *bus)
1462 /* Allocate bus and devices resources */ 1462 /* Allocate bus and devices resources */
1463 pcibios_allocate_bus_resources(bus); 1463 pcibios_allocate_bus_resources(bus);
1464 pcibios_claim_one_bus(bus); 1464 pcibios_claim_one_bus(bus);
1465 if (!pci_has_flag(PCI_PROBE_ONLY))
1466 pci_assign_unassigned_bus_resources(bus);
1465 1467
1466 /* Fixup EEH */ 1468 /* Fixup EEH */
1467 eeh_add_device_tree_late(bus); 1469 eeh_add_device_tree_late(bus);
diff --git a/arch/powerpc/kernel/pci-hotplug.c b/arch/powerpc/kernel/pci-hotplug.c
index 3f608800c06b..c1e17ae68a08 100644
--- a/arch/powerpc/kernel/pci-hotplug.c
+++ b/arch/powerpc/kernel/pci-hotplug.c
@@ -22,45 +22,40 @@
22#include <asm/eeh.h> 22#include <asm/eeh.h>
23 23
24/** 24/**
25 * __pcibios_remove_pci_devices - remove all devices under this bus 25 * pcibios_release_device - release PCI device
26 * @dev: PCI device
27 *
28 * The function is called before releasing the indicated PCI device.
29 */
30void pcibios_release_device(struct pci_dev *dev)
31{
32 eeh_remove_device(dev);
33}
34
35/**
36 * pcibios_remove_pci_devices - remove all devices under this bus
26 * @bus: the indicated PCI bus 37 * @bus: the indicated PCI bus
27 * @purge_pe: destroy the PE on removal of PCI devices
28 * 38 *
29 * Remove all of the PCI devices under this bus both from the 39 * Remove all of the PCI devices under this bus both from the
30 * linux pci device tree, and from the powerpc EEH address cache. 40 * linux pci device tree, and from the powerpc EEH address cache.
31 * By default, the corresponding PE will be destroied during the
32 * normal PCI hotplug path. For PCI hotplug during EEH recovery,
33 * the corresponding PE won't be destroied and deallocated.
34 */ 41 */
35void __pcibios_remove_pci_devices(struct pci_bus *bus, int purge_pe) 42void pcibios_remove_pci_devices(struct pci_bus *bus)
36{ 43{
37 struct pci_dev *dev, *tmp; 44 struct pci_dev *dev, *tmp;
38 struct pci_bus *child_bus; 45 struct pci_bus *child_bus;
39 46
40 /* First go down child busses */ 47 /* First go down child busses */
41 list_for_each_entry(child_bus, &bus->children, node) 48 list_for_each_entry(child_bus, &bus->children, node)
42 __pcibios_remove_pci_devices(child_bus, purge_pe); 49 pcibios_remove_pci_devices(child_bus);
43 50
44 pr_debug("PCI: Removing devices on bus %04x:%02x\n", 51 pr_debug("PCI: Removing devices on bus %04x:%02x\n",
45 pci_domain_nr(bus), bus->number); 52 pci_domain_nr(bus), bus->number);
46 list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) { 53 list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) {
47 pr_debug(" * Removing %s...\n", pci_name(dev)); 54 pr_debug(" Removing %s...\n", pci_name(dev));
48 eeh_remove_bus_device(dev, purge_pe);
49 pci_stop_and_remove_bus_device(dev); 55 pci_stop_and_remove_bus_device(dev);
50 } 56 }
51} 57}
52 58
53/**
54 * pcibios_remove_pci_devices - remove all devices under this bus
55 * @bus: the indicated PCI bus
56 *
57 * Remove all of the PCI devices under this bus both from the
58 * linux pci device tree, and from the powerpc EEH address cache.
59 */
60void pcibios_remove_pci_devices(struct pci_bus *bus)
61{
62 __pcibios_remove_pci_devices(bus, 1);
63}
64EXPORT_SYMBOL_GPL(pcibios_remove_pci_devices); 59EXPORT_SYMBOL_GPL(pcibios_remove_pci_devices);
65 60
66/** 61/**
@@ -76,7 +71,7 @@ EXPORT_SYMBOL_GPL(pcibios_remove_pci_devices);
76 */ 71 */
77void pcibios_add_pci_devices(struct pci_bus * bus) 72void pcibios_add_pci_devices(struct pci_bus * bus)
78{ 73{
79 int slotno, num, mode, pass, max; 74 int slotno, mode, pass, max;
80 struct pci_dev *dev; 75 struct pci_dev *dev;
81 struct device_node *dn = pci_bus_to_OF_node(bus); 76 struct device_node *dn = pci_bus_to_OF_node(bus);
82 77
@@ -90,11 +85,15 @@ void pcibios_add_pci_devices(struct pci_bus * bus)
90 /* use ofdt-based probe */ 85 /* use ofdt-based probe */
91 of_rescan_bus(dn, bus); 86 of_rescan_bus(dn, bus);
92 } else if (mode == PCI_PROBE_NORMAL) { 87 } else if (mode == PCI_PROBE_NORMAL) {
93 /* use legacy probe */ 88 /*
89 * Use legacy probe. In the partial hotplug case, we
90 * probably have grandchildren devices unplugged. So
 91 * we don't check the return value from pci_scan_slot() in
 92 * order to fully rescan all the way down and pick them up.
 93 * They may have been removed during partial hotplug.
94 */
94 slotno = PCI_SLOT(PCI_DN(dn->child)->devfn); 95 slotno = PCI_SLOT(PCI_DN(dn->child)->devfn);
95 num = pci_scan_slot(bus, PCI_DEVFN(slotno, 0)); 96 pci_scan_slot(bus, PCI_DEVFN(slotno, 0));
96 if (!num)
97 return;
98 pcibios_setup_bus_devices(bus); 97 pcibios_setup_bus_devices(bus);
99 max = bus->busn_res.start; 98 max = bus->busn_res.start;
100 for (pass = 0; pass < 2; pass++) { 99 for (pass = 0; pass < 2; pass++) {
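Annotation: pcibios_remove_pci_devices() now recurses into child busses before removing the devices on the current bus, so children are always gone before their parent bridge. A tiny sketch of that ordering over a made-up bus tree:

	#include <stdio.h>

	/* Hypothetical miniature of the bus tree walked above. */
	struct fake_bus {
		const char *name;
		struct fake_bus *children[4];
		int nr_children;
	};

	static void remove_devices(struct fake_bus *bus)
	{
		for (int i = 0; i < bus->nr_children; i++)
			remove_devices(bus->children[i]);	/* children first */
		printf("removing devices on %s\n", bus->name);
	}

	int main(void)
	{
		struct fake_bus leaf1 = { "0000:02", { 0 }, 0 };
		struct fake_bus leaf2 = { "0000:03", { 0 }, 0 };
		struct fake_bus root  = { "0000:00", { &leaf1, &leaf2 }, 2 };

		remove_devices(&root);	/* prints 02, 03, then 00 */
		return 0;
	}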
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
index 6b0ba5854d99..15d9105323bf 100644
--- a/arch/powerpc/kernel/pci_of_scan.c
+++ b/arch/powerpc/kernel/pci_of_scan.c
@@ -230,11 +230,14 @@ void of_scan_pci_bridge(struct pci_dev *dev)
230 return; 230 return;
231 } 231 }
232 232
233 bus = pci_add_new_bus(dev->bus, dev, busrange[0]); 233 bus = pci_find_bus(pci_domain_nr(dev->bus), busrange[0]);
234 if (!bus) { 234 if (!bus) {
235 printk(KERN_ERR "Failed to create pci bus for %s\n", 235 bus = pci_add_new_bus(dev->bus, dev, busrange[0]);
236 node->full_name); 236 if (!bus) {
237 return; 237 printk(KERN_ERR "Failed to create pci bus for %s\n",
238 node->full_name);
239 return;
240 }
238 } 241 }
239 242
240 bus->primary = dev->bus->number; 243 bus->primary = dev->bus->number;
@@ -292,6 +295,38 @@ void of_scan_pci_bridge(struct pci_dev *dev)
292} 295}
293EXPORT_SYMBOL(of_scan_pci_bridge); 296EXPORT_SYMBOL(of_scan_pci_bridge);
294 297
298static struct pci_dev *of_scan_pci_dev(struct pci_bus *bus,
299 struct device_node *dn)
300{
301 struct pci_dev *dev = NULL;
302 const u32 *reg;
303 int reglen, devfn;
304
305 pr_debug(" * %s\n", dn->full_name);
306 if (!of_device_is_available(dn))
307 return NULL;
308
309 reg = of_get_property(dn, "reg", &reglen);
310 if (reg == NULL || reglen < 20)
311 return NULL;
312 devfn = (reg[0] >> 8) & 0xff;
313
314 /* Check if the PCI device is already there */
315 dev = pci_get_slot(bus, devfn);
316 if (dev) {
317 pci_dev_put(dev);
318 return dev;
319 }
320
321 /* create a new pci_dev for this device */
322 dev = of_create_pci_dev(dn, bus, devfn);
323 if (!dev)
324 return NULL;
325
326 pr_debug(" dev header type: %x\n", dev->hdr_type);
327 return dev;
328}
329
295/** 330/**
296 * __of_scan_bus - given a PCI bus node, setup bus and scan for child devices 331 * __of_scan_bus - given a PCI bus node, setup bus and scan for child devices
297 * @node: device tree node for the PCI bus 332 * @node: device tree node for the PCI bus
@@ -302,8 +337,6 @@ static void __of_scan_bus(struct device_node *node, struct pci_bus *bus,
302 int rescan_existing) 337 int rescan_existing)
303{ 338{
304 struct device_node *child; 339 struct device_node *child;
305 const u32 *reg;
306 int reglen, devfn;
307 struct pci_dev *dev; 340 struct pci_dev *dev;
308 341
309 pr_debug("of_scan_bus(%s) bus no %d...\n", 342 pr_debug("of_scan_bus(%s) bus no %d...\n",
@@ -311,16 +344,7 @@ static void __of_scan_bus(struct device_node *node, struct pci_bus *bus,
311 344
312 /* Scan direct children */ 345 /* Scan direct children */
313 for_each_child_of_node(node, child) { 346 for_each_child_of_node(node, child) {
314 pr_debug(" * %s\n", child->full_name); 347 dev = of_scan_pci_dev(bus, child);
315 if (!of_device_is_available(child))
316 continue;
317 reg = of_get_property(child, "reg", &reglen);
318 if (reg == NULL || reglen < 20)
319 continue;
320 devfn = (reg[0] >> 8) & 0xff;
321
322 /* create a new pci_dev for this device */
323 dev = of_create_pci_dev(child, bus, devfn);
324 if (!dev) 348 if (!dev)
325 continue; 349 continue;
326 pr_debug(" dev header type: %x\n", dev->hdr_type); 350 pr_debug(" dev header type: %x\n", dev->hdr_type);
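Annotation: of_scan_pci_dev() pulls devfn out of the first cell of the "reg" property; that cell is the standard OF PCI phys.hi word, with the bus number in bits 16-23 and devfn in bits 8-15. A worked decoding example, where the sample cell value is made up:

	#include <stdio.h>
	#include <stdint.h>

	/* Decode bus/devfn from an OF PCI phys.hi cell
	 * (npt000ss bbbbbbbb dddddfff rrrrrrrr). */
	int main(void)
	{
		uint32_t phys_hi = 0x00008800;	/* hypothetical: bus 0, device 0x11, function 0 */
		unsigned bus   = (phys_hi >> 16) & 0xff;
		unsigned devfn = (phys_hi >> 8) & 0xff;	/* same extraction as of_scan_pci_dev() */

		printf("bus %02x, slot %02x, func %u\n",
		       bus, (devfn >> 3) & 0x1f, devfn & 0x7);
		return 0;
	}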
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 5eccda9fd33f..607902424e73 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -644,7 +644,8 @@ unsigned char ibm_architecture_vec[] = {
644 W(0xfffe0000), W(0x003a0000), /* POWER5/POWER5+ */ 644 W(0xfffe0000), W(0x003a0000), /* POWER5/POWER5+ */
645 W(0xffff0000), W(0x003e0000), /* POWER6 */ 645 W(0xffff0000), W(0x003e0000), /* POWER6 */
646 W(0xffff0000), W(0x003f0000), /* POWER7 */ 646 W(0xffff0000), W(0x003f0000), /* POWER7 */
647 W(0xffff0000), W(0x004b0000), /* POWER8 */ 647 W(0xffff0000), W(0x004b0000), /* POWER8E */
648 W(0xffff0000), W(0x004d0000), /* POWER8 */
648 W(0xffffffff), W(0x0f000004), /* all 2.07-compliant */ 649 W(0xffffffff), W(0x0f000004), /* all 2.07-compliant */
649 W(0xffffffff), W(0x0f000003), /* all 2.06-compliant */ 650 W(0xffffffff), W(0x0f000003), /* all 2.06-compliant */
650 W(0xffffffff), W(0x0f000002), /* all 2.05-compliant */ 651 W(0xffffffff), W(0x0f000002), /* all 2.05-compliant */
@@ -706,7 +707,7 @@ unsigned char ibm_architecture_vec[] = {
706 * must match by the macro below. Update the definition if 707 * must match by the macro below. Update the definition if
707 * the structure layout changes. 708 * the structure layout changes.
708 */ 709 */
709#define IBM_ARCH_VEC_NRCORES_OFFSET 117 710#define IBM_ARCH_VEC_NRCORES_OFFSET 125
710 W(NR_CPUS), /* number of cores supported */ 711 W(NR_CPUS), /* number of cores supported */
711 0, 712 0,
712 0, 713 0,
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 654e479802f2..f096e72262f4 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -38,9 +38,6 @@ jiffies = jiffies_64 + 4;
38#endif 38#endif
39SECTIONS 39SECTIONS
40{ 40{
41 . = 0;
42 reloc_start = .;
43
44 . = KERNELBASE; 41 . = KERNELBASE;
45 42
46/* 43/*
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 3f0c30ae4791..c33d939120c9 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -43,6 +43,7 @@ static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
43{ 43{
44 unsigned long va; 44 unsigned long va;
45 unsigned int penc; 45 unsigned int penc;
46 unsigned long sllp;
46 47
47 /* 48 /*
48 * We need 14 to 65 bits of va for a tlibe of 4K page 49 * We need 14 to 65 bits of va for a tlibe of 4K page
@@ -64,7 +65,9 @@ static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
64 /* clear out bits after (52) [0....52.....63] */ 65 /* clear out bits after (52) [0....52.....63] */
65 va &= ~((1ul << (64 - 52)) - 1); 66 va &= ~((1ul << (64 - 52)) - 1);
66 va |= ssize << 8; 67 va |= ssize << 8;
67 va |= mmu_psize_defs[apsize].sllp << 6; 68 sllp = ((mmu_psize_defs[apsize].sllp & SLB_VSID_L) >> 6) |
69 ((mmu_psize_defs[apsize].sllp & SLB_VSID_LP) >> 4);
70 va |= sllp << 5;
68 asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2) 71 asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
69 : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206) 72 : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
70 : "memory"); 73 : "memory");
@@ -98,6 +101,7 @@ static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
98{ 101{
99 unsigned long va; 102 unsigned long va;
100 unsigned int penc; 103 unsigned int penc;
104 unsigned long sllp;
101 105
102 /* VPN_SHIFT can be atmost 12 */ 106 /* VPN_SHIFT can be atmost 12 */
103 va = vpn << VPN_SHIFT; 107 va = vpn << VPN_SHIFT;
@@ -113,7 +117,9 @@ static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
113 /* clear out bits after(52) [0....52.....63] */ 117 /* clear out bits after(52) [0....52.....63] */
114 va &= ~((1ul << (64 - 52)) - 1); 118 va &= ~((1ul << (64 - 52)) - 1);
115 va |= ssize << 8; 119 va |= ssize << 8;
116 va |= mmu_psize_defs[apsize].sllp << 6; 120 sllp = ((mmu_psize_defs[apsize].sllp & SLB_VSID_L) >> 6) |
121 ((mmu_psize_defs[apsize].sllp & SLB_VSID_LP) >> 4);
122 va |= sllp << 5;
117 asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)" 123 asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
118 : : "r"(va) : "memory"); 124 : : "r"(va) : "memory");
119 break; 125 break;
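Annotation: the fix above stops shifting the raw SLB sllp encoding straight into the tlbie VA; it isolates the L bit and the two LP bits and packs them into VA bits 5-7. A worked example of the repacking; SLB_VSID_L = 0x100 and SLB_VSID_LP = 0x30 are assumptions taken from the usual book3s64 headers, not part of this hunk.

	#include <stdio.h>

	#define SLB_VSID_L  0x100UL	/* assumed: large-page bit of the SLB encoding */
	#define SLB_VSID_LP 0x030UL	/* assumed: LP field, bits 4-5 */

	/* Repack L into bit 2 and LP into bits 0-1, as the new code does. */
	static unsigned long pack_tlbie_field(unsigned long sllp)
	{
		return ((sllp & SLB_VSID_L) >> 6) | ((sllp & SLB_VSID_LP) >> 4);
	}

	int main(void)
	{
		unsigned long sllp_64k = SLB_VSID_L | 0x010UL;	/* hypothetical 64K encoding */
		unsigned long va = 0;

		va |= pack_tlbie_field(sllp_64k) << 5;	/* lands in VA bits 5-7 */
		printf("packed field = 0x%lx, va bits = 0x%lx\n",
		       pack_tlbie_field(sllp_64k), va);	/* -> 0x5, 0xa0 */
		return 0;
	}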
@@ -554,6 +560,7 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
554 seg_off |= vpi << shift; 560 seg_off |= vpi << shift;
555 } 561 }
556 *vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT; 562 *vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;
563 break;
557 case MMU_SEGSIZE_1T: 564 case MMU_SEGSIZE_1T:
558 /* We only have 40 - 23 bits of seg_off in avpn */ 565 /* We only have 40 - 23 bits of seg_off in avpn */
559 seg_off = (avpn & 0x1ffff) << 23; 566 seg_off = (avpn & 0x1ffff) << 23;
@@ -563,6 +570,7 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
563 seg_off |= vpi << shift; 570 seg_off |= vpi << shift;
564 } 571 }
565 *vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT; 572 *vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
573 break;
566 default: 574 default:
567 *vpn = size = 0; 575 *vpn = size = 0;
568 } 576 }
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index a3985aee77fe..24a45f91c65f 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1252,8 +1252,11 @@ nocheck:
1252 1252
1253 ret = 0; 1253 ret = 0;
1254 out: 1254 out:
1255 if (has_branch_stack(event)) 1255 if (has_branch_stack(event)) {
1256 power_pmu_bhrb_enable(event); 1256 power_pmu_bhrb_enable(event);
1257 cpuhw->bhrb_filter = ppmu->bhrb_filter_map(
1258 event->attr.branch_sample_type);
1259 }
1257 1260
1258 perf_pmu_enable(event->pmu); 1261 perf_pmu_enable(event->pmu);
1259 local_irq_restore(flags); 1262 local_irq_restore(flags);
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index 96a64d6a8bdf..7466374d2787 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -561,18 +561,13 @@ static int power8_generic_events[] = {
561static u64 power8_bhrb_filter_map(u64 branch_sample_type) 561static u64 power8_bhrb_filter_map(u64 branch_sample_type)
562{ 562{
563 u64 pmu_bhrb_filter = 0; 563 u64 pmu_bhrb_filter = 0;
564 u64 br_privilege = branch_sample_type & ONLY_PLM;
565 564
566 /* BHRB and regular PMU events share the same prvillege state 565 /* BHRB and regular PMU events share the same privilege state
567 * filter configuration. BHRB is always recorded along with a 566 * filter configuration. BHRB is always recorded along with a
568 * regular PMU event. So privilege state filter criteria for BHRB 567 * regular PMU event. As the privilege state filter is handled
569 * and the companion PMU events has to be the same. As a default 568 * in the basic PMC configuration of the accompanying regular
570 * "perf record" tool sets all privillege bits ON when no filter 569 * PMU event, we ignore any separate BHRB specific request.
571 * criteria is provided in the command line. So as along as all
572 * privillege bits are ON or they are OFF, we are good to go.
573 */ 570 */
574 if ((br_privilege != 7) && (br_privilege != 0))
575 return -1;
576 571
577 /* No branch filter requested */ 572 /* No branch filter requested */
578 if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY) 573 if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY)
@@ -621,10 +616,19 @@ static struct power_pmu power8_pmu = {
621 616
622static int __init init_power8_pmu(void) 617static int __init init_power8_pmu(void)
623{ 618{
619 int rc;
620
624 if (!cur_cpu_spec->oprofile_cpu_type || 621 if (!cur_cpu_spec->oprofile_cpu_type ||
625 strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power8")) 622 strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power8"))
626 return -ENODEV; 623 return -ENODEV;
627 624
628 return register_power_pmu(&power8_pmu); 625 rc = register_power_pmu(&power8_pmu);
626 if (rc)
627 return rc;
628
629 /* Tell userspace that EBB is supported */
630 cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_EBB;
631
632 return 0;
629} 633}
630early_initcall(init_power8_pmu); 634early_initcall(init_power8_pmu);
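Annotation: with the stricter privilege check dropped, power8_bhrb_filter_map() only translates the branch-type bits; privilege filtering comes from the accompanying event's PMC setup. A hedged userspace sketch of requesting a branch stack through perf_event_open(); this uses only the generic perf ABI, nothing power8-specific, and the event choice is illustrative.

	#include <linux/perf_event.h>
	#include <sys/syscall.h>
	#include <string.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		struct perf_event_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_HARDWARE;
		attr.config = PERF_COUNT_HW_CPU_CYCLES;
		attr.sample_period = 100000;
		attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_BRANCH_STACK;
		/* Branch-type request only; privilege comes from the exclude_* bits. */
		attr.branch_sample_type = PERF_SAMPLE_BRANCH_ANY_CALL;
		attr.exclude_kernel = 1;

		long fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
		if (fd < 0)
			perror("perf_event_open");
		else
			printf("branch-stack event opened, fd %ld\n", fd);
		return 0;
	}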
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index 969cce73055a..79663d26e6ea 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -114,7 +114,7 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag)
114 * the root bridge. So it's not reasonable to continue 114 * the root bridge. So it's not reasonable to continue
115 * the probing. 115 * the probing.
116 */ 116 */
117 if (!dn || !edev) 117 if (!dn || !edev || edev->pe)
118 return 0; 118 return 0;
119 119
120 /* Skip for PCI-ISA bridge */ 120 /* Skip for PCI-ISA bridge */
@@ -122,8 +122,19 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag)
122 return 0; 122 return 0;
123 123
124 /* Initialize eeh device */ 124 /* Initialize eeh device */
125 edev->class_code = dev->class; 125 edev->class_code = dev->class;
126 edev->mode = 0; 126 edev->mode &= 0xFFFFFF00;
127 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
128 edev->mode |= EEH_DEV_BRIDGE;
129 if (pci_is_pcie(dev)) {
130 edev->pcie_cap = pci_pcie_cap(dev);
131
132 if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
133 edev->mode |= EEH_DEV_ROOT_PORT;
134 else if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM)
135 edev->mode |= EEH_DEV_DS_PORT;
136 }
137
127 edev->config_addr = ((dev->bus->number << 8) | dev->devfn); 138 edev->config_addr = ((dev->bus->number << 8) | dev->devfn);
128 edev->pe_config_addr = phb->bdfn_to_pe(phb, dev->bus, dev->devfn & 0xff); 139 edev->pe_config_addr = phb->bdfn_to_pe(phb, dev->bus, dev->devfn & 0xff);
129 140
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 49b57b9f835d..d8140b125e62 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -1266,7 +1266,7 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np,
1266 opal_pci_set_pe(phb_id, 0, 0, 7, 1, 1 , OPAL_MAP_PE); 1266 opal_pci_set_pe(phb_id, 0, 0, 7, 1, 1 , OPAL_MAP_PE);
1267} 1267}
1268 1268
1269void pnv_pci_init_ioda2_phb(struct device_node *np) 1269void __init pnv_pci_init_ioda2_phb(struct device_node *np)
1270{ 1270{
1271 pnv_pci_init_ioda_phb(np, 0, PNV_PHB_IODA2); 1271 pnv_pci_init_ioda_phb(np, 0, PNV_PHB_IODA2);
1272} 1272}
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 1bd3399146ed..62b4f8025de0 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -19,7 +19,6 @@ config PPC_PSERIES
19 select ZLIB_DEFLATE 19 select ZLIB_DEFLATE
20 select PPC_DOORBELL 20 select PPC_DOORBELL
21 select HAVE_CONTEXT_TRACKING 21 select HAVE_CONTEXT_TRACKING
22 select HOTPLUG if SMP
23 select HOTPLUG_CPU if SMP 22 select HOTPLUG_CPU if SMP
24 default y 23 default y
25 24
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index b456b157d33d..7fbc25b1813f 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -133,6 +133,48 @@ static int pseries_eeh_init(void)
133 return 0; 133 return 0;
134} 134}
135 135
136static int pseries_eeh_cap_start(struct device_node *dn)
137{
138 struct pci_dn *pdn = PCI_DN(dn);
139 u32 status;
140
141 if (!pdn)
142 return 0;
143
144 rtas_read_config(pdn, PCI_STATUS, 2, &status);
145 if (!(status & PCI_STATUS_CAP_LIST))
146 return 0;
147
148 return PCI_CAPABILITY_LIST;
149}
150
151
152static int pseries_eeh_find_cap(struct device_node *dn, int cap)
153{
154 struct pci_dn *pdn = PCI_DN(dn);
155 int pos = pseries_eeh_cap_start(dn);
156 int cnt = 48; /* Maximal number of capabilities */
157 u32 id;
158
159 if (!pos)
160 return 0;
161
162 while (cnt--) {
163 rtas_read_config(pdn, pos, 1, &pos);
164 if (pos < 0x40)
165 break;
166 pos &= ~3;
167 rtas_read_config(pdn, pos + PCI_CAP_LIST_ID, 1, &id);
168 if (id == 0xff)
169 break;
170 if (id == cap)
171 return pos;
172 pos += PCI_CAP_LIST_NEXT;
173 }
174
175 return 0;
176}
177
136/** 178/**
137 * pseries_eeh_of_probe - EEH probe on the given device 179 * pseries_eeh_of_probe - EEH probe on the given device
138 * @dn: OF node 180 * @dn: OF node
@@ -146,14 +188,16 @@ static void *pseries_eeh_of_probe(struct device_node *dn, void *flag)
146{ 188{
147 struct eeh_dev *edev; 189 struct eeh_dev *edev;
148 struct eeh_pe pe; 190 struct eeh_pe pe;
191 struct pci_dn *pdn = PCI_DN(dn);
149 const u32 *class_code, *vendor_id, *device_id; 192 const u32 *class_code, *vendor_id, *device_id;
150 const u32 *regs; 193 const u32 *regs;
194 u32 pcie_flags;
151 int enable = 0; 195 int enable = 0;
152 int ret; 196 int ret;
153 197
154 /* Retrieve OF node and eeh device */ 198 /* Retrieve OF node and eeh device */
155 edev = of_node_to_eeh_dev(dn); 199 edev = of_node_to_eeh_dev(dn);
156 if (!of_device_is_available(dn)) 200 if (edev->pe || !of_device_is_available(dn))
157 return NULL; 201 return NULL;
158 202
159 /* Retrieve class/vendor/device IDs */ 203 /* Retrieve class/vendor/device IDs */
@@ -167,9 +211,26 @@ static void *pseries_eeh_of_probe(struct device_node *dn, void *flag)
167 if (dn->type && !strcmp(dn->type, "isa")) 211 if (dn->type && !strcmp(dn->type, "isa"))
168 return NULL; 212 return NULL;
169 213
170 /* Update class code and mode of eeh device */ 214 /*
 215 * Update class code and mode of eeh device. We need to
 216 * correctly reflect whether the current device is a root
 217 * port or a PCIe switch downstream port.
218 */
171 edev->class_code = *class_code; 219 edev->class_code = *class_code;
172 edev->mode = 0; 220 edev->pcie_cap = pseries_eeh_find_cap(dn, PCI_CAP_ID_EXP);
221 edev->mode &= 0xFFFFFF00;
222 if ((edev->class_code >> 8) == PCI_CLASS_BRIDGE_PCI) {
223 edev->mode |= EEH_DEV_BRIDGE;
224 if (edev->pcie_cap) {
225 rtas_read_config(pdn, edev->pcie_cap + PCI_EXP_FLAGS,
226 2, &pcie_flags);
227 pcie_flags = (pcie_flags & PCI_EXP_FLAGS_TYPE) >> 4;
228 if (pcie_flags == PCI_EXP_TYPE_ROOT_PORT)
229 edev->mode |= EEH_DEV_ROOT_PORT;
230 else if (pcie_flags == PCI_EXP_TYPE_DOWNSTREAM)
231 edev->mode |= EEH_DEV_DS_PORT;
232 }
233 }
173 234
174 /* Retrieve the device address */ 235 /* Retrieve the device address */
175 regs = of_get_property(dn, "reg", NULL); 236 regs = of_get_property(dn, "reg", NULL);
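Annotation: pseries_eeh_find_cap() above is an ordinary PCI capability-list walk done through RTAS config reads. An equivalent walk over a plain config-space snapshot, as a standalone sketch; the 48-iteration cap mirrors the diff, the sample buffer contents are made up.

	#include <stdio.h>
	#include <stdint.h>

	#define PCI_STATUS		0x06
	#define PCI_STATUS_CAP_LIST	0x10
	#define PCI_CAPABILITY_LIST	0x34
	#define PCI_CAP_LIST_ID		0
	#define PCI_CAP_LIST_NEXT	1

	/* Walk the capability chain in a 256-byte config-space image. */
	static int find_cap(const uint8_t *cfg, uint8_t cap)
	{
		int cnt = 48;		/* maximal number of capabilities, as in the diff */
		int pos;

		if (!(cfg[PCI_STATUS] & PCI_STATUS_CAP_LIST))
			return 0;
		pos = cfg[PCI_CAPABILITY_LIST];

		while (cnt--) {
			if (pos < 0x40)
				break;
			pos &= ~3;
			if (cfg[pos + PCI_CAP_LIST_ID] == 0xff)
				break;
			if (cfg[pos + PCI_CAP_LIST_ID] == cap)
				return pos;
			pos = cfg[pos + PCI_CAP_LIST_NEXT];
		}
		return 0;
	}

	int main(void)
	{
		uint8_t cfg[256] = { 0 };

		/* Hypothetical device: MSI capability at 0x40, PCI Express at 0x60. */
		cfg[PCI_STATUS] = PCI_STATUS_CAP_LIST;
		cfg[PCI_CAPABILITY_LIST] = 0x40;
		cfg[0x40] = 0x05; cfg[0x41] = 0x60;	/* MSI -> next */
		cfg[0x60] = 0x10; cfg[0x61] = 0x00;	/* PCI Express, end of chain */

		printf("PCIe cap at 0x%02x\n", find_cap(cfg, 0x10));
		return 0;
	}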
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 02d6e21619bb..8bad880bd177 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -146,7 +146,7 @@ static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
146 flags = 0; 146 flags = 0;
147 147
148 /* Make pHyp happy */ 148 /* Make pHyp happy */
149 if ((rflags & _PAGE_NO_CACHE) & !(rflags & _PAGE_WRITETHRU)) 149 if ((rflags & _PAGE_NO_CACHE) && !(rflags & _PAGE_WRITETHRU))
150 hpte_r &= ~_PAGE_COHERENT; 150 hpte_r &= ~_PAGE_COHERENT;
151 if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N)) 151 if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N))
152 flags |= H_COALESCE_CAND; 152 flags |= H_COALESCE_CAND;
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 7b3cbde8c783..721c0586b284 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -287,6 +287,9 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
287 unsigned long *savep; 287 unsigned long *savep;
288 struct rtas_error_log *h, *errhdr = NULL; 288 struct rtas_error_log *h, *errhdr = NULL;
289 289
290 /* Mask top two bits */
291 regs->gpr[3] &= ~(0x3UL << 62);
292
290 if (!VALID_FWNMI_BUFFER(regs->gpr[3])) { 293 if (!VALID_FWNMI_BUFFER(regs->gpr[3])) {
291 printk(KERN_ERR "FWNMI: corrupt r3 0x%016lx\n", regs->gpr[3]); 294 printk(KERN_ERR "FWNMI: corrupt r3 0x%016lx\n", regs->gpr[3]);
292 return NULL; 295 return NULL;
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 6b499870662f..b0e6435b2f02 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -91,7 +91,15 @@ struct thread_struct {
91#endif 91#endif
92}; 92};
93 93
94#define PER_FLAG_NO_TE 1UL /* Flag to disable transactions. */ 94/* Flag to disable transactions. */
95#define PER_FLAG_NO_TE 1UL
96/* Flag to enable random transaction aborts. */
97#define PER_FLAG_TE_ABORT_RAND 2UL
98/* Flag to specify random transaction abort mode:
99 * - abort each transaction at a random instruction before TEND if set.
100 * - abort random transactions at a random instruction if cleared.
101 */
102#define PER_FLAG_TE_ABORT_RAND_TEND 4UL
95 103
96typedef struct thread_struct thread_struct; 104typedef struct thread_struct thread_struct;
97 105
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index f3a9e0f92704..80b6f11263c4 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -10,7 +10,7 @@
10#include <linux/thread_info.h> 10#include <linux/thread_info.h>
11 11
12extern struct task_struct *__switch_to(void *, void *); 12extern struct task_struct *__switch_to(void *, void *);
13extern void update_per_regs(struct task_struct *task); 13extern void update_cr_regs(struct task_struct *task);
14 14
15static inline void save_fp_regs(s390_fp_regs *fpregs) 15static inline void save_fp_regs(s390_fp_regs *fpregs)
16{ 16{
@@ -86,7 +86,7 @@ static inline void restore_access_regs(unsigned int *acrs)
86 restore_fp_regs(&next->thread.fp_regs); \ 86 restore_fp_regs(&next->thread.fp_regs); \
87 restore_access_regs(&next->thread.acrs[0]); \ 87 restore_access_regs(&next->thread.acrs[0]); \
88 restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \ 88 restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \
89 update_per_regs(next); \ 89 update_cr_regs(next); \
90 } \ 90 } \
91 prev = __switch_to(prev,next); \ 91 prev = __switch_to(prev,next); \
92} while (0) 92} while (0)
diff --git a/arch/s390/include/uapi/asm/ptrace.h b/arch/s390/include/uapi/asm/ptrace.h
index 3aa9f1ec5b29..7a84619e315e 100644
--- a/arch/s390/include/uapi/asm/ptrace.h
+++ b/arch/s390/include/uapi/asm/ptrace.h
@@ -400,6 +400,7 @@ typedef struct
400#define PTRACE_POKE_SYSTEM_CALL 0x5008 400#define PTRACE_POKE_SYSTEM_CALL 0x5008
401#define PTRACE_ENABLE_TE 0x5009 401#define PTRACE_ENABLE_TE 0x5009
402#define PTRACE_DISABLE_TE 0x5010 402#define PTRACE_DISABLE_TE 0x5010
403#define PTRACE_TE_ABORT_RAND 0x5011
403 404
404/* 405/*
405 * PT_PROT definition is loosely based on hppa bsd definition in 406 * PT_PROT definition is loosely based on hppa bsd definition in
diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c
index 64b24650e4f8..dd62071624be 100644
--- a/arch/s390/kernel/cache.c
+++ b/arch/s390/kernel/cache.c
@@ -173,7 +173,7 @@ error:
173 } 173 }
174} 174}
175 175
176static struct cache_dir *__cpuinit cache_create_cache_dir(int cpu) 176static struct cache_dir *cache_create_cache_dir(int cpu)
177{ 177{
178 struct cache_dir *cache_dir; 178 struct cache_dir *cache_dir;
179 struct kobject *kobj = NULL; 179 struct kobject *kobj = NULL;
@@ -289,9 +289,8 @@ static struct kobj_type cache_index_type = {
289 .default_attrs = cache_index_default_attrs, 289 .default_attrs = cache_index_default_attrs,
290}; 290};
291 291
292static int __cpuinit cache_create_index_dir(struct cache_dir *cache_dir, 292static int cache_create_index_dir(struct cache_dir *cache_dir,
293 struct cache *cache, int index, 293 struct cache *cache, int index, int cpu)
294 int cpu)
295{ 294{
296 struct cache_index_dir *index_dir; 295 struct cache_index_dir *index_dir;
297 int rc; 296 int rc;
@@ -313,7 +312,7 @@ out:
313 return rc; 312 return rc;
314} 313}
315 314
316static int __cpuinit cache_add_cpu(int cpu) 315static int cache_add_cpu(int cpu)
317{ 316{
318 struct cache_dir *cache_dir; 317 struct cache_dir *cache_dir;
319 struct cache *cache; 318 struct cache *cache;
@@ -335,7 +334,7 @@ static int __cpuinit cache_add_cpu(int cpu)
335 return 0; 334 return 0;
336} 335}
337 336
338static void __cpuinit cache_remove_cpu(int cpu) 337static void cache_remove_cpu(int cpu)
339{ 338{
340 struct cache_index_dir *index, *next; 339 struct cache_index_dir *index, *next;
341 struct cache_dir *cache_dir; 340 struct cache_dir *cache_dir;
@@ -354,8 +353,8 @@ static void __cpuinit cache_remove_cpu(int cpu)
354 cache_dir_cpu[cpu] = NULL; 353 cache_dir_cpu[cpu] = NULL;
355} 354}
356 355
357static int __cpuinit cache_hotplug(struct notifier_block *nfb, 356static int cache_hotplug(struct notifier_block *nfb, unsigned long action,
358 unsigned long action, void *hcpu) 357 void *hcpu)
359{ 358{
360 int cpu = (long)hcpu; 359 int cpu = (long)hcpu;
361 int rc = 0; 360 int rc = 0;
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index f703d91bf720..d8f355657171 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -21,6 +21,48 @@
21#define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y))) 21#define PTR_SUB(x, y) (((char *) (x)) - ((unsigned long) (y)))
22#define PTR_DIFF(x, y) ((unsigned long)(((char *) (x)) - ((unsigned long) (y)))) 22#define PTR_DIFF(x, y) ((unsigned long)(((char *) (x)) - ((unsigned long) (y))))
23 23
24
25/*
26 * Return physical address for virtual address
27 */
28static inline void *load_real_addr(void *addr)
29{
30 unsigned long real_addr;
31
32 asm volatile(
33 " lra %0,0(%1)\n"
34 " jz 0f\n"
35 " la %0,0\n"
36 "0:"
37 : "=a" (real_addr) : "a" (addr) : "cc");
38 return (void *)real_addr;
39}
40
41/*
42 * Copy up to one page to vmalloc or real memory
43 */
44static ssize_t copy_page_real(void *buf, void *src, size_t csize)
45{
46 size_t size;
47
48 if (is_vmalloc_addr(buf)) {
49 BUG_ON(csize >= PAGE_SIZE);
50 /* If buf is not page aligned, copy first part */
51 size = min(roundup(__pa(buf), PAGE_SIZE) - __pa(buf), csize);
52 if (size) {
53 if (memcpy_real(load_real_addr(buf), src, size))
54 return -EFAULT;
55 buf += size;
56 src += size;
57 }
58 /* Copy second part */
59 size = csize - size;
60 return (size) ? memcpy_real(load_real_addr(buf), src, size) : 0;
61 } else {
62 return memcpy_real(buf, src, csize);
63 }
64}
65
24/* 66/*
25 * Copy one page from "oldmem" 67 * Copy one page from "oldmem"
26 * 68 *
@@ -32,6 +74,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
32 size_t csize, unsigned long offset, int userbuf) 74 size_t csize, unsigned long offset, int userbuf)
33{ 75{
34 unsigned long src; 76 unsigned long src;
77 int rc;
35 78
36 if (!csize) 79 if (!csize)
37 return 0; 80 return 0;
@@ -43,11 +86,11 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
43 src < OLDMEM_BASE + OLDMEM_SIZE) 86 src < OLDMEM_BASE + OLDMEM_SIZE)
44 src -= OLDMEM_BASE; 87 src -= OLDMEM_BASE;
45 if (userbuf) 88 if (userbuf)
46 copy_to_user_real((void __force __user *) buf, (void *) src, 89 rc = copy_to_user_real((void __force __user *) buf,
47 csize); 90 (void *) src, csize);
48 else 91 else
49 memcpy_real(buf, (void *) src, csize); 92 rc = copy_page_real(buf, (void *) src, csize);
50 return csize; 93 return (rc == 0) ? csize : rc;
51} 94}
52 95
53/* 96/*
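Annotation: copy_page_real() above splits a copy into a head fragment running up to the destination's next page boundary and a tail fragment, so each memcpy_real() call stays within a single page of the buffer. The boundary arithmetic on its own, with made-up addresses:

	#include <stdio.h>

	#define PAGE_SIZE 4096UL

	static unsigned long roundup_page(unsigned long x)
	{
		return (x + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
	}

	int main(void)
	{
		unsigned long pa    = 0x12345f00;	/* hypothetical physical address of buf */
		unsigned long csize = 0x300;		/* bytes to copy */

		unsigned long head = roundup_page(pa) - pa;	/* distance to page boundary */
		if (head > csize)
			head = csize;
		unsigned long tail = csize - head;

		printf("head copy: %#lx bytes, tail copy: %#lx bytes\n", head, tail);
		/* -> head 0x100 up to the boundary, tail 0x200 from the next page */
		return 0;
	}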
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 390d9ae57bb2..fb99c2057b85 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -639,8 +639,8 @@ static struct pmu cpumf_pmu = {
639 .cancel_txn = cpumf_pmu_cancel_txn, 639 .cancel_txn = cpumf_pmu_cancel_txn,
640}; 640};
641 641
642static int __cpuinit cpumf_pmu_notifier(struct notifier_block *self, 642static int cpumf_pmu_notifier(struct notifier_block *self, unsigned long action,
643 unsigned long action, void *hcpu) 643 void *hcpu)
644{ 644{
645 unsigned int cpu = (long) hcpu; 645 unsigned int cpu = (long) hcpu;
646 int flags; 646 int flags;
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 753c41d0ffd3..24612029f450 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -21,7 +21,7 @@ static DEFINE_PER_CPU(struct cpuid, cpu_id);
21/* 21/*
22 * cpu_init - initializes state that is per-CPU. 22 * cpu_init - initializes state that is per-CPU.
23 */ 23 */
24void __cpuinit cpu_init(void) 24void cpu_init(void)
25{ 25{
26 struct s390_idle_data *idle = &__get_cpu_var(s390_idle); 26 struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
27 struct cpuid *id = &__get_cpu_var(cpu_id); 27 struct cpuid *id = &__get_cpu_var(cpu_id);
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index a314c57f4e94..e9fadb04e3c6 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -47,7 +47,7 @@ enum s390_regset {
47 REGSET_GENERAL_EXTENDED, 47 REGSET_GENERAL_EXTENDED,
48}; 48};
49 49
50void update_per_regs(struct task_struct *task) 50void update_cr_regs(struct task_struct *task)
51{ 51{
52 struct pt_regs *regs = task_pt_regs(task); 52 struct pt_regs *regs = task_pt_regs(task);
53 struct thread_struct *thread = &task->thread; 53 struct thread_struct *thread = &task->thread;
@@ -56,17 +56,25 @@ void update_per_regs(struct task_struct *task)
56#ifdef CONFIG_64BIT 56#ifdef CONFIG_64BIT
57 /* Take care of the enable/disable of transactional execution. */ 57 /* Take care of the enable/disable of transactional execution. */
58 if (MACHINE_HAS_TE) { 58 if (MACHINE_HAS_TE) {
59 unsigned long cr0, cr0_new; 59 unsigned long cr[3], cr_new[3];
60 60
61 __ctl_store(cr0, 0, 0); 61 __ctl_store(cr, 0, 2);
62 /* set or clear transaction execution bits 8 and 9. */ 62 cr_new[1] = cr[1];
63 /* Set or clear transaction execution TXC/PIFO bits 8 and 9. */
63 if (task->thread.per_flags & PER_FLAG_NO_TE) 64 if (task->thread.per_flags & PER_FLAG_NO_TE)
64 cr0_new = cr0 & ~(3UL << 54); 65 cr_new[0] = cr[0] & ~(3UL << 54);
65 else 66 else
66 cr0_new = cr0 | (3UL << 54); 67 cr_new[0] = cr[0] | (3UL << 54);
67 /* Only load control register 0 if necessary. */ 68 /* Set or clear transaction execution TDC bits 62 and 63. */
68 if (cr0 != cr0_new) 69 cr_new[2] = cr[2] & ~3UL;
69 __ctl_load(cr0_new, 0, 0); 70 if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
71 if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND_TEND)
72 cr_new[2] |= 1UL;
73 else
74 cr_new[2] |= 2UL;
75 }
76 if (memcmp(&cr_new, &cr, sizeof(cr)))
77 __ctl_load(cr_new, 0, 2);
70 } 78 }
71#endif 79#endif
72 /* Copy user specified PER registers */ 80 /* Copy user specified PER registers */
@@ -100,14 +108,14 @@ void user_enable_single_step(struct task_struct *task)
100{ 108{
101 set_tsk_thread_flag(task, TIF_SINGLE_STEP); 109 set_tsk_thread_flag(task, TIF_SINGLE_STEP);
102 if (task == current) 110 if (task == current)
103 update_per_regs(task); 111 update_cr_regs(task);
104} 112}
105 113
106void user_disable_single_step(struct task_struct *task) 114void user_disable_single_step(struct task_struct *task)
107{ 115{
108 clear_tsk_thread_flag(task, TIF_SINGLE_STEP); 116 clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
109 if (task == current) 117 if (task == current)
110 update_per_regs(task); 118 update_cr_regs(task);
111} 119}
112 120
113/* 121/*
@@ -447,6 +455,26 @@ long arch_ptrace(struct task_struct *child, long request,
447 if (!MACHINE_HAS_TE) 455 if (!MACHINE_HAS_TE)
448 return -EIO; 456 return -EIO;
449 child->thread.per_flags |= PER_FLAG_NO_TE; 457 child->thread.per_flags |= PER_FLAG_NO_TE;
458 child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
459 return 0;
460 case PTRACE_TE_ABORT_RAND:
461 if (!MACHINE_HAS_TE || (child->thread.per_flags & PER_FLAG_NO_TE))
462 return -EIO;
463 switch (data) {
464 case 0UL:
465 child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND;
466 break;
467 case 1UL:
468 child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
469 child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND_TEND;
470 break;
471 case 2UL:
472 child->thread.per_flags |= PER_FLAG_TE_ABORT_RAND;
473 child->thread.per_flags &= ~PER_FLAG_TE_ABORT_RAND_TEND;
474 break;
475 default:
476 return -EINVAL;
477 }
450 return 0; 478 return 0;
451 default: 479 default:
452 /* Removing high order bit from addr (only for 31 bit). */ 480 /* Removing high order bit from addr (only for 31 bit). */
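Annotation: the new request takes its mode in the ptrace data argument: 0 turns random transaction aborts off, 1 aborts each transaction at a random point before TEND, 2 aborts random transactions. A hedged userspace sketch of driving it from a tracer; this is s390-only, and the request value 0x5011 is taken from the uapi hunk above.

	#include <sys/ptrace.h>
	#include <sys/wait.h>
	#include <signal.h>
	#include <stdio.h>
	#include <unistd.h>

	#ifndef PTRACE_TE_ABORT_RAND
	#define PTRACE_TE_ABORT_RAND 0x5011	/* value from the uapi hunk above */
	#endif

	int main(void)
	{
		pid_t child = fork();

		if (child == 0) {
			ptrace(PTRACE_TRACEME, 0, NULL, NULL);
			raise(SIGSTOP);		/* let the parent apply the setting */
			/* ... transactional workload would run here ... */
			_exit(0);
		}

		waitpid(child, NULL, 0);
		/* data = 1: abort each transaction at a random instruction before TEND */
		if (ptrace(PTRACE_TE_ABORT_RAND, child, NULL, (void *)1UL) < 0)
			perror("PTRACE_TE_ABORT_RAND (fails without s390 TE support)");
		ptrace(PTRACE_CONT, child, NULL, NULL);
		waitpid(child, NULL, 0);
		return 0;
	}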
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 15a016c10563..d386c4e9d2e5 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -165,7 +165,7 @@ static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
165 pcpu_sigp_retry(pcpu, order, 0); 165 pcpu_sigp_retry(pcpu, order, 0);
166} 166}
167 167
168static int __cpuinit pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu) 168static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
169{ 169{
170 struct _lowcore *lc; 170 struct _lowcore *lc;
171 171
@@ -616,10 +616,9 @@ static struct sclp_cpu_info *smp_get_cpu_info(void)
616 return info; 616 return info;
617} 617}
618 618
619static int __cpuinit smp_add_present_cpu(int cpu); 619static int smp_add_present_cpu(int cpu);
620 620
621static int __cpuinit __smp_rescan_cpus(struct sclp_cpu_info *info, 621static int __smp_rescan_cpus(struct sclp_cpu_info *info, int sysfs_add)
622 int sysfs_add)
623{ 622{
624 struct pcpu *pcpu; 623 struct pcpu *pcpu;
625 cpumask_t avail; 624 cpumask_t avail;
@@ -685,7 +684,7 @@ static void __init smp_detect_cpus(void)
685/* 684/*
686 * Activate a secondary processor. 685 * Activate a secondary processor.
687 */ 686 */
688static void __cpuinit smp_start_secondary(void *cpuvoid) 687static void smp_start_secondary(void *cpuvoid)
689{ 688{
690 S390_lowcore.last_update_clock = get_tod_clock(); 689 S390_lowcore.last_update_clock = get_tod_clock();
691 S390_lowcore.restart_stack = (unsigned long) restart_stack; 690 S390_lowcore.restart_stack = (unsigned long) restart_stack;
@@ -708,7 +707,7 @@ static void __cpuinit smp_start_secondary(void *cpuvoid)
708} 707}
709 708
710/* Upping and downing of CPUs */ 709/* Upping and downing of CPUs */
711int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle) 710int __cpu_up(unsigned int cpu, struct task_struct *tidle)
712{ 711{
713 struct pcpu *pcpu; 712 struct pcpu *pcpu;
714 int rc; 713 int rc;
@@ -964,8 +963,8 @@ static struct attribute_group cpu_online_attr_group = {
964 .attrs = cpu_online_attrs, 963 .attrs = cpu_online_attrs,
965}; 964};
966 965
967static int __cpuinit smp_cpu_notify(struct notifier_block *self, 966static int smp_cpu_notify(struct notifier_block *self, unsigned long action,
968 unsigned long action, void *hcpu) 967 void *hcpu)
969{ 968{
970 unsigned int cpu = (unsigned int)(long)hcpu; 969 unsigned int cpu = (unsigned int)(long)hcpu;
971 struct cpu *c = &pcpu_devices[cpu].cpu; 970 struct cpu *c = &pcpu_devices[cpu].cpu;
@@ -983,7 +982,7 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self,
983 return notifier_from_errno(err); 982 return notifier_from_errno(err);
984} 983}
985 984
986static int __cpuinit smp_add_present_cpu(int cpu) 985static int smp_add_present_cpu(int cpu)
987{ 986{
988 struct cpu *c = &pcpu_devices[cpu].cpu; 987 struct cpu *c = &pcpu_devices[cpu].cpu;
989 struct device *s = &c->dev; 988 struct device *s = &c->dev;
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
index 62f89d98e880..811f542b8ed4 100644
--- a/arch/s390/kernel/sysinfo.c
+++ b/arch/s390/kernel/sysinfo.c
@@ -418,7 +418,7 @@ void s390_adjust_jiffies(void)
418/* 418/*
419 * calibrate the delay loop 419 * calibrate the delay loop
420 */ 420 */
421void __cpuinit calibrate_delay(void) 421void calibrate_delay(void)
422{ 422{
423 s390_adjust_jiffies(); 423 s390_adjust_jiffies();
424 /* Print the good old Bogomips line .. */ 424 /* Print the good old Bogomips line .. */
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 3fb09359eda6..9b9c1b78ec67 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -371,14 +371,14 @@ EXPORT_SYMBOL(del_virt_timer);
371/* 371/*
372 * Start the virtual CPU timer on the current CPU. 372 * Start the virtual CPU timer on the current CPU.
373 */ 373 */
374void __cpuinit init_cpu_vtimer(void) 374void init_cpu_vtimer(void)
375{ 375{
376 /* set initial cpu timer */ 376 /* set initial cpu timer */
377 set_vtimer(VTIMER_MAX_SLICE); 377 set_vtimer(VTIMER_MAX_SLICE);
378} 378}
379 379
380static int __cpuinit s390_nohz_notify(struct notifier_block *self, 380static int s390_nohz_notify(struct notifier_block *self, unsigned long action,
381 unsigned long action, void *hcpu) 381 void *hcpu)
382{ 382{
383 struct s390_idle_data *idle; 383 struct s390_idle_data *idle;
384 long cpu = (long) hcpu; 384 long cpu = (long) hcpu;
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 047c3e4c59a2..f00aefb66a4e 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -639,8 +639,8 @@ out:
639 put_task_struct(tsk); 639 put_task_struct(tsk);
640} 640}
641 641
642static int __cpuinit pfault_cpu_notify(struct notifier_block *self, 642static int pfault_cpu_notify(struct notifier_block *self, unsigned long action,
643 unsigned long action, void *hcpu) 643 void *hcpu)
644{ 644{
645 struct thread_struct *thread, *next; 645 struct thread_struct *thread, *next;
646 struct task_struct *tsk; 646 struct task_struct *tsk;
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 82f165f8078c..d5f10a43a58f 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -9,6 +9,8 @@
9#include <linux/netdevice.h> 9#include <linux/netdevice.h>
10#include <linux/if_vlan.h> 10#include <linux/if_vlan.h>
11#include <linux/filter.h> 11#include <linux/filter.h>
12#include <linux/random.h>
13#include <linux/init.h>
12#include <asm/cacheflush.h> 14#include <asm/cacheflush.h>
13#include <asm/processor.h> 15#include <asm/processor.h>
14#include <asm/facility.h> 16#include <asm/facility.h>
@@ -221,6 +223,37 @@ static void bpf_jit_epilogue(struct bpf_jit *jit)
221 EMIT2(0x07fe); 223 EMIT2(0x07fe);
222} 224}
223 225
226/* Helper to find the offset of pkt_type in sk_buff
227 * Make sure its still a 3bit field starting at the MSBs within a byte.
228 */
229#define PKT_TYPE_MAX 0xe0
230static int pkt_type_offset;
231
232static int __init bpf_pkt_type_offset_init(void)
233{
234 struct sk_buff skb_probe = {
235 .pkt_type = ~0,
236 };
237 char *ct = (char *)&skb_probe;
238 int off;
239
240 pkt_type_offset = -1;
241 for (off = 0; off < sizeof(struct sk_buff); off++) {
242 if (!ct[off])
243 continue;
244 if (ct[off] == PKT_TYPE_MAX)
245 pkt_type_offset = off;
246 else {
247 /* Found non matching bit pattern, fix needed. */
248 WARN_ON_ONCE(1);
249 pkt_type_offset = -1;
250 return -1;
251 }
252 }
253 return 0;
254}
255device_initcall(bpf_pkt_type_offset_init);
256
224/* 257/*
225 * make sure we dont leak kernel information to user 258 * make sure we dont leak kernel information to user
226 */ 259 */
@@ -720,6 +753,16 @@ call_fn: /* lg %r1,<d(function)>(%r13) */
720 EMIT4_DISP(0x88500000, 12); 753 EMIT4_DISP(0x88500000, 12);
721 } 754 }
722 break; 755 break;
756 case BPF_S_ANC_PKTTYPE:
757 if (pkt_type_offset < 0)
758 goto out;
759 /* lhi %r5,0 */
760 EMIT4(0xa7580000);
761 /* ic %r5,<d(pkt_type_offset)>(%r2) */
762 EMIT4_DISP(0x43502000, pkt_type_offset);
763 /* srl %r5,5 */
764 EMIT4_DISP(0x88500000, 5);
765 break;
723 case BPF_S_ANC_CPU: /* A = smp_processor_id() */ 766 case BPF_S_ANC_CPU: /* A = smp_processor_id() */
724#ifdef CONFIG_SMP 767#ifdef CONFIG_SMP
725 /* l %r5,<d(cpu_nr)> */ 768 /* l %r5,<d(cpu_nr)> */
@@ -738,8 +781,41 @@ out:
738 return -1; 781 return -1;
739} 782}
740 783
784/*
785 * Note: for security reasons, bpf code will follow a randomly
786 * sized amount of illegal instructions.
787 */
788struct bpf_binary_header {
789 unsigned int pages;
790 u8 image[];
791};
792
793static struct bpf_binary_header *bpf_alloc_binary(unsigned int bpfsize,
794 u8 **image_ptr)
795{
796 struct bpf_binary_header *header;
797 unsigned int sz, hole;
798
799 /* Most BPF filters are really small, but if some of them fill a page,
800 * allow at least 128 extra bytes for illegal instructions.
801 */
802 sz = round_up(bpfsize + sizeof(*header) + 128, PAGE_SIZE);
803 header = module_alloc(sz);
804 if (!header)
805 return NULL;
806 memset(header, 0, sz);
807 header->pages = sz / PAGE_SIZE;
808 hole = sz - bpfsize + sizeof(*header);
809 /* Insert random number of illegal instructions before BPF code
810 * and make sure the first instruction starts at an even address.
811 */
812 *image_ptr = &header->image[(prandom_u32() % hole) & -2];
813 return header;
814}
815
741void bpf_jit_compile(struct sk_filter *fp) 816void bpf_jit_compile(struct sk_filter *fp)
742{ 817{
818 struct bpf_binary_header *header = NULL;
743 unsigned long size, prg_len, lit_len; 819 unsigned long size, prg_len, lit_len;
744 struct bpf_jit jit, cjit; 820 struct bpf_jit jit, cjit;
745 unsigned int *addrs; 821 unsigned int *addrs;
@@ -772,12 +848,11 @@ void bpf_jit_compile(struct sk_filter *fp)
772 } else if (jit.prg == cjit.prg && jit.lit == cjit.lit) { 848 } else if (jit.prg == cjit.prg && jit.lit == cjit.lit) {
773 prg_len = jit.prg - jit.start; 849 prg_len = jit.prg - jit.start;
774 lit_len = jit.lit - jit.mid; 850 lit_len = jit.lit - jit.mid;
775 size = max_t(unsigned long, prg_len + lit_len, 851 size = prg_len + lit_len;
776 sizeof(struct work_struct));
777 if (size >= BPF_SIZE_MAX) 852 if (size >= BPF_SIZE_MAX)
778 goto out; 853 goto out;
779 jit.start = module_alloc(size); 854 header = bpf_alloc_binary(size, &jit.start);
780 if (!jit.start) 855 if (!header)
781 goto out; 856 goto out;
782 jit.prg = jit.mid = jit.start + prg_len; 857 jit.prg = jit.mid = jit.start + prg_len;
783 jit.lit = jit.end = jit.start + prg_len + lit_len; 858 jit.lit = jit.end = jit.start + prg_len + lit_len;
@@ -788,37 +863,25 @@ void bpf_jit_compile(struct sk_filter *fp)
788 cjit = jit; 863 cjit = jit;
789 } 864 }
790 if (bpf_jit_enable > 1) { 865 if (bpf_jit_enable > 1) {
791 pr_err("flen=%d proglen=%lu pass=%d image=%p\n", 866 bpf_jit_dump(fp->len, jit.end - jit.start, pass, jit.start);
792 fp->len, jit.end - jit.start, pass, jit.start); 867 if (jit.start)
793 if (jit.start) {
794 printk(KERN_ERR "JIT code:\n");
795 print_fn_code(jit.start, jit.mid - jit.start); 868 print_fn_code(jit.start, jit.mid - jit.start);
796 print_hex_dump(KERN_ERR, "JIT literals:\n",
797 DUMP_PREFIX_ADDRESS, 16, 1,
798 jit.mid, jit.end - jit.mid, false);
799 }
800 } 869 }
801 if (jit.start) 870 if (jit.start) {
871 set_memory_ro((unsigned long)header, header->pages);
802 fp->bpf_func = (void *) jit.start; 872 fp->bpf_func = (void *) jit.start;
873 }
803out: 874out:
804 kfree(addrs); 875 kfree(addrs);
805} 876}
806 877
807static void jit_free_defer(struct work_struct *arg)
808{
809 module_free(NULL, arg);
810}
811
812/* run from softirq, we must use a work_struct to call
813 * module_free() from process context
814 */
815void bpf_jit_free(struct sk_filter *fp) 878void bpf_jit_free(struct sk_filter *fp)
816{ 879{
817 struct work_struct *work; 880 unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
881 struct bpf_binary_header *header = (void *)addr;
818 882
819 if (fp->bpf_func == sk_run_filter) 883 if (fp->bpf_func == sk_run_filter)
820 return; 884 return;
821 work = (struct work_struct *)fp->bpf_func; 885 set_memory_rw(addr, header->pages);
822 INIT_WORK(work, jit_free_defer); 886 module_free(NULL, header);
823 schedule_work(work);
824} 887}
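The bpf_jit_comp.c changes above replace the plain module_alloc() of the JIT image with bpf_alloc_binary(), which rounds the allocation up to whole pages and starts the emitted code at a random, even offset inside the leftover hole. A minimal user-space sketch of that padding scheme follows; it assumes ordinary calloc()/rand() in place of module_alloc()/prandom_u32(), clamps the offset to the actual slack, and is an illustration of the idea rather than the kernel implementation:

#include <stdint.h>
#include <stdlib.h>

#define PAGE_SZ 4096u

struct jit_header {
	unsigned int pages;
	uint8_t image[];	/* code is placed somewhere inside this area */
};

static struct jit_header *alloc_jit_image(unsigned int bpfsize, uint8_t **image)
{
	unsigned int sz, hole;
	struct jit_header *hdr;

	/* at least 128 bytes of slack, rounded up to whole pages */
	sz = (bpfsize + sizeof(*hdr) + 128 + PAGE_SZ - 1) & ~(PAGE_SZ - 1);
	hdr = calloc(1, sz);
	if (!hdr)
		return NULL;
	hdr->pages = sz / PAGE_SZ;

	/* slack between the end of the header and the end of the allocation */
	hole = sz - (bpfsize + sizeof(*hdr));

	/* random start inside the hole, forced to an even address */
	*image = &hdr->image[((unsigned int) rand() % hole) & ~1u];
	return hdr;
}

Keeping the page count in the header is what lets the new bpf_jit_free() recover the allocation from the code pointer (mask down to the page boundary), flip the pages back to read-write, and free them directly instead of deferring to a work queue.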
diff --git a/arch/score/mm/tlb-score.c b/arch/score/mm/tlb-score.c
index 6fdb100244c8..004073717de0 100644
--- a/arch/score/mm/tlb-score.c
+++ b/arch/score/mm/tlb-score.c
@@ -240,7 +240,7 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
240 local_irq_restore(flags); 240 local_irq_restore(flags);
241} 241}
242 242
243void __cpuinit tlb_init(void) 243void tlb_init(void)
244{ 244{
245 tlblock_set(0); 245 tlblock_set(0);
246 local_flush_tlb_all(); 246 local_flush_tlb_all();
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index 61a07dafcd46..ecf83cd158dc 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -43,9 +43,9 @@
43 * peripherals (nofpu, nodsp, and so forth). 43 * peripherals (nofpu, nodsp, and so forth).
44 */ 44 */
45#define onchip_setup(x) \ 45#define onchip_setup(x) \
46static int x##_disabled __cpuinitdata = !cpu_has_##x; \ 46static int x##_disabled = !cpu_has_##x; \
47 \ 47 \
48static int __cpuinit x##_setup(char *opts) \ 48static int x##_setup(char *opts) \
49{ \ 49{ \
50 x##_disabled = 1; \ 50 x##_disabled = 1; \
51 return 1; \ 51 return 1; \
@@ -59,7 +59,7 @@ onchip_setup(dsp);
59#define CPUOPM 0xff2f0000 59#define CPUOPM 0xff2f0000
60#define CPUOPM_RABD (1 << 5) 60#define CPUOPM_RABD (1 << 5)
61 61
62static void __cpuinit speculative_execution_init(void) 62static void speculative_execution_init(void)
63{ 63{
64 /* Clear RABD */ 64 /* Clear RABD */
65 __raw_writel(__raw_readl(CPUOPM) & ~CPUOPM_RABD, CPUOPM); 65 __raw_writel(__raw_readl(CPUOPM) & ~CPUOPM_RABD, CPUOPM);
@@ -78,7 +78,7 @@ static void __cpuinit speculative_execution_init(void)
78#define EXPMASK_BRDSSLP (1 << 1) 78#define EXPMASK_BRDSSLP (1 << 1)
79#define EXPMASK_MMCAW (1 << 4) 79#define EXPMASK_MMCAW (1 << 4)
80 80
81static void __cpuinit expmask_init(void) 81static void expmask_init(void)
82{ 82{
83 unsigned long expmask = __raw_readl(EXPMASK); 83 unsigned long expmask = __raw_readl(EXPMASK);
84 84
@@ -217,7 +217,7 @@ static void detect_cache_shape(void)
217 l2_cache_shape = -1; /* No S-cache */ 217 l2_cache_shape = -1; /* No S-cache */
218} 218}
219 219
220static void __cpuinit fpu_init(void) 220static void fpu_init(void)
221{ 221{
222 /* Disable the FPU */ 222 /* Disable the FPU */
223 if (fpu_disabled && (current_cpu_data.flags & CPU_HAS_FPU)) { 223 if (fpu_disabled && (current_cpu_data.flags & CPU_HAS_FPU)) {
@@ -230,7 +230,7 @@ static void __cpuinit fpu_init(void)
230} 230}
231 231
232#ifdef CONFIG_SH_DSP 232#ifdef CONFIG_SH_DSP
233static void __cpuinit release_dsp(void) 233static void release_dsp(void)
234{ 234{
235 unsigned long sr; 235 unsigned long sr;
236 236
@@ -244,7 +244,7 @@ static void __cpuinit release_dsp(void)
244 ); 244 );
245} 245}
246 246
247static void __cpuinit dsp_init(void) 247static void dsp_init(void)
248{ 248{
249 unsigned long sr; 249 unsigned long sr;
250 250
@@ -276,7 +276,7 @@ static void __cpuinit dsp_init(void)
276 release_dsp(); 276 release_dsp();
277} 277}
278#else 278#else
279static inline void __cpuinit dsp_init(void) { } 279static inline void dsp_init(void) { }
280#endif /* CONFIG_SH_DSP */ 280#endif /* CONFIG_SH_DSP */
281 281
282/** 282/**
@@ -295,7 +295,7 @@ static inline void __cpuinit dsp_init(void) { }
295 * Each processor family is still responsible for doing its own probing 295 * Each processor family is still responsible for doing its own probing
296 * and cache configuration in cpu_probe(). 296 * and cache configuration in cpu_probe().
297 */ 297 */
298asmlinkage void __cpuinit cpu_init(void) 298asmlinkage void cpu_init(void)
299{ 299{
300 current_thread_info()->cpu = hard_smp_processor_id(); 300 current_thread_info()->cpu = hard_smp_processor_id();
301 301
diff --git a/arch/sh/kernel/cpu/sh2/probe.c b/arch/sh/kernel/cpu/sh2/probe.c
index bab8e75958ae..6c687ae812ef 100644
--- a/arch/sh/kernel/cpu/sh2/probe.c
+++ b/arch/sh/kernel/cpu/sh2/probe.c
@@ -13,7 +13,7 @@
13#include <asm/processor.h> 13#include <asm/processor.h>
14#include <asm/cache.h> 14#include <asm/cache.h>
15 15
16void __cpuinit cpu_probe(void) 16void cpu_probe(void)
17{ 17{
18#if defined(CONFIG_CPU_SUBTYPE_SH7619) 18#if defined(CONFIG_CPU_SUBTYPE_SH7619)
19 boot_cpu_data.type = CPU_SH7619; 19 boot_cpu_data.type = CPU_SH7619;
diff --git a/arch/sh/kernel/cpu/sh2a/probe.c b/arch/sh/kernel/cpu/sh2a/probe.c
index 5170b6aa4129..3f87971082f1 100644
--- a/arch/sh/kernel/cpu/sh2a/probe.c
+++ b/arch/sh/kernel/cpu/sh2a/probe.c
@@ -13,7 +13,7 @@
13#include <asm/processor.h> 13#include <asm/processor.h>
14#include <asm/cache.h> 14#include <asm/cache.h>
15 15
16void __cpuinit cpu_probe(void) 16void cpu_probe(void)
17{ 17{
18 boot_cpu_data.family = CPU_FAMILY_SH2A; 18 boot_cpu_data.family = CPU_FAMILY_SH2A;
19 19
diff --git a/arch/sh/kernel/cpu/sh3/probe.c b/arch/sh/kernel/cpu/sh3/probe.c
index bf23c322e164..426e1e1dcedc 100644
--- a/arch/sh/kernel/cpu/sh3/probe.c
+++ b/arch/sh/kernel/cpu/sh3/probe.c
@@ -16,7 +16,7 @@
16#include <asm/cache.h> 16#include <asm/cache.h>
17#include <asm/io.h> 17#include <asm/io.h>
18 18
19void __cpuinit cpu_probe(void) 19void cpu_probe(void)
20{ 20{
21 unsigned long addr0, addr1, data0, data1, data2, data3; 21 unsigned long addr0, addr1, data0, data1, data2, data3;
22 22
diff --git a/arch/sh/kernel/cpu/sh4/probe.c b/arch/sh/kernel/cpu/sh4/probe.c
index 0fbbd50bc8ad..a521bcf50695 100644
--- a/arch/sh/kernel/cpu/sh4/probe.c
+++ b/arch/sh/kernel/cpu/sh4/probe.c
@@ -15,7 +15,7 @@
15#include <asm/processor.h> 15#include <asm/processor.h>
16#include <asm/cache.h> 16#include <asm/cache.h>
17 17
18void __cpuinit cpu_probe(void) 18void cpu_probe(void)
19{ 19{
20 unsigned long pvr, prr, cvr; 20 unsigned long pvr, prr, cvr;
21 unsigned long size; 21 unsigned long size;
diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
index 03f2b55757cf..4a298808789c 100644
--- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c
+++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
@@ -124,7 +124,7 @@ static void shx3_update_boot_vector(unsigned int cpu)
124 __raw_writel(STBCR_RESET, STBCR_REG(cpu)); 124 __raw_writel(STBCR_RESET, STBCR_REG(cpu));
125} 125}
126 126
127static int __cpuinit 127static int
128shx3_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) 128shx3_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
129{ 129{
130 unsigned int cpu = (unsigned int)hcpu; 130 unsigned int cpu = (unsigned int)hcpu;
@@ -143,11 +143,11 @@ shx3_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
143 return NOTIFY_OK; 143 return NOTIFY_OK;
144} 144}
145 145
146static struct notifier_block __cpuinitdata shx3_cpu_notifier = { 146static struct notifier_block shx3_cpu_notifier = {
147 .notifier_call = shx3_cpu_callback, 147 .notifier_call = shx3_cpu_callback,
148}; 148};
149 149
150static int __cpuinit register_shx3_cpu_notifier(void) 150static int register_shx3_cpu_notifier(void)
151{ 151{
152 register_hotcpu_notifier(&shx3_cpu_notifier); 152 register_hotcpu_notifier(&shx3_cpu_notifier);
153 return 0; 153 return 0;
diff --git a/arch/sh/kernel/cpu/sh5/probe.c b/arch/sh/kernel/cpu/sh5/probe.c
index 9e882409e4e9..eca427c2f2f3 100644
--- a/arch/sh/kernel/cpu/sh5/probe.c
+++ b/arch/sh/kernel/cpu/sh5/probe.c
@@ -17,7 +17,7 @@
17#include <asm/cache.h> 17#include <asm/cache.h>
18#include <asm/tlb.h> 18#include <asm/tlb.h>
19 19
20void __cpuinit cpu_probe(void) 20void cpu_probe(void)
21{ 21{
22 unsigned long long cir; 22 unsigned long long cir;
23 23
diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c
index 068b8a2759b5..b9cefebda55c 100644
--- a/arch/sh/kernel/perf_event.c
+++ b/arch/sh/kernel/perf_event.c
@@ -367,7 +367,7 @@ static void sh_pmu_setup(int cpu)
367 memset(cpuhw, 0, sizeof(struct cpu_hw_events)); 367 memset(cpuhw, 0, sizeof(struct cpu_hw_events));
368} 368}
369 369
370static int __cpuinit 370static int
371sh_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) 371sh_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
372{ 372{
373 unsigned int cpu = (long)hcpu; 373 unsigned int cpu = (long)hcpu;
@@ -384,7 +384,7 @@ sh_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
384 return NOTIFY_OK; 384 return NOTIFY_OK;
385} 385}
386 386
387int __cpuinit register_sh_pmu(struct sh_pmu *_pmu) 387int register_sh_pmu(struct sh_pmu *_pmu)
388{ 388{
389 if (sh_pmu) 389 if (sh_pmu)
390 return -EBUSY; 390 return -EBUSY;
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
index 055d91b70305..53bc6c4c84ec 100644
--- a/arch/sh/kernel/process.c
+++ b/arch/sh/kernel/process.c
@@ -65,7 +65,7 @@ void arch_task_cache_init(void)
65# define HAVE_SOFTFP 0 65# define HAVE_SOFTFP 0
66#endif 66#endif
67 67
68void __cpuinit init_thread_xstate(void) 68void init_thread_xstate(void)
69{ 69{
70 if (boot_cpu_data.flags & CPU_HAS_FPU) 70 if (boot_cpu_data.flags & CPU_HAS_FPU)
71 xstate_size = sizeof(struct sh_fpu_hard_struct); 71 xstate_size = sizeof(struct sh_fpu_hard_struct);
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index ebe7a7d97215..1cf90e947dbf 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -172,7 +172,7 @@ disable:
172#endif 172#endif
173} 173}
174 174
175void __cpuinit calibrate_delay(void) 175void calibrate_delay(void)
176{ 176{
177 struct clk *clk = clk_get(NULL, "cpu_clk"); 177 struct clk *clk = clk_get(NULL, "cpu_clk");
178 178
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index 45696451f0ea..86a7936a980b 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -37,7 +37,7 @@ struct plat_smp_ops *mp_ops = NULL;
37/* State of each CPU */ 37/* State of each CPU */
38DEFINE_PER_CPU(int, cpu_state) = { 0 }; 38DEFINE_PER_CPU(int, cpu_state) = { 0 };
39 39
40void __cpuinit register_smp_ops(struct plat_smp_ops *ops) 40void register_smp_ops(struct plat_smp_ops *ops)
41{ 41{
42 if (mp_ops) 42 if (mp_ops)
43 printk(KERN_WARNING "Overriding previously set SMP ops\n"); 43 printk(KERN_WARNING "Overriding previously set SMP ops\n");
@@ -45,7 +45,7 @@ void __cpuinit register_smp_ops(struct plat_smp_ops *ops)
45 mp_ops = ops; 45 mp_ops = ops;
46} 46}
47 47
48static inline void __cpuinit smp_store_cpu_info(unsigned int cpu) 48static inline void smp_store_cpu_info(unsigned int cpu)
49{ 49{
50 struct sh_cpuinfo *c = cpu_data + cpu; 50 struct sh_cpuinfo *c = cpu_data + cpu;
51 51
@@ -174,7 +174,7 @@ void native_play_dead(void)
174} 174}
175#endif 175#endif
176 176
177asmlinkage void __cpuinit start_secondary(void) 177asmlinkage void start_secondary(void)
178{ 178{
179 unsigned int cpu = smp_processor_id(); 179 unsigned int cpu = smp_processor_id();
180 struct mm_struct *mm = &init_mm; 180 struct mm_struct *mm = &init_mm;
@@ -215,7 +215,7 @@ extern struct {
215 void *thread_info; 215 void *thread_info;
216} stack_start; 216} stack_start;
217 217
218int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tsk) 218int __cpu_up(unsigned int cpu, struct task_struct *tsk)
219{ 219{
220 unsigned long timeout; 220 unsigned long timeout;
221 221
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
index 5f513a64dedf..68e99f09171d 100644
--- a/arch/sh/kernel/traps_32.c
+++ b/arch/sh/kernel/traps_32.c
@@ -741,7 +741,7 @@ asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
741 die_if_kernel("exception", regs, ex); 741 die_if_kernel("exception", regs, ex);
742} 742}
743 743
744void __cpuinit per_cpu_trap_init(void) 744void per_cpu_trap_init(void)
745{ 745{
746 extern void *vbr_base; 746 extern void *vbr_base;
747 747
diff --git a/arch/sh/kernel/traps_64.c b/arch/sh/kernel/traps_64.c
index f87d20da1791..112ea11c030d 100644
--- a/arch/sh/kernel/traps_64.c
+++ b/arch/sh/kernel/traps_64.c
@@ -810,7 +810,7 @@ asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs)
810 poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0); 810 poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0);
811} 811}
812 812
813void __cpuinit per_cpu_trap_init(void) 813void per_cpu_trap_init(void)
814{ 814{
815 /* Nothing to do for now, VBR initialization later. */ 815 /* Nothing to do for now, VBR initialization later. */
816} 816}
diff --git a/arch/sh/mm/tlb-sh5.c b/arch/sh/mm/tlb-sh5.c
index ff1c40a31cbc..e4bb2a8e0a69 100644
--- a/arch/sh/mm/tlb-sh5.c
+++ b/arch/sh/mm/tlb-sh5.c
@@ -17,7 +17,7 @@
17/** 17/**
18 * sh64_tlb_init - Perform initial setup for the DTLB and ITLB. 18 * sh64_tlb_init - Perform initial setup for the DTLB and ITLB.
19 */ 19 */
20int __cpuinit sh64_tlb_init(void) 20int sh64_tlb_init(void)
21{ 21{
22 /* Assign some sane DTLB defaults */ 22 /* Assign some sane DTLB defaults */
23 cpu_data->dtlb.entries = 64; 23 cpu_data->dtlb.entries = 64;
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
index 11d460f6f9cc..62d6b153ffa2 100644
--- a/arch/sparc/kernel/ds.c
+++ b/arch/sparc/kernel/ds.c
@@ -528,10 +528,8 @@ static void dr_cpu_mark(struct ds_data *resp, int cpu, int ncpus,
528 } 528 }
529} 529}
530 530
531static int __cpuinit dr_cpu_configure(struct ds_info *dp, 531static int dr_cpu_configure(struct ds_info *dp, struct ds_cap_state *cp,
532 struct ds_cap_state *cp, 532 u64 req_num, cpumask_t *mask)
533 u64 req_num,
534 cpumask_t *mask)
535{ 533{
536 struct ds_data *resp; 534 struct ds_data *resp;
537 int resp_len, ncpus, cpu; 535 int resp_len, ncpus, cpu;
@@ -627,9 +625,8 @@ static int dr_cpu_unconfigure(struct ds_info *dp,
627 return 0; 625 return 0;
628} 626}
629 627
630static void __cpuinit dr_cpu_data(struct ds_info *dp, 628static void dr_cpu_data(struct ds_info *dp, struct ds_cap_state *cp, void *buf,
631 struct ds_cap_state *cp, 629 int len)
632 void *buf, int len)
633{ 630{
634 struct ds_data *data = buf; 631 struct ds_data *data = buf;
635 struct dr_cpu_tag *tag = (struct dr_cpu_tag *) (data + 1); 632 struct dr_cpu_tag *tag = (struct dr_cpu_tag *) (data + 1);
diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h
index cc3c5cb47cda..9c179fbfb219 100644
--- a/arch/sparc/kernel/entry.h
+++ b/arch/sparc/kernel/entry.h
@@ -250,7 +250,7 @@ extern struct ino_bucket *ivector_table;
250extern unsigned long ivector_table_pa; 250extern unsigned long ivector_table_pa;
251 251
252extern void init_irqwork_curcpu(void); 252extern void init_irqwork_curcpu(void);
253extern void __cpuinit sun4v_register_mondo_queues(int this_cpu); 253extern void sun4v_register_mondo_queues(int this_cpu);
254 254
255#endif /* CONFIG_SPARC32 */ 255#endif /* CONFIG_SPARC32 */
256#endif /* _ENTRY_H */ 256#endif /* _ENTRY_H */
diff --git a/arch/sparc/kernel/hvtramp.S b/arch/sparc/kernel/hvtramp.S
index 605c960b2fa6..4eb1a5a1d544 100644
--- a/arch/sparc/kernel/hvtramp.S
+++ b/arch/sparc/kernel/hvtramp.S
@@ -16,7 +16,6 @@
16#include <asm/asi.h> 16#include <asm/asi.h>
17#include <asm/pil.h> 17#include <asm/pil.h>
18 18
19 __CPUINIT
20 .align 8 19 .align 8
21 .globl hv_cpu_startup, hv_cpu_startup_end 20 .globl hv_cpu_startup, hv_cpu_startup_end
22 21
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index 9bcbbe2c4e7e..d4840cec2c55 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -835,7 +835,8 @@ void notrace init_irqwork_curcpu(void)
835 * Therefore you cannot make any OBP calls, not even prom_printf, 835 * Therefore you cannot make any OBP calls, not even prom_printf,
836 * from these two routines. 836 * from these two routines.
837 */ 837 */
838static void __cpuinit notrace register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask) 838static void notrace register_one_mondo(unsigned long paddr, unsigned long type,
839 unsigned long qmask)
839{ 840{
840 unsigned long num_entries = (qmask + 1) / 64; 841 unsigned long num_entries = (qmask + 1) / 64;
841 unsigned long status; 842 unsigned long status;
@@ -848,7 +849,7 @@ static void __cpuinit notrace register_one_mondo(unsigned long paddr, unsigned l
848 } 849 }
849} 850}
850 851
851void __cpuinit notrace sun4v_register_mondo_queues(int this_cpu) 852void notrace sun4v_register_mondo_queues(int this_cpu)
852{ 853{
853 struct trap_per_cpu *tb = &trap_block[this_cpu]; 854 struct trap_per_cpu *tb = &trap_block[this_cpu];
854 855
diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c
index d7aa524b7283..6edf955f987c 100644
--- a/arch/sparc/kernel/leon_smp.c
+++ b/arch/sparc/kernel/leon_smp.c
@@ -54,7 +54,7 @@ extern ctxd_t *srmmu_ctx_table_phys;
54static int smp_processors_ready; 54static int smp_processors_ready;
55extern volatile unsigned long cpu_callin_map[NR_CPUS]; 55extern volatile unsigned long cpu_callin_map[NR_CPUS];
56extern cpumask_t smp_commenced_mask; 56extern cpumask_t smp_commenced_mask;
57void __cpuinit leon_configure_cache_smp(void); 57void leon_configure_cache_smp(void);
58static void leon_ipi_init(void); 58static void leon_ipi_init(void);
59 59
60/* IRQ number of LEON IPIs */ 60/* IRQ number of LEON IPIs */
@@ -69,12 +69,12 @@ static inline unsigned long do_swap(volatile unsigned long *ptr,
69 return val; 69 return val;
70} 70}
71 71
72void __cpuinit leon_cpu_pre_starting(void *arg) 72void leon_cpu_pre_starting(void *arg)
73{ 73{
74 leon_configure_cache_smp(); 74 leon_configure_cache_smp();
75} 75}
76 76
77void __cpuinit leon_cpu_pre_online(void *arg) 77void leon_cpu_pre_online(void *arg)
78{ 78{
79 int cpuid = hard_smp_processor_id(); 79 int cpuid = hard_smp_processor_id();
80 80
@@ -106,7 +106,7 @@ void __cpuinit leon_cpu_pre_online(void *arg)
106 106
107extern struct linux_prom_registers smp_penguin_ctable; 107extern struct linux_prom_registers smp_penguin_ctable;
108 108
109void __cpuinit leon_configure_cache_smp(void) 109void leon_configure_cache_smp(void)
110{ 110{
111 unsigned long cfg = sparc_leon3_get_dcachecfg(); 111 unsigned long cfg = sparc_leon3_get_dcachecfg();
112 int me = smp_processor_id(); 112 int me = smp_processor_id();
@@ -186,7 +186,7 @@ void __init leon_boot_cpus(void)
186 186
187} 187}
188 188
189int __cpuinit leon_boot_one_cpu(int i, struct task_struct *idle) 189int leon_boot_one_cpu(int i, struct task_struct *idle)
190{ 190{
191 int timeout; 191 int timeout;
192 192
diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
index 831c001604e8..b90bf23e3aab 100644
--- a/arch/sparc/kernel/mdesc.c
+++ b/arch/sparc/kernel/mdesc.c
@@ -571,9 +571,7 @@ static void __init report_platform_properties(void)
571 mdesc_release(hp); 571 mdesc_release(hp);
572} 572}
573 573
574static void __cpuinit fill_in_one_cache(cpuinfo_sparc *c, 574static void fill_in_one_cache(cpuinfo_sparc *c, struct mdesc_handle *hp, u64 mp)
575 struct mdesc_handle *hp,
576 u64 mp)
577{ 575{
578 const u64 *level = mdesc_get_property(hp, mp, "level", NULL); 576 const u64 *level = mdesc_get_property(hp, mp, "level", NULL);
579 const u64 *size = mdesc_get_property(hp, mp, "size", NULL); 577 const u64 *size = mdesc_get_property(hp, mp, "size", NULL);
@@ -616,7 +614,7 @@ static void __cpuinit fill_in_one_cache(cpuinfo_sparc *c,
616 } 614 }
617} 615}
618 616
619static void __cpuinit mark_core_ids(struct mdesc_handle *hp, u64 mp, int core_id) 617static void mark_core_ids(struct mdesc_handle *hp, u64 mp, int core_id)
620{ 618{
621 u64 a; 619 u64 a;
622 620
@@ -649,7 +647,7 @@ static void __cpuinit mark_core_ids(struct mdesc_handle *hp, u64 mp, int core_id
649 } 647 }
650} 648}
651 649
652static void __cpuinit set_core_ids(struct mdesc_handle *hp) 650static void set_core_ids(struct mdesc_handle *hp)
653{ 651{
654 int idx; 652 int idx;
655 u64 mp; 653 u64 mp;
@@ -674,7 +672,7 @@ static void __cpuinit set_core_ids(struct mdesc_handle *hp)
674 } 672 }
675} 673}
676 674
677static void __cpuinit mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id) 675static void mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id)
678{ 676{
679 u64 a; 677 u64 a;
680 678
@@ -693,7 +691,7 @@ static void __cpuinit mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id
693 } 691 }
694} 692}
695 693
696static void __cpuinit __set_proc_ids(struct mdesc_handle *hp, const char *exec_unit_name) 694static void __set_proc_ids(struct mdesc_handle *hp, const char *exec_unit_name)
697{ 695{
698 int idx; 696 int idx;
699 u64 mp; 697 u64 mp;
@@ -714,14 +712,14 @@ static void __cpuinit __set_proc_ids(struct mdesc_handle *hp, const char *exec_u
714 } 712 }
715} 713}
716 714
717static void __cpuinit set_proc_ids(struct mdesc_handle *hp) 715static void set_proc_ids(struct mdesc_handle *hp)
718{ 716{
719 __set_proc_ids(hp, "exec_unit"); 717 __set_proc_ids(hp, "exec_unit");
720 __set_proc_ids(hp, "exec-unit"); 718 __set_proc_ids(hp, "exec-unit");
721} 719}
722 720
723static void __cpuinit get_one_mondo_bits(const u64 *p, unsigned int *mask, 721static void get_one_mondo_bits(const u64 *p, unsigned int *mask,
724 unsigned long def, unsigned long max) 722 unsigned long def, unsigned long max)
725{ 723{
726 u64 val; 724 u64 val;
727 725
@@ -742,8 +740,8 @@ use_default:
742 *mask = ((1U << def) * 64U) - 1U; 740 *mask = ((1U << def) * 64U) - 1U;
743} 741}
744 742
745static void __cpuinit get_mondo_data(struct mdesc_handle *hp, u64 mp, 743static void get_mondo_data(struct mdesc_handle *hp, u64 mp,
746 struct trap_per_cpu *tb) 744 struct trap_per_cpu *tb)
747{ 745{
748 static int printed; 746 static int printed;
749 const u64 *val; 747 const u64 *val;
@@ -769,7 +767,7 @@ static void __cpuinit get_mondo_data(struct mdesc_handle *hp, u64 mp,
769 } 767 }
770} 768}
771 769
772static void * __cpuinit mdesc_iterate_over_cpus(void *(*func)(struct mdesc_handle *, u64, int, void *), void *arg, cpumask_t *mask) 770static void *mdesc_iterate_over_cpus(void *(*func)(struct mdesc_handle *, u64, int, void *), void *arg, cpumask_t *mask)
773{ 771{
774 struct mdesc_handle *hp = mdesc_grab(); 772 struct mdesc_handle *hp = mdesc_grab();
775 void *ret = NULL; 773 void *ret = NULL;
@@ -799,7 +797,8 @@ out:
799 return ret; 797 return ret;
800} 798}
801 799
802static void * __cpuinit record_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid, void *arg) 800static void *record_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid,
801 void *arg)
803{ 802{
804 ncpus_probed++; 803 ncpus_probed++;
805#ifdef CONFIG_SMP 804#ifdef CONFIG_SMP
@@ -808,7 +807,7 @@ static void * __cpuinit record_one_cpu(struct mdesc_handle *hp, u64 mp, int cpui
808 return NULL; 807 return NULL;
809} 808}
810 809
811void __cpuinit mdesc_populate_present_mask(cpumask_t *mask) 810void mdesc_populate_present_mask(cpumask_t *mask)
812{ 811{
813 if (tlb_type != hypervisor) 812 if (tlb_type != hypervisor)
814 return; 813 return;
@@ -841,7 +840,8 @@ void __init mdesc_get_page_sizes(cpumask_t *mask, unsigned long *pgsz_mask)
841 mdesc_iterate_over_cpus(check_one_pgsz, pgsz_mask, mask); 840 mdesc_iterate_over_cpus(check_one_pgsz, pgsz_mask, mask);
842} 841}
843 842
844static void * __cpuinit fill_in_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid, void *arg) 843static void *fill_in_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid,
844 void *arg)
845{ 845{
846 const u64 *cfreq = mdesc_get_property(hp, mp, "clock-frequency", NULL); 846 const u64 *cfreq = mdesc_get_property(hp, mp, "clock-frequency", NULL);
847 struct trap_per_cpu *tb; 847 struct trap_per_cpu *tb;
@@ -890,7 +890,7 @@ static void * __cpuinit fill_in_one_cpu(struct mdesc_handle *hp, u64 mp, int cpu
890 return NULL; 890 return NULL;
891} 891}
892 892
893void __cpuinit mdesc_fill_in_cpu_data(cpumask_t *mask) 893void mdesc_fill_in_cpu_data(cpumask_t *mask)
894{ 894{
895 struct mdesc_handle *hp; 895 struct mdesc_handle *hp;
896 896
diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c
index e3f2b81c23f1..a102bfba6ea8 100644
--- a/arch/sparc/kernel/smp_32.c
+++ b/arch/sparc/kernel/smp_32.c
@@ -39,7 +39,7 @@
39#include "kernel.h" 39#include "kernel.h"
40#include "irq.h" 40#include "irq.h"
41 41
42volatile unsigned long cpu_callin_map[NR_CPUS] __cpuinitdata = {0,}; 42volatile unsigned long cpu_callin_map[NR_CPUS] = {0,};
43 43
44cpumask_t smp_commenced_mask = CPU_MASK_NONE; 44cpumask_t smp_commenced_mask = CPU_MASK_NONE;
45 45
@@ -53,7 +53,7 @@ const struct sparc32_ipi_ops *sparc32_ipi_ops;
53 * instruction which is much better... 53 * instruction which is much better...
54 */ 54 */
55 55
56void __cpuinit smp_store_cpu_info(int id) 56void smp_store_cpu_info(int id)
57{ 57{
58 int cpu_node; 58 int cpu_node;
59 int mid; 59 int mid;
@@ -120,7 +120,7 @@ void cpu_panic(void)
120 panic("SMP bolixed\n"); 120 panic("SMP bolixed\n");
121} 121}
122 122
123struct linux_prom_registers smp_penguin_ctable __cpuinitdata = { 0 }; 123struct linux_prom_registers smp_penguin_ctable = { 0 };
124 124
125void smp_send_reschedule(int cpu) 125void smp_send_reschedule(int cpu)
126{ 126{
@@ -259,10 +259,10 @@ void __init smp_prepare_boot_cpu(void)
259 set_cpu_possible(cpuid, true); 259 set_cpu_possible(cpuid, true);
260} 260}
261 261
262int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle) 262int __cpu_up(unsigned int cpu, struct task_struct *tidle)
263{ 263{
264 extern int __cpuinit smp4m_boot_one_cpu(int, struct task_struct *); 264 extern int smp4m_boot_one_cpu(int, struct task_struct *);
265 extern int __cpuinit smp4d_boot_one_cpu(int, struct task_struct *); 265 extern int smp4d_boot_one_cpu(int, struct task_struct *);
266 int ret=0; 266 int ret=0;
267 267
268 switch(sparc_cpu_model) { 268 switch(sparc_cpu_model) {
@@ -297,7 +297,7 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
297 return ret; 297 return ret;
298} 298}
299 299
300void __cpuinit arch_cpu_pre_starting(void *arg) 300void arch_cpu_pre_starting(void *arg)
301{ 301{
302 local_ops->cache_all(); 302 local_ops->cache_all();
303 local_ops->tlb_all(); 303 local_ops->tlb_all();
@@ -317,7 +317,7 @@ void __cpuinit arch_cpu_pre_starting(void *arg)
317 } 317 }
318} 318}
319 319
320void __cpuinit arch_cpu_pre_online(void *arg) 320void arch_cpu_pre_online(void *arg)
321{ 321{
322 unsigned int cpuid = hard_smp_processor_id(); 322 unsigned int cpuid = hard_smp_processor_id();
323 323
@@ -344,7 +344,7 @@ void __cpuinit arch_cpu_pre_online(void *arg)
344 } 344 }
345} 345}
346 346
347void __cpuinit sparc_start_secondary(void *arg) 347void sparc_start_secondary(void *arg)
348{ 348{
349 unsigned int cpu; 349 unsigned int cpu;
350 350
@@ -375,7 +375,7 @@ void __cpuinit sparc_start_secondary(void *arg)
375 BUG(); 375 BUG();
376} 376}
377 377
378void __cpuinit smp_callin(void) 378void smp_callin(void)
379{ 379{
380 sparc_start_secondary(NULL); 380 sparc_start_secondary(NULL);
381} 381}
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 77539eda928c..e142545244f2 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -87,7 +87,7 @@ extern void setup_sparc64_timer(void);
87 87
88static volatile unsigned long callin_flag = 0; 88static volatile unsigned long callin_flag = 0;
89 89
90void __cpuinit smp_callin(void) 90void smp_callin(void)
91{ 91{
92 int cpuid = hard_smp_processor_id(); 92 int cpuid = hard_smp_processor_id();
93 93
@@ -281,7 +281,8 @@ static unsigned long kimage_addr_to_ra(void *p)
281 return kern_base + (val - KERNBASE); 281 return kern_base + (val - KERNBASE);
282} 282}
283 283
284static void __cpuinit ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg, void **descrp) 284static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg,
285 void **descrp)
285{ 286{
286 extern unsigned long sparc64_ttable_tl0; 287 extern unsigned long sparc64_ttable_tl0;
287 extern unsigned long kern_locked_tte_data; 288 extern unsigned long kern_locked_tte_data;
@@ -342,7 +343,7 @@ extern unsigned long sparc64_cpu_startup;
342 */ 343 */
343static struct thread_info *cpu_new_thread = NULL; 344static struct thread_info *cpu_new_thread = NULL;
344 345
345static int __cpuinit smp_boot_one_cpu(unsigned int cpu, struct task_struct *idle) 346static int smp_boot_one_cpu(unsigned int cpu, struct task_struct *idle)
346{ 347{
347 unsigned long entry = 348 unsigned long entry =
348 (unsigned long)(&sparc64_cpu_startup); 349 (unsigned long)(&sparc64_cpu_startup);
@@ -1266,7 +1267,7 @@ void smp_fill_in_sib_core_maps(void)
1266 } 1267 }
1267} 1268}
1268 1269
1269int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle) 1270int __cpu_up(unsigned int cpu, struct task_struct *tidle)
1270{ 1271{
1271 int ret = smp_boot_one_cpu(cpu, tidle); 1272 int ret = smp_boot_one_cpu(cpu, tidle);
1272 1273
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c
index c9eb82f23d92..d5c319553fd0 100644
--- a/arch/sparc/kernel/sun4d_smp.c
+++ b/arch/sparc/kernel/sun4d_smp.c
@@ -50,7 +50,7 @@ static inline void show_leds(int cpuid)
50 "i" (ASI_M_CTL)); 50 "i" (ASI_M_CTL));
51} 51}
52 52
53void __cpuinit sun4d_cpu_pre_starting(void *arg) 53void sun4d_cpu_pre_starting(void *arg)
54{ 54{
55 int cpuid = hard_smp_processor_id(); 55 int cpuid = hard_smp_processor_id();
56 56
@@ -62,7 +62,7 @@ void __cpuinit sun4d_cpu_pre_starting(void *arg)
62 cc_set_imsk((cc_get_imsk() & ~0x8000) | 0x4000); 62 cc_set_imsk((cc_get_imsk() & ~0x8000) | 0x4000);
63} 63}
64 64
65void __cpuinit sun4d_cpu_pre_online(void *arg) 65void sun4d_cpu_pre_online(void *arg)
66{ 66{
67 unsigned long flags; 67 unsigned long flags;
68 int cpuid; 68 int cpuid;
@@ -118,7 +118,7 @@ void __init smp4d_boot_cpus(void)
118 local_ops->cache_all(); 118 local_ops->cache_all();
119} 119}
120 120
121int __cpuinit smp4d_boot_one_cpu(int i, struct task_struct *idle) 121int smp4d_boot_one_cpu(int i, struct task_struct *idle)
122{ 122{
123 unsigned long *entry = &sun4d_cpu_startup; 123 unsigned long *entry = &sun4d_cpu_startup;
124 int timeout; 124 int timeout;
diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c
index 8a65f158153d..d3408e72d20c 100644
--- a/arch/sparc/kernel/sun4m_smp.c
+++ b/arch/sparc/kernel/sun4m_smp.c
@@ -34,11 +34,11 @@ swap_ulong(volatile unsigned long *ptr, unsigned long val)
34 return val; 34 return val;
35} 35}
36 36
37void __cpuinit sun4m_cpu_pre_starting(void *arg) 37void sun4m_cpu_pre_starting(void *arg)
38{ 38{
39} 39}
40 40
41void __cpuinit sun4m_cpu_pre_online(void *arg) 41void sun4m_cpu_pre_online(void *arg)
42{ 42{
43 int cpuid = hard_smp_processor_id(); 43 int cpuid = hard_smp_processor_id();
44 44
@@ -75,7 +75,7 @@ void __init smp4m_boot_cpus(void)
75 local_ops->cache_all(); 75 local_ops->cache_all();
76} 76}
77 77
78int __cpuinit smp4m_boot_one_cpu(int i, struct task_struct *idle) 78int smp4m_boot_one_cpu(int i, struct task_struct *idle)
79{ 79{
80 unsigned long *entry = &sun4m_cpu_startup; 80 unsigned long *entry = &sun4m_cpu_startup;
81 int timeout; 81 int timeout;
diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c
index 654e8aad3bbe..c21c673e5f7c 100644
--- a/arch/sparc/kernel/sysfs.c
+++ b/arch/sparc/kernel/sysfs.c
@@ -246,7 +246,7 @@ static void unregister_cpu_online(unsigned int cpu)
246} 246}
247#endif 247#endif
248 248
249static int __cpuinit sysfs_cpu_notify(struct notifier_block *self, 249static int sysfs_cpu_notify(struct notifier_block *self,
250 unsigned long action, void *hcpu) 250 unsigned long action, void *hcpu)
251{ 251{
252 unsigned int cpu = (unsigned int)(long)hcpu; 252 unsigned int cpu = (unsigned int)(long)hcpu;
@@ -266,7 +266,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
266 return NOTIFY_OK; 266 return NOTIFY_OK;
267} 267}
268 268
269static struct notifier_block __cpuinitdata sysfs_cpu_nb = { 269static struct notifier_block sysfs_cpu_nb = {
270 .notifier_call = sysfs_cpu_notify, 270 .notifier_call = sysfs_cpu_notify,
271}; 271};
272 272
diff --git a/arch/sparc/kernel/trampoline_32.S b/arch/sparc/kernel/trampoline_32.S
index 6cdb08cdabf0..76dcbd3c988a 100644
--- a/arch/sparc/kernel/trampoline_32.S
+++ b/arch/sparc/kernel/trampoline_32.S
@@ -18,7 +18,6 @@
18 .globl sun4m_cpu_startup 18 .globl sun4m_cpu_startup
19 .globl sun4d_cpu_startup 19 .globl sun4d_cpu_startup
20 20
21 __CPUINIT
22 .align 4 21 .align 4
23 22
24/* When we start up a cpu for the first time it enters this routine. 23/* When we start up a cpu for the first time it enters this routine.
@@ -94,7 +93,6 @@ smp_panic:
94/* CPUID in bootbus can be found at PA 0xff0140000 */ 93/* CPUID in bootbus can be found at PA 0xff0140000 */
95#define SUN4D_BOOTBUS_CPUID 0xf0140000 94#define SUN4D_BOOTBUS_CPUID 0xf0140000
96 95
97 __CPUINIT
98 .align 4 96 .align 4
99 97
100sun4d_cpu_startup: 98sun4d_cpu_startup:
@@ -146,7 +144,6 @@ sun4d_cpu_startup:
146 144
147 b,a smp_panic 145 b,a smp_panic
148 146
149 __CPUINIT
150 .align 4 147 .align 4
151 .global leon_smp_cpu_startup, smp_penguin_ctable 148 .global leon_smp_cpu_startup, smp_penguin_ctable
152 149
diff --git a/arch/sparc/kernel/trampoline_64.S b/arch/sparc/kernel/trampoline_64.S
index 2e973a26fbda..e0b1e13a0736 100644
--- a/arch/sparc/kernel/trampoline_64.S
+++ b/arch/sparc/kernel/trampoline_64.S
@@ -32,13 +32,11 @@ itlb_load:
32dtlb_load: 32dtlb_load:
33 .asciz "SUNW,dtlb-load" 33 .asciz "SUNW,dtlb-load"
34 34
35 /* XXX __cpuinit this thing XXX */
36#define TRAMP_STACK_SIZE 1024 35#define TRAMP_STACK_SIZE 1024
37 .align 16 36 .align 16
38tramp_stack: 37tramp_stack:
39 .skip TRAMP_STACK_SIZE 38 .skip TRAMP_STACK_SIZE
40 39
41 __CPUINIT
42 .align 8 40 .align 8
43 .globl sparc64_cpu_startup, sparc64_cpu_startup_end 41 .globl sparc64_cpu_startup, sparc64_cpu_startup_end
44sparc64_cpu_startup: 42sparc64_cpu_startup:
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index a9c42a7ffb6a..ed82edad1a39 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -1694,7 +1694,7 @@ static void __init sun4v_ktsb_init(void)
1694#endif 1694#endif
1695} 1695}
1696 1696
1697void __cpuinit sun4v_ktsb_register(void) 1697void sun4v_ktsb_register(void)
1698{ 1698{
1699 unsigned long pa, ret; 1699 unsigned long pa, ret;
1700 1700
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 036c2797dece..5d721df48a72 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -858,7 +858,7 @@ static void __init map_kernel(void)
858 } 858 }
859} 859}
860 860
861void (*poke_srmmu)(void) __cpuinitdata = NULL; 861void (*poke_srmmu)(void) = NULL;
862 862
863extern unsigned long bootmem_init(unsigned long *pages_avail); 863extern unsigned long bootmem_init(unsigned long *pages_avail);
864 864
@@ -1055,7 +1055,7 @@ static void __init init_vac_layout(void)
1055 (int)vac_cache_size, (int)vac_line_size); 1055 (int)vac_cache_size, (int)vac_line_size);
1056} 1056}
1057 1057
1058static void __cpuinit poke_hypersparc(void) 1058static void poke_hypersparc(void)
1059{ 1059{
1060 volatile unsigned long clear; 1060 volatile unsigned long clear;
1061 unsigned long mreg = srmmu_get_mmureg(); 1061 unsigned long mreg = srmmu_get_mmureg();
@@ -1107,7 +1107,7 @@ static void __init init_hypersparc(void)
1107 hypersparc_setup_blockops(); 1107 hypersparc_setup_blockops();
1108} 1108}
1109 1109
1110static void __cpuinit poke_swift(void) 1110static void poke_swift(void)
1111{ 1111{
1112 unsigned long mreg; 1112 unsigned long mreg;
1113 1113
@@ -1287,7 +1287,7 @@ static void turbosparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long
1287} 1287}
1288 1288
1289 1289
1290static void __cpuinit poke_turbosparc(void) 1290static void poke_turbosparc(void)
1291{ 1291{
1292 unsigned long mreg = srmmu_get_mmureg(); 1292 unsigned long mreg = srmmu_get_mmureg();
1293 unsigned long ccreg; 1293 unsigned long ccreg;
@@ -1350,7 +1350,7 @@ static void __init init_turbosparc(void)
1350 poke_srmmu = poke_turbosparc; 1350 poke_srmmu = poke_turbosparc;
1351} 1351}
1352 1352
1353static void __cpuinit poke_tsunami(void) 1353static void poke_tsunami(void)
1354{ 1354{
1355 unsigned long mreg = srmmu_get_mmureg(); 1355 unsigned long mreg = srmmu_get_mmureg();
1356 1356
@@ -1391,7 +1391,7 @@ static void __init init_tsunami(void)
1391 tsunami_setup_blockops(); 1391 tsunami_setup_blockops();
1392} 1392}
1393 1393
1394static void __cpuinit poke_viking(void) 1394static void poke_viking(void)
1395{ 1395{
1396 unsigned long mreg = srmmu_get_mmureg(); 1396 unsigned long mreg = srmmu_get_mmureg();
1397 static int smp_catch; 1397 static int smp_catch;
diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c
index 02e628065012..3ccf2cd7182e 100644
--- a/arch/tile/kernel/irq.c
+++ b/arch/tile/kernel/irq.c
@@ -220,7 +220,7 @@ void __init init_IRQ(void)
220 ipi_init(); 220 ipi_init();
221} 221}
222 222
223void __cpuinit setup_irq_regs(void) 223void setup_irq_regs(void)
224{ 224{
225 /* Enable interrupt delivery. */ 225 /* Enable interrupt delivery. */
226 unmask_irqs(~0UL); 226 unmask_irqs(~0UL);
diff --git a/arch/tile/kernel/messaging.c b/arch/tile/kernel/messaging.c
index 0858ee6b520f..00331af9525d 100644
--- a/arch/tile/kernel/messaging.c
+++ b/arch/tile/kernel/messaging.c
@@ -25,7 +25,7 @@
25/* All messages are stored here */ 25/* All messages are stored here */
26static DEFINE_PER_CPU(HV_MsgState, msg_state); 26static DEFINE_PER_CPU(HV_MsgState, msg_state);
27 27
28void __cpuinit init_messaging(void) 28void init_messaging(void)
29{ 29{
30 /* Allocate storage for messages in kernel space */ 30 /* Allocate storage for messages in kernel space */
31 HV_MsgState *state = &__get_cpu_var(msg_state); 31 HV_MsgState *state = &__get_cpu_var(msg_state);
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 68b542677f6a..eceb8344280f 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -58,8 +58,8 @@ struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
58EXPORT_SYMBOL(node_data); 58EXPORT_SYMBOL(node_data);
59 59
60/* Information on the NUMA nodes that we compute early */ 60/* Information on the NUMA nodes that we compute early */
61unsigned long __cpuinitdata node_start_pfn[MAX_NUMNODES]; 61unsigned long node_start_pfn[MAX_NUMNODES];
62unsigned long __cpuinitdata node_end_pfn[MAX_NUMNODES]; 62unsigned long node_end_pfn[MAX_NUMNODES];
63unsigned long __initdata node_memmap_pfn[MAX_NUMNODES]; 63unsigned long __initdata node_memmap_pfn[MAX_NUMNODES];
64unsigned long __initdata node_percpu_pfn[MAX_NUMNODES]; 64unsigned long __initdata node_percpu_pfn[MAX_NUMNODES];
65unsigned long __initdata node_free_pfn[MAX_NUMNODES]; 65unsigned long __initdata node_free_pfn[MAX_NUMNODES];
@@ -84,7 +84,7 @@ unsigned long __initdata boot_pc = (unsigned long)start_kernel;
84 84
85#ifdef CONFIG_HIGHMEM 85#ifdef CONFIG_HIGHMEM
86/* Page frame index of end of lowmem on each controller. */ 86/* Page frame index of end of lowmem on each controller. */
87unsigned long __cpuinitdata node_lowmem_end_pfn[MAX_NUMNODES]; 87unsigned long node_lowmem_end_pfn[MAX_NUMNODES];
88 88
89/* Number of pages that can be mapped into lowmem. */ 89/* Number of pages that can be mapped into lowmem. */
90static unsigned long __initdata mappable_physpages; 90static unsigned long __initdata mappable_physpages;
@@ -290,7 +290,7 @@ static void *__init setup_pa_va_mapping(void)
290 * This is up to 4 mappings for lowmem, one mapping per memory 290 * This is up to 4 mappings for lowmem, one mapping per memory
291 * controller, plus one for our text segment. 291 * controller, plus one for our text segment.
292 */ 292 */
293static void __cpuinit store_permanent_mappings(void) 293static void store_permanent_mappings(void)
294{ 294{
295 int i; 295 int i;
296 296
@@ -935,7 +935,7 @@ subsys_initcall(topology_init);
935 * So the values we set up here in the hypervisor may be overridden on 935 * So the values we set up here in the hypervisor may be overridden on
936 * the boot cpu as arguments are parsed. 936 * the boot cpu as arguments are parsed.
937 */ 937 */
938static __cpuinit void init_super_pages(void) 938static void init_super_pages(void)
939{ 939{
940#ifdef CONFIG_HUGETLB_SUPER_PAGES 940#ifdef CONFIG_HUGETLB_SUPER_PAGES
941 int i; 941 int i;
@@ -950,7 +950,7 @@ static __cpuinit void init_super_pages(void)
950 * 950 *
951 * Called from setup_arch() on the boot cpu, or online_secondary(). 951 * Called from setup_arch() on the boot cpu, or online_secondary().
952 */ 952 */
953void __cpuinit setup_cpu(int boot) 953void setup_cpu(int boot)
954{ 954{
955 /* The boot cpu sets up its permanent mappings much earlier. */ 955 /* The boot cpu sets up its permanent mappings much earlier. */
956 if (!boot) 956 if (!boot)
diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c
index 44bab29bf2f3..a535655b7089 100644
--- a/arch/tile/kernel/smpboot.c
+++ b/arch/tile/kernel/smpboot.c
@@ -133,14 +133,14 @@ static __init int reset_init_affinity(void)
133} 133}
134late_initcall(reset_init_affinity); 134late_initcall(reset_init_affinity);
135 135
136static struct cpumask cpu_started __cpuinitdata; 136static struct cpumask cpu_started;
137 137
138/* 138/*
139 * Activate a secondary processor. Very minimal; don't add anything 139 * Activate a secondary processor. Very minimal; don't add anything
140 * to this path without knowing what you're doing, since SMP booting 140 * to this path without knowing what you're doing, since SMP booting
141 * is pretty fragile. 141 * is pretty fragile.
142 */ 142 */
143static void __cpuinit start_secondary(void) 143static void start_secondary(void)
144{ 144{
145 int cpuid = smp_processor_id(); 145 int cpuid = smp_processor_id();
146 146
@@ -183,7 +183,7 @@ static void __cpuinit start_secondary(void)
183/* 183/*
184 * Bring a secondary processor online. 184 * Bring a secondary processor online.
185 */ 185 */
186void __cpuinit online_secondary(void) 186void online_secondary(void)
187{ 187{
188 /* 188 /*
189 * low-memory mappings have been cleared, flush them from 189 * low-memory mappings have been cleared, flush them from
@@ -210,7 +210,7 @@ void __cpuinit online_secondary(void)
210 cpu_startup_entry(CPUHP_ONLINE); 210 cpu_startup_entry(CPUHP_ONLINE);
211} 211}
212 212
213int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle) 213int __cpu_up(unsigned int cpu, struct task_struct *tidle)
214{ 214{
215 /* Wait 5s total for all CPUs for them to come online */ 215 /* Wait 5s total for all CPUs for them to come online */
216 static int timeout; 216 static int timeout;
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index 5ac397ec6986..7c353d8c2da9 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -159,7 +159,7 @@ static DEFINE_PER_CPU(struct clock_event_device, tile_timer) = {
159 .set_mode = tile_timer_set_mode, 159 .set_mode = tile_timer_set_mode,
160}; 160};
161 161
162void __cpuinit setup_tile_timer(void) 162void setup_tile_timer(void)
163{ 163{
164 struct clock_event_device *evt = &__get_cpu_var(tile_timer); 164 struct clock_event_device *evt = &__get_cpu_var(tile_timer);
165 165
diff --git a/arch/um/include/shared/frame_kern.h b/arch/um/include/shared/frame_kern.h
index e584e40ee832..f2ca5702a4e2 100644
--- a/arch/um/include/shared/frame_kern.h
+++ b/arch/um/include/shared/frame_kern.h
@@ -6,13 +6,13 @@
6#ifndef __FRAME_KERN_H_ 6#ifndef __FRAME_KERN_H_
7#define __FRAME_KERN_H_ 7#define __FRAME_KERN_H_
8 8
9extern int setup_signal_stack_sc(unsigned long stack_top, int sig, 9extern int setup_signal_stack_sc(unsigned long stack_top, int sig,
10 struct k_sigaction *ka, 10 struct k_sigaction *ka,
11 struct pt_regs *regs, 11 struct pt_regs *regs,
12 sigset_t *mask); 12 sigset_t *mask);
13extern int setup_signal_stack_si(unsigned long stack_top, int sig, 13extern int setup_signal_stack_si(unsigned long stack_top, int sig,
14 struct k_sigaction *ka, 14 struct k_sigaction *ka,
15 struct pt_regs *regs, siginfo_t *info, 15 struct pt_regs *regs, struct siginfo *info,
16 sigset_t *mask); 16 sigset_t *mask);
17 17
18#endif 18#endif
diff --git a/arch/um/kernel/signal.c b/arch/um/kernel/signal.c
index 3e831b3fd07b..f57e02e7910f 100644
--- a/arch/um/kernel/signal.c
+++ b/arch/um/kernel/signal.c
@@ -19,7 +19,7 @@ EXPORT_SYMBOL(unblock_signals);
19 * OK, we're invoking a handler 19 * OK, we're invoking a handler
20 */ 20 */
21static void handle_signal(struct pt_regs *regs, unsigned long signr, 21static void handle_signal(struct pt_regs *regs, unsigned long signr,
22 struct k_sigaction *ka, siginfo_t *info) 22 struct k_sigaction *ka, struct siginfo *info)
23{ 23{
24 sigset_t *oldset = sigmask_to_save(); 24 sigset_t *oldset = sigmask_to_save();
25 int singlestep = 0; 25 int singlestep = 0;
@@ -71,7 +71,7 @@ static void handle_signal(struct pt_regs *regs, unsigned long signr,
71static int kern_do_signal(struct pt_regs *regs) 71static int kern_do_signal(struct pt_regs *regs)
72{ 72{
73 struct k_sigaction ka_copy; 73 struct k_sigaction ka_copy;
74 siginfo_t info; 74 struct siginfo info;
75 int sig, handled_sig = 0; 75 int sig, handled_sig = 0;
76 76
77 while ((sig = get_signal_to_deliver(&info, &ka_copy, regs, NULL)) > 0) { 77 while ((sig = get_signal_to_deliver(&info, &ka_copy, regs, NULL)) > 0) {
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index ff03067a3b14..007d5503f49b 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -123,7 +123,7 @@ void uml_setup_stubs(struct mm_struct *mm)
123 /* dup_mmap already holds mmap_sem */ 123 /* dup_mmap already holds mmap_sem */
124 err = install_special_mapping(mm, STUB_START, STUB_END - STUB_START, 124 err = install_special_mapping(mm, STUB_START, STUB_END - STUB_START,
125 VM_READ | VM_MAYREAD | VM_EXEC | 125 VM_READ | VM_MAYREAD | VM_EXEC |
126 VM_MAYEXEC | VM_DONTCOPY, 126 VM_MAYEXEC | VM_DONTCOPY | VM_PFNMAP,
127 mm->context.stub_pages); 127 mm->context.stub_pages);
128 if (err) { 128 if (err) {
129 printk(KERN_ERR "install_special_mapping returned %d\n", err); 129 printk(KERN_ERR "install_special_mapping returned %d\n", err);
diff --git a/arch/um/kernel/skas/uaccess.c b/arch/um/kernel/skas/uaccess.c
index 1d3e0c17340b..4ffb644d6c07 100644
--- a/arch/um/kernel/skas/uaccess.c
+++ b/arch/um/kernel/skas/uaccess.c
@@ -254,6 +254,6 @@ int strnlen_user(const void __user *str, int len)
254 n = buffer_op((unsigned long) str, len, 0, strnlen_chunk, &count); 254 n = buffer_op((unsigned long) str, len, 0, strnlen_chunk, &count);
255 if (n == 0) 255 if (n == 0)
256 return count + 1; 256 return count + 1;
257 return -EFAULT; 257 return 0;
258} 258}
259EXPORT_SYMBOL(strnlen_user); 259EXPORT_SYMBOL(strnlen_user);
diff --git a/arch/um/os-Linux/mem.c b/arch/um/os-Linux/mem.c
index ba4398056fe9..3c4af77e51a2 100644
--- a/arch/um/os-Linux/mem.c
+++ b/arch/um/os-Linux/mem.c
@@ -53,6 +53,25 @@ static void __init find_tempdir(void)
53} 53}
54 54
55/* 55/*
56 * Remove bytes from the front of the buffer and refill it so that if there's a
57 * partial string that we care about, it will be completed, and we can recognize
58 * it.
59 */
60static int pop(int fd, char *buf, size_t size, size_t npop)
61{
62 ssize_t n;
63 size_t len = strlen(&buf[npop]);
64
65 memmove(buf, &buf[npop], len + 1);
66 n = read(fd, &buf[len], size - len - 1);
67 if (n < 0)
68 return -errno;
69
70 buf[len + n] = '\0';
71 return 1;
72}
73
74/*
56 * This will return 1, with the first character in buf being the 75 * This will return 1, with the first character in buf being the
57 * character following the next instance of c in the file. This will 76 * character following the next instance of c in the file. This will
58 * read the file as needed. If there's an error, -errno is returned; 77 * read the file as needed. If there's an error, -errno is returned;
@@ -61,7 +80,6 @@ static void __init find_tempdir(void)
61static int next(int fd, char *buf, size_t size, char c) 80static int next(int fd, char *buf, size_t size, char c)
62{ 81{
63 ssize_t n; 82 ssize_t n;
64 size_t len;
65 char *ptr; 83 char *ptr;
66 84
67 while ((ptr = strchr(buf, c)) == NULL) { 85 while ((ptr = strchr(buf, c)) == NULL) {
@@ -74,20 +92,129 @@ static int next(int fd, char *buf, size_t size, char c)
74 buf[n] = '\0'; 92 buf[n] = '\0';
75 } 93 }
76 94
77 ptr++; 95 return pop(fd, buf, size, ptr - buf + 1);
78 len = strlen(ptr); 96}
79 memmove(buf, ptr, len + 1); 97
98/*
99 * Decode an octal-escaped and space-terminated path of the form used by
100 * /proc/mounts. May be used to decode a path in-place. "out" must be at least
101 * as large as the input. The output is always null-terminated. "len" gets the
102 * length of the output, excluding the trailing null. Returns 0 if a full path
103 * was successfully decoded, otherwise an error.
104 */
105static int decode_path(const char *in, char *out, size_t *len)
106{
107 char *first = out;
108 int c;
109 int i;
110 int ret = -EINVAL;
111 while (1) {
112 switch (*in) {
113 case '\0':
114 goto out;
115
116 case ' ':
117 ret = 0;
118 goto out;
119
120 case '\\':
121 in++;
122 c = 0;
123 for (i = 0; i < 3; i++) {
124 if (*in < '0' || *in > '7')
125 goto out;
126 c = (c << 3) | (*in++ - '0');
127 }
128 *(unsigned char *)out++ = (unsigned char) c;
129 break;
130
131 default:
132 *out++ = *in++;
133 break;
134 }
135 }
136
137out:
138 *out = '\0';
139 *len = out - first;
140 return ret;
141}
142
143/*
144 * Computes the length of s when encoded with three-digit octal escape sequences
145 * for the characters in chars.
146 */
147static size_t octal_encoded_length(const char *s, const char *chars)
148{
149 size_t len = strlen(s);
150 while ((s = strpbrk(s, chars)) != NULL) {
151 len += 3;
152 s++;
153 }
154
155 return len;
156}
157
158enum {
159 OUTCOME_NOTHING_MOUNTED,
160 OUTCOME_TMPFS_MOUNT,
161 OUTCOME_NON_TMPFS_MOUNT,
162};
163
164/* Read a line of /proc/mounts data looking for a tmpfs mount at "path". */
165static int read_mount(int fd, char *buf, size_t bufsize, const char *path,
166 int *outcome)
167{
168 int found;
169 int match;
170 char *space;
171 size_t len;
172
173 enum {
174 MATCH_NONE,
175 MATCH_EXACT,
176 MATCH_PARENT,
177 };
178
179 found = next(fd, buf, bufsize, ' ');
180 if (found != 1)
181 return found;
80 182
81 /* 183 /*
82 * Refill the buffer so that if there's a partial string that we care 184 * If there's no following space in the buffer, then this path is
83 * about, it will be completed, and we can recognize it. 185 * truncated, so it can't be the one we're looking for.
84 */ 186 */
85 n = read(fd, &buf[len], size - len - 1); 187 space = strchr(buf, ' ');
86 if (n < 0) 188 if (space) {
87 return -errno; 189 match = MATCH_NONE;
190 if (!decode_path(buf, buf, &len)) {
191 if (!strcmp(buf, path))
192 match = MATCH_EXACT;
193 else if (!strncmp(buf, path, len)
194 && (path[len] == '/' || !strcmp(buf, "/")))
195 match = MATCH_PARENT;
196 }
197
198 found = pop(fd, buf, bufsize, space - buf + 1);
199 if (found != 1)
200 return found;
201
202 switch (match) {
203 case MATCH_EXACT:
204 if (!strncmp(buf, "tmpfs", strlen("tmpfs")))
205 *outcome = OUTCOME_TMPFS_MOUNT;
206 else
207 *outcome = OUTCOME_NON_TMPFS_MOUNT;
208 break;
88 209
89 buf[len + n] = '\0'; 210 case MATCH_PARENT:
90 return 1; 211 /* This mount obscures any previous ones. */
212 *outcome = OUTCOME_NOTHING_MOUNTED;
213 break;
214 }
215 }
216
217 return next(fd, buf, bufsize, '\n');
91} 218}
92 219
93/* which_tmpdir is called only during early boot */ 220/* which_tmpdir is called only during early boot */
@@ -106,8 +233,12 @@ static int checked_tmpdir = 0;
106 */ 233 */
107static void which_tmpdir(void) 234static void which_tmpdir(void)
108{ 235{
109 int fd, found; 236 int fd;
110 char buf[128] = { '\0' }; 237 int found;
238 int outcome;
239 char *path;
240 char *buf;
241 size_t bufsize;
111 242
112 if (checked_tmpdir) 243 if (checked_tmpdir)
113 return; 244 return;
@@ -116,49 +247,66 @@ static void which_tmpdir(void)
116 247
117 printf("Checking for tmpfs mount on /dev/shm..."); 248 printf("Checking for tmpfs mount on /dev/shm...");
118 249
250 path = realpath("/dev/shm", NULL);
251 if (!path) {
252 printf("failed to check real path, errno = %d\n", errno);
253 return;
254 }
255 printf("%s...", path);
256
257 /*
258 * The buffer needs to be able to fit the full octal-escaped path, a
259 * space, and a trailing null in order to successfully decode it.
260 */
261 bufsize = octal_encoded_length(path, " \t\n\\") + 2;
262
263 if (bufsize < 128)
264 bufsize = 128;
265
266 buf = malloc(bufsize);
267 if (!buf) {
268 printf("malloc failed, errno = %d\n", errno);
269 goto out;
270 }
271 buf[0] = '\0';
272
119 fd = open("/proc/mounts", O_RDONLY); 273 fd = open("/proc/mounts", O_RDONLY);
120 if (fd < 0) { 274 if (fd < 0) {
121 printf("failed to open /proc/mounts, errno = %d\n", errno); 275 printf("failed to open /proc/mounts, errno = %d\n", errno);
122 return; 276 goto out1;
123 } 277 }
124 278
279 outcome = OUTCOME_NOTHING_MOUNTED;
125 while (1) { 280 while (1) {
126 found = next(fd, buf, ARRAY_SIZE(buf), ' '); 281 found = read_mount(fd, buf, bufsize, path, &outcome);
127 if (found != 1)
128 break;
129
130 if (!strncmp(buf, "/dev/shm", strlen("/dev/shm")))
131 goto found;
132
133 found = next(fd, buf, ARRAY_SIZE(buf), '\n');
134 if (found != 1) 282 if (found != 1)
135 break; 283 break;
136 } 284 }
137 285
138err: 286 if (found < 0) {
139 if (found == 0)
140 printf("nothing mounted on /dev/shm\n");
141 else if (found < 0)
142 printf("read returned errno %d\n", -found); 287 printf("read returned errno %d\n", -found);
288 } else {
289 switch (outcome) {
290 case OUTCOME_TMPFS_MOUNT:
291 printf("OK\n");
292 default_tmpdir = "/dev/shm";
293 break;
143 294
144out: 295 case OUTCOME_NON_TMPFS_MOUNT:
145 close(fd); 296 printf("not tmpfs\n");
146 297 break;
147 return;
148
149found:
150 found = next(fd, buf, ARRAY_SIZE(buf), ' ');
151 if (found != 1)
152 goto err;
153 298
154 if (strncmp(buf, "tmpfs", strlen("tmpfs"))) { 299 default:
155 printf("not tmpfs\n"); 300 printf("nothing mounted on /dev/shm\n");
156 goto out; 301 break;
302 }
157 } 303 }
158 304
159 printf("OK\n"); 305 close(fd);
160 default_tmpdir = "/dev/shm"; 306out1:
161 goto out; 307 free(buf);
308out:
309 free(path);
162} 310}
163 311
164static int __init make_tempfile(const char *template, char **out_tempname, 312static int __init make_tempfile(const char *template, char **out_tempname,
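As the new comments in os-Linux/mem.c describe, /proc/mounts reports mount points with ' ', '\t', '\n' and '\\' escaped as three-digit octal sequences (a mount on "/dev/shm mnt" shows up as "/dev/shm\040mnt"), which is why which_tmpdir() now sizes its buffer with octal_encoded_length() and decodes each entry with decode_path(). A self-contained userspace sketch of that decoding, independent of the patch (demo_decode() and the sample string are purely illustrative):

	#include <stdio.h>

	/* Decode three-digit octal escapes in-place, stopping at the first
	 * unescaped space, since /proc/mounts fields are space-terminated. */
	static int demo_decode(char *s)
	{
		char *out = s;

		while (*s && *s != ' ') {
			if (*s == '\\') {
				int c = 0, i;

				for (i = 1; i <= 3; i++) {
					if (s[i] < '0' || s[i] > '7')
						return -1;
					c = (c << 3) | (s[i] - '0');
				}
				*out++ = (char)c;
				s += 4;
			} else {
				*out++ = *s++;
			}
		}
		*out = '\0';
		return 0;
	}

	int main(void)
	{
		char line[] = "/dev/shm\\040mnt tmpfs rw 0 0";

		if (demo_decode(line) == 0)
			printf("%s\n", line);	/* prints "/dev/shm mnt" */
		return 0;
	}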
diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c
index 9d9f1b4bf826..905924b773d3 100644
--- a/arch/um/os-Linux/signal.c
+++ b/arch/um/os-Linux/signal.c
@@ -25,7 +25,7 @@ void (*sig_info[NSIG])(int, struct siginfo *, struct uml_pt_regs *) = {
25 [SIGIO] = sigio_handler, 25 [SIGIO] = sigio_handler,
26 [SIGVTALRM] = timer_handler }; 26 [SIGVTALRM] = timer_handler };
27 27
28static void sig_handler_common(int sig, siginfo_t *si, mcontext_t *mc) 28static void sig_handler_common(int sig, struct siginfo *si, mcontext_t *mc)
29{ 29{
30 struct uml_pt_regs r; 30 struct uml_pt_regs r;
31 int save_errno = errno; 31 int save_errno = errno;
@@ -61,7 +61,7 @@ static void sig_handler_common(int sig, siginfo_t *si, mcontext_t *mc)
61static int signals_enabled; 61static int signals_enabled;
62static unsigned int signals_pending; 62static unsigned int signals_pending;
63 63
64void sig_handler(int sig, siginfo_t *si, mcontext_t *mc) 64void sig_handler(int sig, struct siginfo *si, mcontext_t *mc)
65{ 65{
66 int enabled; 66 int enabled;
67 67
@@ -120,7 +120,7 @@ void set_sigstack(void *sig_stack, int size)
120 panic("enabling signal stack failed, errno = %d\n", errno); 120 panic("enabling signal stack failed, errno = %d\n", errno);
121} 121}
122 122
123static void (*handlers[_NSIG])(int sig, siginfo_t *si, mcontext_t *mc) = { 123static void (*handlers[_NSIG])(int sig, struct siginfo *si, mcontext_t *mc) = {
124 [SIGSEGV] = sig_handler, 124 [SIGSEGV] = sig_handler,
125 [SIGBUS] = sig_handler, 125 [SIGBUS] = sig_handler,
126 [SIGILL] = sig_handler, 126 [SIGILL] = sig_handler,
@@ -162,7 +162,7 @@ static void hard_handler(int sig, siginfo_t *si, void *p)
162 while ((sig = ffs(pending)) != 0){ 162 while ((sig = ffs(pending)) != 0){
163 sig--; 163 sig--;
164 pending &= ~(1 << sig); 164 pending &= ~(1 << sig);
165 (*handlers[sig])(sig, si, mc); 165 (*handlers[sig])(sig, (struct siginfo *)si, mc);
166 } 166 }
167 167
168 /* 168 /*
diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
index 4625949bf1e4..d531879a4617 100644
--- a/arch/um/os-Linux/skas/process.c
+++ b/arch/um/os-Linux/skas/process.c
@@ -54,7 +54,7 @@ static int ptrace_dump_regs(int pid)
54 54
55void wait_stub_done(int pid) 55void wait_stub_done(int pid)
56{ 56{
57 int n, status, err; 57 int n, status, err, bad_stop = 0;
58 58
59 while (1) { 59 while (1) {
60 CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL)); 60 CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL));
@@ -74,6 +74,8 @@ void wait_stub_done(int pid)
74 74
75 if (((1 << WSTOPSIG(status)) & STUB_DONE_MASK) != 0) 75 if (((1 << WSTOPSIG(status)) & STUB_DONE_MASK) != 0)
76 return; 76 return;
77 else
78 bad_stop = 1;
77 79
78bad_wait: 80bad_wait:
79 err = ptrace_dump_regs(pid); 81 err = ptrace_dump_regs(pid);
@@ -83,7 +85,10 @@ bad_wait:
83 printk(UM_KERN_ERR "wait_stub_done : failed to wait for SIGTRAP, " 85 printk(UM_KERN_ERR "wait_stub_done : failed to wait for SIGTRAP, "
84 "pid = %d, n = %d, errno = %d, status = 0x%x\n", pid, n, errno, 86 "pid = %d, n = %d, errno = %d, status = 0x%x\n", pid, n, errno,
85 status); 87 status);
86 fatal_sigsegv(); 88 if (bad_stop)
89 kill(pid, SIGKILL);
90 else
91 fatal_sigsegv();
87} 92}
88 93
89extern unsigned long current_stub_stack(void); 94extern unsigned long current_stub_stack(void);
@@ -409,7 +414,7 @@ void userspace(struct uml_pt_regs *regs)
409 if (WIFSTOPPED(status)) { 414 if (WIFSTOPPED(status)) {
410 int sig = WSTOPSIG(status); 415 int sig = WSTOPSIG(status);
411 416
412 ptrace(PTRACE_GETSIGINFO, pid, 0, &si); 417 ptrace(PTRACE_GETSIGINFO, pid, 0, (struct siginfo *)&si);
413 418
414 switch (sig) { 419 switch (sig) {
415 case SIGSEGV: 420 case SIGSEGV:
@@ -417,7 +422,7 @@ void userspace(struct uml_pt_regs *regs)
417 !ptrace_faultinfo) { 422 !ptrace_faultinfo) {
418 get_skas_faultinfo(pid, 423 get_skas_faultinfo(pid,
419 &regs->faultinfo); 424 &regs->faultinfo);
420 (*sig_info[SIGSEGV])(SIGSEGV, &si, 425 (*sig_info[SIGSEGV])(SIGSEGV, (struct siginfo *)&si,
421 regs); 426 regs);
422 } 427 }
423 else handle_segv(pid, regs); 428 else handle_segv(pid, regs);
@@ -426,14 +431,14 @@ void userspace(struct uml_pt_regs *regs)
426 handle_trap(pid, regs, local_using_sysemu); 431 handle_trap(pid, regs, local_using_sysemu);
427 break; 432 break;
428 case SIGTRAP: 433 case SIGTRAP:
429 relay_signal(SIGTRAP, &si, regs); 434 relay_signal(SIGTRAP, (struct siginfo *)&si, regs);
430 break; 435 break;
431 case SIGVTALRM: 436 case SIGVTALRM:
432 now = os_nsecs(); 437 now = os_nsecs();
433 if (now < nsecs) 438 if (now < nsecs)
434 break; 439 break;
435 block_signals(); 440 block_signals();
436 (*sig_info[sig])(sig, &si, regs); 441 (*sig_info[sig])(sig, (struct siginfo *)&si, regs);
437 unblock_signals(); 442 unblock_signals();
438 nsecs = timer.it_value.tv_sec * 443 nsecs = timer.it_value.tv_sec *
439 UM_NSEC_PER_SEC + 444 UM_NSEC_PER_SEC +
@@ -447,7 +452,7 @@ void userspace(struct uml_pt_regs *regs)
447 case SIGFPE: 452 case SIGFPE:
448 case SIGWINCH: 453 case SIGWINCH:
449 block_signals(); 454 block_signals();
450 (*sig_info[sig])(sig, &si, regs); 455 (*sig_info[sig])(sig, (struct siginfo *)&si, regs);
451 unblock_signals(); 456 unblock_signals();
452 break; 457 break;
453 default: 458 default:
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index 7d6ba9db1be9..6c63c358a7e6 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -27,7 +27,6 @@ obj-$(CONFIG_CRYPTO_SHA1_SSSE3) += sha1-ssse3.o
27obj-$(CONFIG_CRYPTO_CRC32_PCLMUL) += crc32-pclmul.o 27obj-$(CONFIG_CRYPTO_CRC32_PCLMUL) += crc32-pclmul.o
28obj-$(CONFIG_CRYPTO_SHA256_SSSE3) += sha256-ssse3.o 28obj-$(CONFIG_CRYPTO_SHA256_SSSE3) += sha256-ssse3.o
29obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o 29obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o
30obj-$(CONFIG_CRYPTO_CRCT10DIF_PCLMUL) += crct10dif-pclmul.o
31 30
32# These modules require assembler to support AVX. 31# These modules require assembler to support AVX.
33ifeq ($(avx_supported),yes) 32ifeq ($(avx_supported),yes)
@@ -82,4 +81,3 @@ crc32c-intel-$(CONFIG_64BIT) += crc32c-pcl-intel-asm_64.o
82crc32-pclmul-y := crc32-pclmul_asm.o crc32-pclmul_glue.o 81crc32-pclmul-y := crc32-pclmul_asm.o crc32-pclmul_glue.o
83sha256-ssse3-y := sha256-ssse3-asm.o sha256-avx-asm.o sha256-avx2-asm.o sha256_ssse3_glue.o 82sha256-ssse3-y := sha256-ssse3-asm.o sha256-avx-asm.o sha256-avx2-asm.o sha256_ssse3_glue.o
84sha512-ssse3-y := sha512-ssse3-asm.o sha512-avx-asm.o sha512-avx2-asm.o sha512_ssse3_glue.o 83sha512-ssse3-y := sha512-ssse3-asm.o sha512-avx-asm.o sha512-avx2-asm.o sha512_ssse3_glue.o
85crct10dif-pclmul-y := crct10dif-pcl-asm_64.o crct10dif-pclmul_glue.o
diff --git a/arch/x86/crypto/crct10dif-pcl-asm_64.S b/arch/x86/crypto/crct10dif-pcl-asm_64.S
deleted file mode 100644
index 35e97569d05f..000000000000
--- a/arch/x86/crypto/crct10dif-pcl-asm_64.S
+++ /dev/null
@@ -1,643 +0,0 @@
1########################################################################
2# Implement fast CRC-T10DIF computation with SSE and PCLMULQDQ instructions
3#
4# Copyright (c) 2013, Intel Corporation
5#
6# Authors:
7# Erdinc Ozturk <erdinc.ozturk@intel.com>
8# Vinodh Gopal <vinodh.gopal@intel.com>
9# James Guilford <james.guilford@intel.com>
10# Tim Chen <tim.c.chen@linux.intel.com>
11#
12# This software is available to you under a choice of one of two
13# licenses. You may choose to be licensed under the terms of the GNU
14# General Public License (GPL) Version 2, available from the file
15# COPYING in the main directory of this source tree, or the
16# OpenIB.org BSD license below:
17#
18# Redistribution and use in source and binary forms, with or without
19# modification, are permitted provided that the following conditions are
20# met:
21#
22# * Redistributions of source code must retain the above copyright
23# notice, this list of conditions and the following disclaimer.
24#
25# * Redistributions in binary form must reproduce the above copyright
26# notice, this list of conditions and the following disclaimer in the
27# documentation and/or other materials provided with the
28# distribution.
29#
30# * Neither the name of the Intel Corporation nor the names of its
31# contributors may be used to endorse or promote products derived from
32# this software without specific prior written permission.
33#
34#
35# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION ""AS IS"" AND ANY
36# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
37# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
38# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR
39# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
40# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
41# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
42# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
43# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
44# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
45# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
46########################################################################
47# Function API:
48# UINT16 crc_t10dif_pcl(
49# UINT16 init_crc, //initial CRC value, 16 bits
50# const unsigned char *buf, //buffer pointer to calculate CRC on
51# UINT64 len //buffer length in bytes (64-bit data)
52# );
53#
54# Reference paper titled "Fast CRC Computation for Generic
55# Polynomials Using PCLMULQDQ Instruction"
56# URL: http://www.intel.com/content/dam/www/public/us/en/documents
57# /white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
58#
59#
60
61#include <linux/linkage.h>
62
63.text
64
65#define arg1 %rdi
66#define arg2 %rsi
67#define arg3 %rdx
68
69#define arg1_low32 %edi
70
71ENTRY(crc_t10dif_pcl)
72.align 16
73
74 # adjust the 16-bit initial_crc value, scale it to 32 bits
75 shl $16, arg1_low32
76
77 # Allocate Stack Space
78 mov %rsp, %rcx
79 sub $16*2, %rsp
80 # align stack to 16 byte boundary
81 and $~(0x10 - 1), %rsp
82
83 # check if smaller than 256
84 cmp $256, arg3
85
86 # for sizes less than 128, we can't fold 64B at a time...
87 jl _less_than_128
88
89
90 # load the initial crc value
91 movd arg1_low32, %xmm10 # initial crc
92
93 # crc value does not need to be byte-reflected, but it needs
94 # to be moved to the high part of the register.
95 # because data will be byte-reflected and will align with
96 # initial crc at correct place.
97 pslldq $12, %xmm10
98
99 movdqa SHUF_MASK(%rip), %xmm11
100 # receive the initial 64B data, xor the initial crc value
101 movdqu 16*0(arg2), %xmm0
102 movdqu 16*1(arg2), %xmm1
103 movdqu 16*2(arg2), %xmm2
104 movdqu 16*3(arg2), %xmm3
105 movdqu 16*4(arg2), %xmm4
106 movdqu 16*5(arg2), %xmm5
107 movdqu 16*6(arg2), %xmm6
108 movdqu 16*7(arg2), %xmm7
109
110 pshufb %xmm11, %xmm0
111 # XOR the initial_crc value
112 pxor %xmm10, %xmm0
113 pshufb %xmm11, %xmm1
114 pshufb %xmm11, %xmm2
115 pshufb %xmm11, %xmm3
116 pshufb %xmm11, %xmm4
117 pshufb %xmm11, %xmm5
118 pshufb %xmm11, %xmm6
119 pshufb %xmm11, %xmm7
120
121 movdqa rk3(%rip), %xmm10 #xmm10 has rk3 and rk4
122 #imm value of pclmulqdq instruction
123 #will determine which constant to use
124
125 #################################################################
126 # we subtract 256 instead of 128 to save one instruction from the loop
127 sub $256, arg3
128
129 # at this section of the code, there is 64*x+y (0<=y<64) bytes of
130 # buffer. The _fold_64_B_loop will fold 64B at a time
131 # until we have 64+y Bytes of buffer
132
133
134 # fold 64B at a time. This section of the code folds 4 xmm
135 # registers in parallel
136_fold_64_B_loop:
137
138 # update the buffer pointer
139 add $128, arg2 # buf += 64#
140
141 movdqu 16*0(arg2), %xmm9
142 movdqu 16*1(arg2), %xmm12
143 pshufb %xmm11, %xmm9
144 pshufb %xmm11, %xmm12
145 movdqa %xmm0, %xmm8
146 movdqa %xmm1, %xmm13
147 pclmulqdq $0x0 , %xmm10, %xmm0
148 pclmulqdq $0x11, %xmm10, %xmm8
149 pclmulqdq $0x0 , %xmm10, %xmm1
150 pclmulqdq $0x11, %xmm10, %xmm13
151 pxor %xmm9 , %xmm0
152 xorps %xmm8 , %xmm0
153 pxor %xmm12, %xmm1
154 xorps %xmm13, %xmm1
155
156 movdqu 16*2(arg2), %xmm9
157 movdqu 16*3(arg2), %xmm12
158 pshufb %xmm11, %xmm9
159 pshufb %xmm11, %xmm12
160 movdqa %xmm2, %xmm8
161 movdqa %xmm3, %xmm13
162 pclmulqdq $0x0, %xmm10, %xmm2
163 pclmulqdq $0x11, %xmm10, %xmm8
164 pclmulqdq $0x0, %xmm10, %xmm3
165 pclmulqdq $0x11, %xmm10, %xmm13
166 pxor %xmm9 , %xmm2
167 xorps %xmm8 , %xmm2
168 pxor %xmm12, %xmm3
169 xorps %xmm13, %xmm3
170
171 movdqu 16*4(arg2), %xmm9
172 movdqu 16*5(arg2), %xmm12
173 pshufb %xmm11, %xmm9
174 pshufb %xmm11, %xmm12
175 movdqa %xmm4, %xmm8
176 movdqa %xmm5, %xmm13
177 pclmulqdq $0x0, %xmm10, %xmm4
178 pclmulqdq $0x11, %xmm10, %xmm8
179 pclmulqdq $0x0, %xmm10, %xmm5
180 pclmulqdq $0x11, %xmm10, %xmm13
181 pxor %xmm9 , %xmm4
182 xorps %xmm8 , %xmm4
183 pxor %xmm12, %xmm5
184 xorps %xmm13, %xmm5
185
186 movdqu 16*6(arg2), %xmm9
187 movdqu 16*7(arg2), %xmm12
188 pshufb %xmm11, %xmm9
189 pshufb %xmm11, %xmm12
190 movdqa %xmm6 , %xmm8
191 movdqa %xmm7 , %xmm13
192 pclmulqdq $0x0 , %xmm10, %xmm6
193 pclmulqdq $0x11, %xmm10, %xmm8
194 pclmulqdq $0x0 , %xmm10, %xmm7
195 pclmulqdq $0x11, %xmm10, %xmm13
196 pxor %xmm9 , %xmm6
197 xorps %xmm8 , %xmm6
198 pxor %xmm12, %xmm7
199 xorps %xmm13, %xmm7
200
201 sub $128, arg3
202
203 # check if there is another 64B in the buffer to be able to fold
204 jge _fold_64_B_loop
205 ##################################################################
206
207
208 add $128, arg2
209 # at this point, the buffer pointer is pointing at the last y Bytes
210 # of the buffer the 64B of folded data is in 4 of the xmm
211 # registers: xmm0, xmm1, xmm2, xmm3
212
213
214 # fold the 8 xmm registers to 1 xmm register with different constants
215
216 movdqa rk9(%rip), %xmm10
217 movdqa %xmm0, %xmm8
218 pclmulqdq $0x11, %xmm10, %xmm0
219 pclmulqdq $0x0 , %xmm10, %xmm8
220 pxor %xmm8, %xmm7
221 xorps %xmm0, %xmm7
222
223 movdqa rk11(%rip), %xmm10
224 movdqa %xmm1, %xmm8
225 pclmulqdq $0x11, %xmm10, %xmm1
226 pclmulqdq $0x0 , %xmm10, %xmm8
227 pxor %xmm8, %xmm7
228 xorps %xmm1, %xmm7
229
230 movdqa rk13(%rip), %xmm10
231 movdqa %xmm2, %xmm8
232 pclmulqdq $0x11, %xmm10, %xmm2
233 pclmulqdq $0x0 , %xmm10, %xmm8
234 pxor %xmm8, %xmm7
235 pxor %xmm2, %xmm7
236
237 movdqa rk15(%rip), %xmm10
238 movdqa %xmm3, %xmm8
239 pclmulqdq $0x11, %xmm10, %xmm3
240 pclmulqdq $0x0 , %xmm10, %xmm8
241 pxor %xmm8, %xmm7
242 xorps %xmm3, %xmm7
243
244 movdqa rk17(%rip), %xmm10
245 movdqa %xmm4, %xmm8
246 pclmulqdq $0x11, %xmm10, %xmm4
247 pclmulqdq $0x0 , %xmm10, %xmm8
248 pxor %xmm8, %xmm7
249 pxor %xmm4, %xmm7
250
251 movdqa rk19(%rip), %xmm10
252 movdqa %xmm5, %xmm8
253 pclmulqdq $0x11, %xmm10, %xmm5
254 pclmulqdq $0x0 , %xmm10, %xmm8
255 pxor %xmm8, %xmm7
256 xorps %xmm5, %xmm7
257
258 movdqa rk1(%rip), %xmm10 #xmm10 has rk1 and rk2
259 #imm value of pclmulqdq instruction
260 #will determine which constant to use
261 movdqa %xmm6, %xmm8
262 pclmulqdq $0x11, %xmm10, %xmm6
263 pclmulqdq $0x0 , %xmm10, %xmm8
264 pxor %xmm8, %xmm7
265 pxor %xmm6, %xmm7
266
267
268 # instead of 64, we add 48 to the loop counter to save 1 instruction
269 # from the loop instead of a cmp instruction, we use the negative
270 # flag with the jl instruction
271 add $128-16, arg3
272 jl _final_reduction_for_128
273
274 # now we have 16+y bytes left to reduce. 16 Bytes is in register xmm7
275 # and the rest is in memory. We can fold 16 bytes at a time if y>=16
276 # continue folding 16B at a time
277
278_16B_reduction_loop:
279 movdqa %xmm7, %xmm8
280 pclmulqdq $0x11, %xmm10, %xmm7
281 pclmulqdq $0x0 , %xmm10, %xmm8
282 pxor %xmm8, %xmm7
283 movdqu (arg2), %xmm0
284 pshufb %xmm11, %xmm0
285 pxor %xmm0 , %xmm7
286 add $16, arg2
287 sub $16, arg3
288 # instead of a cmp instruction, we utilize the flags with the
289 # jge instruction equivalent of: cmp arg3, 16-16
290 # check if there is any more 16B in the buffer to be able to fold
291 jge _16B_reduction_loop
292
293 #now we have 16+z bytes left to reduce, where 0<= z < 16.
294 #first, we reduce the data in the xmm7 register
295
296
297_final_reduction_for_128:
298 # check if any more data to fold. If not, compute the CRC of
299 # the final 128 bits
300 add $16, arg3
301 je _128_done
302
303 # here we are getting data that is less than 16 bytes.
304 # since we know that there was data before the pointer, we can
305 # offset the input pointer before the actual point, to receive
306 # exactly 16 bytes. after that the registers need to be adjusted.
307_get_last_two_xmms:
308 movdqa %xmm7, %xmm2
309
310 movdqu -16(arg2, arg3), %xmm1
311 pshufb %xmm11, %xmm1
312
313 # get rid of the extra data that was loaded before
314 # load the shift constant
315 lea pshufb_shf_table+16(%rip), %rax
316 sub arg3, %rax
317 movdqu (%rax), %xmm0
318
319 # shift xmm2 to the left by arg3 bytes
320 pshufb %xmm0, %xmm2
321
322 # shift xmm7 to the right by 16-arg3 bytes
323 pxor mask1(%rip), %xmm0
324 pshufb %xmm0, %xmm7
325 pblendvb %xmm2, %xmm1 #xmm0 is implicit
326
327 # fold 16 Bytes
328 movdqa %xmm1, %xmm2
329 movdqa %xmm7, %xmm8
330 pclmulqdq $0x11, %xmm10, %xmm7
331 pclmulqdq $0x0 , %xmm10, %xmm8
332 pxor %xmm8, %xmm7
333 pxor %xmm2, %xmm7
334
335_128_done:
336 # compute crc of a 128-bit value
337 movdqa rk5(%rip), %xmm10 # rk5 and rk6 in xmm10
338 movdqa %xmm7, %xmm0
339
340 #64b fold
341 pclmulqdq $0x1, %xmm10, %xmm7
342 pslldq $8 , %xmm0
343 pxor %xmm0, %xmm7
344
345 #32b fold
346 movdqa %xmm7, %xmm0
347
348 pand mask2(%rip), %xmm0
349
350 psrldq $12, %xmm7
351 pclmulqdq $0x10, %xmm10, %xmm7
352 pxor %xmm0, %xmm7
353
354 #barrett reduction
355_barrett:
356 movdqa rk7(%rip), %xmm10 # rk7 and rk8 in xmm10
357 movdqa %xmm7, %xmm0
358 pclmulqdq $0x01, %xmm10, %xmm7
359 pslldq $4, %xmm7
360 pclmulqdq $0x11, %xmm10, %xmm7
361
362 pslldq $4, %xmm7
363 pxor %xmm0, %xmm7
364 pextrd $1, %xmm7, %eax
365
366_cleanup:
367 # scale the result back to 16 bits
368 shr $16, %eax
369 mov %rcx, %rsp
370 ret
371
372########################################################################
373
374.align 16
375_less_than_128:
376
377 # check if there is enough buffer to be able to fold 16B at a time
378 cmp $32, arg3
379 jl _less_than_32
380 movdqa SHUF_MASK(%rip), %xmm11
381
382 # now if there is, load the constants
383 movdqa rk1(%rip), %xmm10 # rk1 and rk2 in xmm10
384
385 movd arg1_low32, %xmm0 # get the initial crc value
386 pslldq $12, %xmm0 # align it to its correct place
387 movdqu (arg2), %xmm7 # load the plaintext
388 pshufb %xmm11, %xmm7 # byte-reflect the plaintext
389 pxor %xmm0, %xmm7
390
391
392 # update the buffer pointer
393 add $16, arg2
394
395 # update the counter. subtract 32 instead of 16 to save one
396 # instruction from the loop
397 sub $32, arg3
398
399 jmp _16B_reduction_loop
400
401
402.align 16
403_less_than_32:
404 # mov initial crc to the return value. this is necessary for
405 # zero-length buffers.
406 mov arg1_low32, %eax
407 test arg3, arg3
408 je _cleanup
409
410 movdqa SHUF_MASK(%rip), %xmm11
411
412 movd arg1_low32, %xmm0 # get the initial crc value
413 pslldq $12, %xmm0 # align it to its correct place
414
415 cmp $16, arg3
416 je _exact_16_left
417 jl _less_than_16_left
418
419 movdqu (arg2), %xmm7 # load the plaintext
420 pshufb %xmm11, %xmm7 # byte-reflect the plaintext
421 pxor %xmm0 , %xmm7 # xor the initial crc value
422 add $16, arg2
423 sub $16, arg3
424 movdqa rk1(%rip), %xmm10 # rk1 and rk2 in xmm10
425 jmp _get_last_two_xmms
426
427
428.align 16
429_less_than_16_left:
430 # use stack space to load data less than 16 bytes, zero-out
431 # the 16B in memory first.
432
433 pxor %xmm1, %xmm1
434 mov %rsp, %r11
435 movdqa %xmm1, (%r11)
436
437 cmp $4, arg3
438 jl _only_less_than_4
439
440 # backup the counter value
441 mov arg3, %r9
442 cmp $8, arg3
443 jl _less_than_8_left
444
445 # load 8 Bytes
446 mov (arg2), %rax
447 mov %rax, (%r11)
448 add $8, %r11
449 sub $8, arg3
450 add $8, arg2
451_less_than_8_left:
452
453 cmp $4, arg3
454 jl _less_than_4_left
455
456 # load 4 Bytes
457 mov (arg2), %eax
458 mov %eax, (%r11)
459 add $4, %r11
460 sub $4, arg3
461 add $4, arg2
462_less_than_4_left:
463
464 cmp $2, arg3
465 jl _less_than_2_left
466
467 # load 2 Bytes
468 mov (arg2), %ax
469 mov %ax, (%r11)
470 add $2, %r11
471 sub $2, arg3
472 add $2, arg2
473_less_than_2_left:
474 cmp $1, arg3
475 jl _zero_left
476
477 # load 1 Byte
478 mov (arg2), %al
479 mov %al, (%r11)
480_zero_left:
481 movdqa (%rsp), %xmm7
482 pshufb %xmm11, %xmm7
483 pxor %xmm0 , %xmm7 # xor the initial crc value
484
485 # shl r9, 4
486 lea pshufb_shf_table+16(%rip), %rax
487 sub %r9, %rax
488 movdqu (%rax), %xmm0
489 pxor mask1(%rip), %xmm0
490
491 pshufb %xmm0, %xmm7
492 jmp _128_done
493
494.align 16
495_exact_16_left:
496 movdqu (arg2), %xmm7
497 pshufb %xmm11, %xmm7
498 pxor %xmm0 , %xmm7 # xor the initial crc value
499
500 jmp _128_done
501
502_only_less_than_4:
503 cmp $3, arg3
504 jl _only_less_than_3
505
506 # load 3 Bytes
507 mov (arg2), %al
508 mov %al, (%r11)
509
510 mov 1(arg2), %al
511 mov %al, 1(%r11)
512
513 mov 2(arg2), %al
514 mov %al, 2(%r11)
515
516 movdqa (%rsp), %xmm7
517 pshufb %xmm11, %xmm7
518 pxor %xmm0 , %xmm7 # xor the initial crc value
519
520 psrldq $5, %xmm7
521
522 jmp _barrett
523_only_less_than_3:
524 cmp $2, arg3
525 jl _only_less_than_2
526
527 # load 2 Bytes
528 mov (arg2), %al
529 mov %al, (%r11)
530
531 mov 1(arg2), %al
532 mov %al, 1(%r11)
533
534 movdqa (%rsp), %xmm7
535 pshufb %xmm11, %xmm7
536 pxor %xmm0 , %xmm7 # xor the initial crc value
537
538 psrldq $6, %xmm7
539
540 jmp _barrett
541_only_less_than_2:
542
543 # load 1 Byte
544 mov (arg2), %al
545 mov %al, (%r11)
546
547 movdqa (%rsp), %xmm7
548 pshufb %xmm11, %xmm7
549 pxor %xmm0 , %xmm7 # xor the initial crc value
550
551 psrldq $7, %xmm7
552
553 jmp _barrett
554
555ENDPROC(crc_t10dif_pcl)
556
557.data
558
559# precomputed constants
560# these constants are precomputed from the poly:
561# 0x8bb70000 (0x8bb7 scaled to 32 bits)
562.align 16
563# Q = 0x18BB70000
564# rk1 = 2^(32*3) mod Q << 32
565# rk2 = 2^(32*5) mod Q << 32
566# rk3 = 2^(32*15) mod Q << 32
567# rk4 = 2^(32*17) mod Q << 32
568# rk5 = 2^(32*3) mod Q << 32
569# rk6 = 2^(32*2) mod Q << 32
570# rk7 = floor(2^64/Q)
571# rk8 = Q
572rk1:
573.quad 0x2d56000000000000
574rk2:
575.quad 0x06df000000000000
576rk3:
577.quad 0x9d9d000000000000
578rk4:
579.quad 0x7cf5000000000000
580rk5:
581.quad 0x2d56000000000000
582rk6:
583.quad 0x1368000000000000
584rk7:
585.quad 0x00000001f65a57f8
586rk8:
587.quad 0x000000018bb70000
588
589rk9:
590.quad 0xceae000000000000
591rk10:
592.quad 0xbfd6000000000000
593rk11:
594.quad 0x1e16000000000000
595rk12:
596.quad 0x713c000000000000
597rk13:
598.quad 0xf7f9000000000000
599rk14:
600.quad 0x80a6000000000000
601rk15:
602.quad 0x044c000000000000
603rk16:
604.quad 0xe658000000000000
605rk17:
606.quad 0xad18000000000000
607rk18:
608.quad 0xa497000000000000
609rk19:
610.quad 0x6ee3000000000000
611rk20:
612.quad 0xe7b5000000000000
613
614
615
616mask1:
617.octa 0x80808080808080808080808080808080
618mask2:
619.octa 0x00000000FFFFFFFFFFFFFFFFFFFFFFFF
620
621SHUF_MASK:
622.octa 0x000102030405060708090A0B0C0D0E0F
623
624pshufb_shf_table:
625# use these values for shift constants for the pshufb instruction
626# different alignments result in values as shown:
627# DDQ 0x008f8e8d8c8b8a898887868584838281 # shl 15 (16-1) / shr1
628# DDQ 0x01008f8e8d8c8b8a8988878685848382 # shl 14 (16-3) / shr2
629# DDQ 0x0201008f8e8d8c8b8a89888786858483 # shl 13 (16-4) / shr3
630# DDQ 0x030201008f8e8d8c8b8a898887868584 # shl 12 (16-4) / shr4
631# DDQ 0x04030201008f8e8d8c8b8a8988878685 # shl 11 (16-5) / shr5
632# DDQ 0x0504030201008f8e8d8c8b8a89888786 # shl 10 (16-6) / shr6
633# DDQ 0x060504030201008f8e8d8c8b8a898887 # shl 9 (16-7) / shr7
634# DDQ 0x07060504030201008f8e8d8c8b8a8988 # shl 8 (16-8) / shr8
635# DDQ 0x0807060504030201008f8e8d8c8b8a89 # shl 7 (16-9) / shr9
636# DDQ 0x090807060504030201008f8e8d8c8b8a # shl 6 (16-10) / shr10
637# DDQ 0x0a090807060504030201008f8e8d8c8b # shl 5 (16-11) / shr11
638# DDQ 0x0b0a090807060504030201008f8e8d8c # shl 4 (16-12) / shr12
639# DDQ 0x0c0b0a090807060504030201008f8e8d # shl 3 (16-13) / shr13
640# DDQ 0x0d0c0b0a090807060504030201008f8e # shl 2 (16-14) / shr14
641# DDQ 0x0e0d0c0b0a090807060504030201008f # shl 1 (16-15) / shr15
642.octa 0x8f8e8d8c8b8a89888786858483828100
643.octa 0x000e0d0c0b0a09080706050403020100
diff --git a/arch/x86/crypto/crct10dif-pclmul_glue.c b/arch/x86/crypto/crct10dif-pclmul_glue.c
deleted file mode 100644
index 7845d7fd54c0..000000000000
--- a/arch/x86/crypto/crct10dif-pclmul_glue.c
+++ /dev/null
@@ -1,151 +0,0 @@
1/*
2 * Cryptographic API.
3 *
4 * T10 Data Integrity Field CRC16 Crypto Transform using PCLMULQDQ Instructions
5 *
6 * Copyright (C) 2013 Intel Corporation
7 * Author: Tim Chen <tim.c.chen@linux.intel.com>
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the Free
11 * Software Foundation; either version 2 of the License, or (at your option)
12 * any later version.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
16 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
18 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
19 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
20 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 */
24
25#include <linux/types.h>
26#include <linux/module.h>
27#include <linux/crc-t10dif.h>
28#include <crypto/internal/hash.h>
29#include <linux/init.h>
30#include <linux/string.h>
31#include <linux/kernel.h>
32#include <asm/i387.h>
33#include <asm/cpufeature.h>
34#include <asm/cpu_device_id.h>
35
36asmlinkage __u16 crc_t10dif_pcl(__u16 crc, const unsigned char *buf,
37 size_t len);
38
39struct chksum_desc_ctx {
40 __u16 crc;
41};
42
43/*
44 * Steps through buffer one byte at at time, calculates reflected
45 * crc using table.
46 */
47
48static int chksum_init(struct shash_desc *desc)
49{
50 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
51
52 ctx->crc = 0;
53
54 return 0;
55}
56
57static int chksum_update(struct shash_desc *desc, const u8 *data,
58 unsigned int length)
59{
60 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
61
62 if (irq_fpu_usable()) {
63 kernel_fpu_begin();
64 ctx->crc = crc_t10dif_pcl(ctx->crc, data, length);
65 kernel_fpu_end();
66 } else
67 ctx->crc = crc_t10dif_generic(ctx->crc, data, length);
68 return 0;
69}
70
71static int chksum_final(struct shash_desc *desc, u8 *out)
72{
73 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
74
75 *(__u16 *)out = ctx->crc;
76 return 0;
77}
78
79static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len,
80 u8 *out)
81{
82 if (irq_fpu_usable()) {
83 kernel_fpu_begin();
84 *(__u16 *)out = crc_t10dif_pcl(*crcp, data, len);
85 kernel_fpu_end();
86 } else
87 *(__u16 *)out = crc_t10dif_generic(*crcp, data, len);
88 return 0;
89}
90
91static int chksum_finup(struct shash_desc *desc, const u8 *data,
92 unsigned int len, u8 *out)
93{
94 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
95
96 return __chksum_finup(&ctx->crc, data, len, out);
97}
98
99static int chksum_digest(struct shash_desc *desc, const u8 *data,
100 unsigned int length, u8 *out)
101{
102 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
103
104 return __chksum_finup(&ctx->crc, data, length, out);
105}
106
107static struct shash_alg alg = {
108 .digestsize = CRC_T10DIF_DIGEST_SIZE,
109 .init = chksum_init,
110 .update = chksum_update,
111 .final = chksum_final,
112 .finup = chksum_finup,
113 .digest = chksum_digest,
114 .descsize = sizeof(struct chksum_desc_ctx),
115 .base = {
116 .cra_name = "crct10dif",
117 .cra_driver_name = "crct10dif-pclmul",
118 .cra_priority = 200,
119 .cra_blocksize = CRC_T10DIF_BLOCK_SIZE,
120 .cra_module = THIS_MODULE,
121 }
122};
123
124static const struct x86_cpu_id crct10dif_cpu_id[] = {
125 X86_FEATURE_MATCH(X86_FEATURE_PCLMULQDQ),
126 {}
127};
128MODULE_DEVICE_TABLE(x86cpu, crct10dif_cpu_id);
129
130static int __init crct10dif_intel_mod_init(void)
131{
132 if (!x86_match_cpu(crct10dif_cpu_id))
133 return -ENODEV;
134
135 return crypto_register_shash(&alg);
136}
137
138static void __exit crct10dif_intel_mod_fini(void)
139{
140 crypto_unregister_shash(&alg);
141}
142
143module_init(crct10dif_intel_mod_init);
144module_exit(crct10dif_intel_mod_fini);
145
146MODULE_AUTHOR("Tim Chen <tim.c.chen@linux.intel.com>");
147MODULE_DESCRIPTION("T10 DIF CRC calculation accelerated with PCLMULQDQ.");
148MODULE_LICENSE("GPL");
149
150MODULE_ALIAS("crct10dif");
151MODULE_ALIAS("crct10dif-pclmul");
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index 5f9a1243190e..d2b12988d2ed 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -28,7 +28,7 @@ struct x86_cpu {
28#ifdef CONFIG_HOTPLUG_CPU 28#ifdef CONFIG_HOTPLUG_CPU
29extern int arch_register_cpu(int num); 29extern int arch_register_cpu(int num);
30extern void arch_unregister_cpu(int); 30extern void arch_unregister_cpu(int);
31extern void __cpuinit start_cpu0(void); 31extern void start_cpu0(void);
32#ifdef CONFIG_DEBUG_HOTPLUG_CPU0 32#ifdef CONFIG_DEBUG_HOTPLUG_CPU0
33extern int _debug_hotplug_cpu(int cpu, int action); 33extern int _debug_hotplug_cpu(int cpu, int action);
34#endif 34#endif
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index 6bc3985ee473..f98bd6625318 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -60,11 +60,11 @@ static inline void __exit exit_amd_microcode(void) {}
60#ifdef CONFIG_MICROCODE_EARLY 60#ifdef CONFIG_MICROCODE_EARLY
61#define MAX_UCODE_COUNT 128 61#define MAX_UCODE_COUNT 128
62extern void __init load_ucode_bsp(void); 62extern void __init load_ucode_bsp(void);
63extern void __cpuinit load_ucode_ap(void); 63extern void load_ucode_ap(void);
64extern int __init save_microcode_in_initrd(void); 64extern int __init save_microcode_in_initrd(void);
65#else 65#else
66static inline void __init load_ucode_bsp(void) {} 66static inline void __init load_ucode_bsp(void) {}
67static inline void __cpuinit load_ucode_ap(void) {} 67static inline void load_ucode_ap(void) {}
68static inline int __init save_microcode_in_initrd(void) 68static inline int __init save_microcode_in_initrd(void)
69{ 69{
70 return 0; 70 return 0;
diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h
index c6b043f40271..50e5c58ced23 100644
--- a/arch/x86/include/asm/microcode_amd.h
+++ b/arch/x86/include/asm/microcode_amd.h
@@ -67,11 +67,11 @@ extern enum ucode_state load_microcode_amd(int cpu, const u8 *data, size_t size)
67extern u8 amd_bsp_mpb[MPB_MAX_SIZE]; 67extern u8 amd_bsp_mpb[MPB_MAX_SIZE];
68#endif 68#endif
69extern void __init load_ucode_amd_bsp(void); 69extern void __init load_ucode_amd_bsp(void);
70extern void __cpuinit load_ucode_amd_ap(void); 70extern void load_ucode_amd_ap(void);
71extern int __init save_microcode_in_initrd_amd(void); 71extern int __init save_microcode_in_initrd_amd(void);
72#else 72#else
73static inline void __init load_ucode_amd_bsp(void) {} 73static inline void __init load_ucode_amd_bsp(void) {}
74static inline void __cpuinit load_ucode_amd_ap(void) {} 74static inline void load_ucode_amd_ap(void) {}
75static inline int __init save_microcode_in_initrd_amd(void) { return -EINVAL; } 75static inline int __init save_microcode_in_initrd_amd(void) { return -EINVAL; }
76#endif 76#endif
77 77
diff --git a/arch/x86/include/asm/microcode_intel.h b/arch/x86/include/asm/microcode_intel.h
index 87a085333cbf..9067166409bf 100644
--- a/arch/x86/include/asm/microcode_intel.h
+++ b/arch/x86/include/asm/microcode_intel.h
@@ -65,12 +65,12 @@ update_match_revision(struct microcode_header_intel *mc_header, int rev);
65 65
66#ifdef CONFIG_MICROCODE_INTEL_EARLY 66#ifdef CONFIG_MICROCODE_INTEL_EARLY
67extern void __init load_ucode_intel_bsp(void); 67extern void __init load_ucode_intel_bsp(void);
68extern void __cpuinit load_ucode_intel_ap(void); 68extern void load_ucode_intel_ap(void);
69extern void show_ucode_info_early(void); 69extern void show_ucode_info_early(void);
70extern int __init save_microcode_in_initrd_intel(void); 70extern int __init save_microcode_in_initrd_intel(void);
71#else 71#else
72static inline __init void load_ucode_intel_bsp(void) {} 72static inline __init void load_ucode_intel_bsp(void) {}
73static inline __cpuinit void load_ucode_intel_ap(void) {} 73static inline void load_ucode_intel_ap(void) {}
74static inline void show_ucode_info_early(void) {} 74static inline void show_ucode_info_early(void) {}
75static inline int __init save_microcode_in_initrd_intel(void) { return -EINVAL; } 75static inline int __init save_microcode_in_initrd_intel(void) { return -EINVAL; }
76#endif 76#endif
diff --git a/arch/x86/include/asm/mmconfig.h b/arch/x86/include/asm/mmconfig.h
index 9b119da1d105..04a3fed22cfe 100644
--- a/arch/x86/include/asm/mmconfig.h
+++ b/arch/x86/include/asm/mmconfig.h
@@ -2,8 +2,8 @@
2#define _ASM_X86_MMCONFIG_H 2#define _ASM_X86_MMCONFIG_H
3 3
4#ifdef CONFIG_PCI_MMCONFIG 4#ifdef CONFIG_PCI_MMCONFIG
5extern void __cpuinit fam10h_check_enable_mmcfg(void); 5extern void fam10h_check_enable_mmcfg(void);
6extern void __cpuinit check_enable_amd_mmconf_dmi(void); 6extern void check_enable_amd_mmconf_dmi(void);
7#else 7#else
8static inline void fam10h_check_enable_mmcfg(void) { } 8static inline void fam10h_check_enable_mmcfg(void) { }
9static inline void check_enable_amd_mmconf_dmi(void) { } 9static inline void check_enable_amd_mmconf_dmi(void) { }
diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h
index 3e2f42a4b872..626cf70082d7 100644
--- a/arch/x86/include/asm/mpspec.h
+++ b/arch/x86/include/asm/mpspec.h
@@ -94,7 +94,7 @@ static inline void early_reserve_e820_mpc_new(void) { }
94#define default_get_smp_config x86_init_uint_noop 94#define default_get_smp_config x86_init_uint_noop
95#endif 95#endif
96 96
97void __cpuinit generic_processor_info(int apicid, int version); 97void generic_processor_info(int apicid, int version);
98#ifdef CONFIG_ACPI 98#ifdef CONFIG_ACPI
99extern void mp_register_ioapic(int id, u32 address, u32 gsi_base); 99extern void mp_register_ioapic(int id, u32 address, u32 gsi_base);
100extern void mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, 100extern void mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger,
diff --git a/arch/x86/include/asm/numa.h b/arch/x86/include/asm/numa.h
index 1b99ee5c9f00..4064acae625d 100644
--- a/arch/x86/include/asm/numa.h
+++ b/arch/x86/include/asm/numa.h
@@ -39,7 +39,7 @@ static inline void set_apicid_to_node(int apicid, s16 node)
39 __apicid_to_node[apicid] = node; 39 __apicid_to_node[apicid] = node;
40} 40}
41 41
42extern int __cpuinit numa_cpu_node(int cpu); 42extern int numa_cpu_node(int cpu);
43 43
44#else /* CONFIG_NUMA */ 44#else /* CONFIG_NUMA */
45static inline void set_apicid_to_node(int apicid, s16 node) 45static inline void set_apicid_to_node(int apicid, s16 node)
@@ -60,8 +60,8 @@ static inline int numa_cpu_node(int cpu)
60extern void numa_set_node(int cpu, int node); 60extern void numa_set_node(int cpu, int node);
61extern void numa_clear_node(int cpu); 61extern void numa_clear_node(int cpu);
62extern void __init init_cpu_to_node(void); 62extern void __init init_cpu_to_node(void);
63extern void __cpuinit numa_add_cpu(int cpu); 63extern void numa_add_cpu(int cpu);
64extern void __cpuinit numa_remove_cpu(int cpu); 64extern void numa_remove_cpu(int cpu);
65#else /* CONFIG_NUMA */ 65#else /* CONFIG_NUMA */
66static inline void numa_set_node(int cpu, int node) { } 66static inline void numa_set_node(int cpu, int node) { }
67static inline void numa_clear_node(int cpu) { } 67static inline void numa_clear_node(int cpu) { }
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 29937c4f6ff8..24cf5aefb704 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -164,7 +164,7 @@ extern const struct seq_operations cpuinfo_op;
164#define cache_line_size() (boot_cpu_data.x86_cache_alignment) 164#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
165 165
166extern void cpu_detect(struct cpuinfo_x86 *c); 166extern void cpu_detect(struct cpuinfo_x86 *c);
167extern void __cpuinit fpu_detect(struct cpuinfo_x86 *c); 167extern void fpu_detect(struct cpuinfo_x86 *c);
168 168
169extern void early_cpu_init(void); 169extern void early_cpu_init(void);
170extern void identify_boot_cpu(void); 170extern void identify_boot_cpu(void);
diff --git a/arch/x86/include/asm/prom.h b/arch/x86/include/asm/prom.h
index 60bef663609a..bade6ac3b14f 100644
--- a/arch/x86/include/asm/prom.h
+++ b/arch/x86/include/asm/prom.h
@@ -27,7 +27,7 @@ extern int of_ioapic;
27extern u64 initial_dtb; 27extern u64 initial_dtb;
28extern void add_dtb(u64 data); 28extern void add_dtb(u64 data);
29extern void x86_add_irq_domains(void); 29extern void x86_add_irq_domains(void);
30void __cpuinit x86_of_pci_init(void); 30void x86_of_pci_init(void);
31void x86_dtb_init(void); 31void x86_dtb_init(void);
32#else 32#else
33static inline void add_dtb(u64 data) { } 33static inline void add_dtb(u64 data) { }
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index b073aaea747c..4137890e88e3 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -179,7 +179,7 @@ static inline int wbinvd_on_all_cpus(void)
179} 179}
180#endif /* CONFIG_SMP */ 180#endif /* CONFIG_SMP */
181 181
182extern unsigned disabled_cpus __cpuinitdata; 182extern unsigned disabled_cpus;
183 183
184#ifdef CONFIG_X86_32_SMP 184#ifdef CONFIG_X86_32_SMP
185/* 185/*
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index d81a972dd506..2627a81253ee 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -195,7 +195,7 @@ static int __init acpi_parse_madt(struct acpi_table_header *table)
195 return 0; 195 return 0;
196} 196}
197 197
198static void __cpuinit acpi_register_lapic(int id, u8 enabled) 198static void acpi_register_lapic(int id, u8 enabled)
199{ 199{
200 unsigned int ver = 0; 200 unsigned int ver = 0;
201 201
@@ -607,7 +607,7 @@ void __init acpi_set_irq_model_ioapic(void)
607#ifdef CONFIG_ACPI_HOTPLUG_CPU 607#ifdef CONFIG_ACPI_HOTPLUG_CPU
608#include <acpi/processor.h> 608#include <acpi/processor.h>
609 609
610static void __cpuinit acpi_map_cpu2node(acpi_handle handle, int cpu, int physid) 610static void acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
611{ 611{
612#ifdef CONFIG_ACPI_NUMA 612#ifdef CONFIG_ACPI_NUMA
613 int nid; 613 int nid;
@@ -620,7 +620,7 @@ static void __cpuinit acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
620#endif 620#endif
621} 621}
622 622
623static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu) 623static int _acpi_map_lsapic(acpi_handle handle, int *pcpu)
624{ 624{
625 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 625 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
626 union acpi_object *obj; 626 union acpi_object *obj;
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 2a34aaf3c8f1..33120100ff5e 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -48,9 +48,20 @@ int x86_acpi_suspend_lowlevel(void)
48#ifndef CONFIG_64BIT 48#ifndef CONFIG_64BIT
49 native_store_gdt((struct desc_ptr *)&header->pmode_gdt); 49 native_store_gdt((struct desc_ptr *)&header->pmode_gdt);
50 50
51 /*
52 * We have to check that we can write back the value, and not
53 * just read it. At least on 90 nm Pentium M (Family 6, Model
54 * 13), reading an invalid MSR is not guaranteed to trap, see
55 * Erratum X4 in "Intel Pentium M Processor on 90 nm Process
56 * with 2-MB L2 Cache and Intel® Processor A100 and A110 on 90
57 * nm process with 512-KB L2 Cache Specification Update".
58 */
51 if (!rdmsr_safe(MSR_EFER, 59 if (!rdmsr_safe(MSR_EFER,
52 &header->pmode_efer_low, 60 &header->pmode_efer_low,
53 &header->pmode_efer_high)) 61 &header->pmode_efer_high) &&
62 !wrmsr_safe(MSR_EFER,
63 header->pmode_efer_low,
64 header->pmode_efer_high))
54 header->pmode_behavior |= (1 << WAKEUP_BEHAVIOR_RESTORE_EFER); 65 header->pmode_behavior |= (1 << WAKEUP_BEHAVIOR_RESTORE_EFER);
55#endif /* !CONFIG_64BIT */ 66#endif /* !CONFIG_64BIT */
56 67
@@ -61,7 +72,10 @@ int x86_acpi_suspend_lowlevel(void)
61 } 72 }
62 if (!rdmsr_safe(MSR_IA32_MISC_ENABLE, 73 if (!rdmsr_safe(MSR_IA32_MISC_ENABLE,
63 &header->pmode_misc_en_low, 74 &header->pmode_misc_en_low,
64 &header->pmode_misc_en_high)) 75 &header->pmode_misc_en_high) &&
76 !wrmsr_safe(MSR_IA32_MISC_ENABLE,
77 header->pmode_misc_en_low,
78 header->pmode_misc_en_high))
65 header->pmode_behavior |= 79 header->pmode_behavior |=
66 (1 << WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE); 80 (1 << WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE);
67 header->realmode_flags = acpi_realmode_flags; 81 header->realmode_flags = acpi_realmode_flags;
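The comment added above spells out why x86_acpi_suspend_lowlevel() now requires both the read and the write-back of an MSR to succeed before setting the corresponding restore flag for wakeup: on some parts (e.g. 90 nm Pentium M) reading an invalid MSR is not guaranteed to fault, so a successful rdmsr_safe() alone proves nothing. A condensed sketch of that probe pattern as a standalone helper (msr_can_be_restored() is a hypothetical name, not a kernel API; rdmsr_safe()/wrmsr_safe() return non-zero on failure, as in the hunk above):

	static bool msr_can_be_restored(unsigned int msr, u32 *lo, u32 *hi)
	{
		/* A faulting read is caught here... */
		if (rdmsr_safe(msr, lo, hi))
			return false;
		/* ...but the read may also appear to succeed on an invalid MSR,
		 * so only trust it if writing the value back succeeds as well. */
		return wrmsr_safe(msr, *lo, *hi) == 0;
	}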
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 99663b59123a..eca89c53a7f5 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -58,7 +58,7 @@
58 58
59unsigned int num_processors; 59unsigned int num_processors;
60 60
61unsigned disabled_cpus __cpuinitdata; 61unsigned disabled_cpus;
62 62
63/* Processor that is doing the boot up */ 63/* Processor that is doing the boot up */
64unsigned int boot_cpu_physical_apicid = -1U; 64unsigned int boot_cpu_physical_apicid = -1U;
@@ -544,7 +544,7 @@ static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
544 * Setup the local APIC timer for this CPU. Copy the initialized values 544 * Setup the local APIC timer for this CPU. Copy the initialized values
545 * of the boot CPU and register the clock event in the framework. 545 * of the boot CPU and register the clock event in the framework.
546 */ 546 */
547static void __cpuinit setup_APIC_timer(void) 547static void setup_APIC_timer(void)
548{ 548{
549 struct clock_event_device *levt = &__get_cpu_var(lapic_events); 549 struct clock_event_device *levt = &__get_cpu_var(lapic_events);
550 550
@@ -866,7 +866,7 @@ void __init setup_boot_APIC_clock(void)
866 setup_APIC_timer(); 866 setup_APIC_timer();
867} 867}
868 868
869void __cpuinit setup_secondary_APIC_clock(void) 869void setup_secondary_APIC_clock(void)
870{ 870{
871 setup_APIC_timer(); 871 setup_APIC_timer();
872} 872}
@@ -1229,7 +1229,7 @@ void __init init_bsp_APIC(void)
1229 apic_write(APIC_LVT1, value); 1229 apic_write(APIC_LVT1, value);
1230} 1230}
1231 1231
1232static void __cpuinit lapic_setup_esr(void) 1232static void lapic_setup_esr(void)
1233{ 1233{
1234 unsigned int oldvalue, value, maxlvt; 1234 unsigned int oldvalue, value, maxlvt;
1235 1235
@@ -1276,7 +1276,7 @@ static void __cpuinit lapic_setup_esr(void)
1276 * Used to setup local APIC while initializing BSP or bringin up APs. 1276 * Used to setup local APIC while initializing BSP or bringin up APs.
1277 * Always called with preemption disabled. 1277 * Always called with preemption disabled.
1278 */ 1278 */
1279void __cpuinit setup_local_APIC(void) 1279void setup_local_APIC(void)
1280{ 1280{
1281 int cpu = smp_processor_id(); 1281 int cpu = smp_processor_id();
1282 unsigned int value, queued; 1282 unsigned int value, queued;
@@ -1471,7 +1471,7 @@ void __cpuinit setup_local_APIC(void)
1471#endif 1471#endif
1472} 1472}
1473 1473
1474void __cpuinit end_local_APIC_setup(void) 1474void end_local_APIC_setup(void)
1475{ 1475{
1476 lapic_setup_esr(); 1476 lapic_setup_esr();
1477 1477
@@ -2107,7 +2107,7 @@ void disconnect_bsp_APIC(int virt_wire_setup)
2107 apic_write(APIC_LVT1, value); 2107 apic_write(APIC_LVT1, value);
2108} 2108}
2109 2109
2110void __cpuinit generic_processor_info(int apicid, int version) 2110void generic_processor_info(int apicid, int version)
2111{ 2111{
2112 int cpu, max = nr_cpu_ids; 2112 int cpu, max = nr_cpu_ids;
2113 bool boot_cpu_detected = physid_isset(boot_cpu_physical_apicid, 2113 bool boot_cpu_detected = physid_isset(boot_cpu_physical_apicid,
@@ -2377,7 +2377,7 @@ static struct syscore_ops lapic_syscore_ops = {
2377 .suspend = lapic_suspend, 2377 .suspend = lapic_suspend,
2378}; 2378};
2379 2379
2380static void __cpuinit apic_pm_activate(void) 2380static void apic_pm_activate(void)
2381{ 2381{
2382 apic_pm_state.active = 1; 2382 apic_pm_state.active = 1;
2383} 2383}
@@ -2402,7 +2402,7 @@ static void apic_pm_activate(void) { }
2402 2402
2403#ifdef CONFIG_X86_64 2403#ifdef CONFIG_X86_64
2404 2404
2405static int __cpuinit apic_cluster_num(void) 2405static int apic_cluster_num(void)
2406{ 2406{
2407 int i, clusters, zeros; 2407 int i, clusters, zeros;
2408 unsigned id; 2408 unsigned id;
@@ -2447,10 +2447,10 @@ static int __cpuinit apic_cluster_num(void)
2447 return clusters; 2447 return clusters;
2448} 2448}
2449 2449
2450static int __cpuinitdata multi_checked; 2450static int multi_checked;
2451static int __cpuinitdata multi; 2451static int multi;
2452 2452
2453static int __cpuinit set_multi(const struct dmi_system_id *d) 2453static int set_multi(const struct dmi_system_id *d)
2454{ 2454{
2455 if (multi) 2455 if (multi)
2456 return 0; 2456 return 0;
@@ -2459,7 +2459,7 @@ static int __cpuinit set_multi(const struct dmi_system_id *d)
2459 return 0; 2459 return 0;
2460} 2460}
2461 2461
2462static const __cpuinitconst struct dmi_system_id multi_dmi_table[] = { 2462static const struct dmi_system_id multi_dmi_table[] = {
2463 { 2463 {
2464 .callback = set_multi, 2464 .callback = set_multi,
2465 .ident = "IBM System Summit2", 2465 .ident = "IBM System Summit2",
@@ -2471,7 +2471,7 @@ static const __cpuinitconst struct dmi_system_id multi_dmi_table[] = {
2471 {} 2471 {}
2472}; 2472};
2473 2473
2474static void __cpuinit dmi_check_multi(void) 2474static void dmi_check_multi(void)
2475{ 2475{
2476 if (multi_checked) 2476 if (multi_checked)
2477 return; 2477 return;
@@ -2488,7 +2488,7 @@ static void __cpuinit dmi_check_multi(void)
2488 * multi-chassis. 2488 * multi-chassis.
2489 * Use DMI to check them 2489 * Use DMI to check them
2490 */ 2490 */
2491__cpuinit int apic_is_clustered_box(void) 2491int apic_is_clustered_box(void)
2492{ 2492{
2493 dmi_check_multi(); 2493 dmi_check_multi();
2494 if (multi) 2494 if (multi)
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
index 9a9110918ca7..3e67f9e3d7ef 100644
--- a/arch/x86/kernel/apic/apic_numachip.c
+++ b/arch/x86/kernel/apic/apic_numachip.c
@@ -74,7 +74,7 @@ static int numachip_phys_pkg_id(int initial_apic_id, int index_msb)
74 return initial_apic_id >> index_msb; 74 return initial_apic_id >> index_msb;
75} 75}
76 76
77static int __cpuinit numachip_wakeup_secondary(int phys_apicid, unsigned long start_rip) 77static int numachip_wakeup_secondary(int phys_apicid, unsigned long start_rip)
78{ 78{
79 union numachip_csr_g3_ext_irq_gen int_gen; 79 union numachip_csr_g3_ext_irq_gen int_gen;
80 80
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
index 0874799a98c6..c55224731b2d 100644
--- a/arch/x86/kernel/apic/es7000_32.c
+++ b/arch/x86/kernel/apic/es7000_32.c
@@ -130,7 +130,7 @@ int es7000_plat;
130 */ 130 */
131 131
132 132
133static int __cpuinit wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip) 133static int wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip)
134{ 134{
135 unsigned long vect = 0, psaival = 0; 135 unsigned long vect = 0, psaival = 0;
136 136
diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
index d661ee95cabf..1e42e8f305ee 100644
--- a/arch/x86/kernel/apic/numaq_32.c
+++ b/arch/x86/kernel/apic/numaq_32.c
@@ -105,7 +105,7 @@ static void __init smp_dump_qct(void)
105 } 105 }
106} 106}
107 107
108void __cpuinit numaq_tsc_disable(void) 108void numaq_tsc_disable(void)
109{ 109{
110 if (!found_numaq) 110 if (!found_numaq)
111 return; 111 return;
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index c88baa4ff0e5..140e29db478d 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -148,7 +148,7 @@ static void init_x2apic_ldr(void)
148 /* 148 /*
149 * At CPU state changes, update the x2apic cluster sibling info. 149 * At CPU state changes, update the x2apic cluster sibling info.
150 */ 150 */
151static int __cpuinit 151static int
152update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu) 152update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
153{ 153{
154 unsigned int this_cpu = (unsigned long)hcpu; 154 unsigned int this_cpu = (unsigned long)hcpu;
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 63092afb142e..1191ac1c9d25 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -209,7 +209,7 @@ EXPORT_SYMBOL_GPL(uv_possible_blades);
 unsigned long sn_rtc_cycles_per_second;
 EXPORT_SYMBOL(sn_rtc_cycles_per_second);

-static int __cpuinit uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
+static int uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
 {
 #ifdef CONFIG_SMP
 	unsigned long val;
@@ -416,7 +416,7 @@ static struct apic __refdata apic_x2apic_uv_x = {
 	.safe_wait_icr_idle = native_safe_x2apic_wait_icr_idle,
 };

-static __cpuinit void set_x2apic_extra_bits(int pnode)
+static void set_x2apic_extra_bits(int pnode)
 {
 	__this_cpu_write(x2apic_extra_bits, pnode << uvh_apicid.s.pnode_shift);
 }
@@ -735,7 +735,7 @@ static void uv_heartbeat(unsigned long ignored)
 	mod_timer_pinned(timer, jiffies + SCIR_CPU_HB_INTERVAL);
 }

-static void __cpuinit uv_heartbeat_enable(int cpu)
+static void uv_heartbeat_enable(int cpu)
 {
 	while (!uv_cpu_hub_info(cpu)->scir.enabled) {
 		struct timer_list *timer = &uv_cpu_hub_info(cpu)->scir.timer;
@@ -752,7 +752,7 @@ static void __cpuinit uv_heartbeat_enable(int cpu)
 }

 #ifdef CONFIG_HOTPLUG_CPU
-static void __cpuinit uv_heartbeat_disable(int cpu)
+static void uv_heartbeat_disable(int cpu)
 {
 	if (uv_cpu_hub_info(cpu)->scir.enabled) {
 		uv_cpu_hub_info(cpu)->scir.enabled = 0;
@@ -764,8 +764,8 @@ static void __cpuinit uv_heartbeat_disable(int cpu)
 /*
  * cpu hotplug notifier
  */
-static __cpuinit int uv_scir_cpu_notify(struct notifier_block *self,
-					unsigned long action, void *hcpu)
+static int uv_scir_cpu_notify(struct notifier_block *self, unsigned long action,
+			      void *hcpu)
 {
 	long cpu = (long)hcpu;

@@ -835,7 +835,7 @@ int uv_set_vga_state(struct pci_dev *pdev, bool decode,
  * Called on each cpu to initialize the per_cpu UV data area.
  * FIXME: hotplug not supported yet
  */
-void __cpuinit uv_cpu_init(void)
+void uv_cpu_init(void)
 {
 	/* CPU 0 initilization will be done via uv_system_init. */
 	if (!uv_blade_info)
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index c587a8757227..f654ecefea5b 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -69,7 +69,7 @@ static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
 extern void vide(void);
 __asm__(".align 4\nvide: ret");

-static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c)
+static void init_amd_k5(struct cpuinfo_x86 *c)
 {
 /*
  * General Systems BIOSen alias the cpu frequency registers
@@ -87,7 +87,7 @@ static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c)
 }


-static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
+static void init_amd_k6(struct cpuinfo_x86 *c)
 {
 	u32 l, h;
 	int mbytes = get_num_physpages() >> (20-PAGE_SHIFT);
@@ -179,7 +179,7 @@ static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
 	}
 }

-static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
+static void amd_k7_smp_check(struct cpuinfo_x86 *c)
 {
 	/* calling is from identify_secondary_cpu() ? */
 	if (!c->cpu_index)
@@ -222,7 +222,7 @@ static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
 	add_taint(TAINT_UNSAFE_SMP, LOCKDEP_NOW_UNRELIABLE);
 }

-static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
+static void init_amd_k7(struct cpuinfo_x86 *c)
 {
 	u32 l, h;

@@ -267,7 +267,7 @@ static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
  * To workaround broken NUMA config. Read the comment in
  * srat_detect_node().
  */
-static int __cpuinit nearby_node(int apicid)
+static int nearby_node(int apicid)
 {
 	int i, node;

@@ -292,7 +292,7 @@ static int __cpuinit nearby_node(int apicid)
  * (2) AMD processors supporting compute units
  */
 #ifdef CONFIG_X86_HT
-static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c)
+static void amd_get_topology(struct cpuinfo_x86 *c)
 {
 	u32 nodes, cores_per_cu = 1;
 	u8 node_id;
@@ -342,7 +342,7 @@ static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c)
  * On a AMD dual core setup the lower bits of the APIC id distingush the cores.
  * Assumes number of cores is a power of two.
  */
-static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
+static void amd_detect_cmp(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_HT
 	unsigned bits;
@@ -369,7 +369,7 @@ u16 amd_get_nb_id(int cpu)
 }
 EXPORT_SYMBOL_GPL(amd_get_nb_id);

-static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
+static void srat_detect_node(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_NUMA
 	int cpu = smp_processor_id();
@@ -421,7 +421,7 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
 #endif
 }

-static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
+static void early_init_amd_mc(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_HT
 	unsigned bits, ecx;
@@ -447,7 +447,7 @@ static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
 #endif
 }

-static void __cpuinit bsp_init_amd(struct cpuinfo_x86 *c)
+static void bsp_init_amd(struct cpuinfo_x86 *c)
 {
 	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {

@@ -475,7 +475,7 @@ static void __cpuinit bsp_init_amd(struct cpuinfo_x86 *c)
 	}
 }

-static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
+static void early_init_amd(struct cpuinfo_x86 *c)
 {
 	early_init_amd_mc(c);

@@ -514,7 +514,7 @@ static const int amd_erratum_383[];
 static const int amd_erratum_400[];
 static bool cpu_has_amd_erratum(const int *erratum);

-static void __cpuinit init_amd(struct cpuinfo_x86 *c)
+static void init_amd(struct cpuinfo_x86 *c)
 {
 	u32 dummy;
 	unsigned long long value;
@@ -740,8 +740,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 }

 #ifdef CONFIG_X86_32
-static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
-					     unsigned int size)
+static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 {
 	/* AMD errata T13 (order #21922) */
 	if ((c->x86 == 6)) {
@@ -757,7 +756,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
 }
 #endif

-static void __cpuinit cpu_set_tlb_flushall_shift(struct cpuinfo_x86 *c)
+static void cpu_set_tlb_flushall_shift(struct cpuinfo_x86 *c)
 {
 	tlb_flushall_shift = 5;

@@ -765,7 +764,7 @@ static void __cpuinit cpu_set_tlb_flushall_shift(struct cpuinfo_x86 *c)
 		tlb_flushall_shift = 4;
 }

-static void __cpuinit cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
+static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
 {
 	u32 ebx, eax, ecx, edx;
 	u16 mask = 0xfff;
@@ -820,7 +819,7 @@ static void __cpuinit cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
 	cpu_set_tlb_flushall_shift(c);
 }

-static const struct cpu_dev __cpuinitconst amd_cpu_dev = {
+static const struct cpu_dev amd_cpu_dev = {
 	.c_vendor = "AMD",
 	.c_ident = { "AuthenticAMD" },
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index 159103c0b1f4..fbf6c3bc2400 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -11,7 +11,7 @@

 #ifdef CONFIG_X86_OOSTORE

-static u32 __cpuinit power2(u32 x)
+static u32 power2(u32 x)
 {
 	u32 s = 1;

@@ -25,7 +25,7 @@ static u32 __cpuinit power2(u32 x)
 /*
  * Set up an actual MCR
  */
-static void __cpuinit centaur_mcr_insert(int reg, u32 base, u32 size, int key)
+static void centaur_mcr_insert(int reg, u32 base, u32 size, int key)
 {
 	u32 lo, hi;

@@ -42,7 +42,7 @@ static void __cpuinit centaur_mcr_insert(int reg, u32 base, u32 size, int key)
  *
  * Shortcut: We know you can't put 4Gig of RAM on a winchip
  */
-static u32 __cpuinit ramtop(void)
+static u32 ramtop(void)
 {
 	u32 clip = 0xFFFFFFFFUL;
 	u32 top = 0;
@@ -91,7 +91,7 @@ static u32 __cpuinit ramtop(void)
 /*
  * Compute a set of MCR's to give maximum coverage
  */
-static int __cpuinit centaur_mcr_compute(int nr, int key)
+static int centaur_mcr_compute(int nr, int key)
 {
 	u32 mem = ramtop();
 	u32 root = power2(mem);
@@ -157,7 +157,7 @@ static int __cpuinit centaur_mcr_compute(int nr, int key)
 	return ct;
 }

-static void __cpuinit centaur_create_optimal_mcr(void)
+static void centaur_create_optimal_mcr(void)
 {
 	int used;
 	int i;
@@ -181,7 +181,7 @@ static void __cpuinit centaur_create_optimal_mcr(void)
 		wrmsr(MSR_IDT_MCR0+i, 0, 0);
 }

-static void __cpuinit winchip2_create_optimal_mcr(void)
+static void winchip2_create_optimal_mcr(void)
 {
 	u32 lo, hi;
 	int used;
@@ -217,7 +217,7 @@ static void __cpuinit winchip2_create_optimal_mcr(void)
 /*
  * Handle the MCR key on the Winchip 2.
  */
-static void __cpuinit winchip2_unprotect_mcr(void)
+static void winchip2_unprotect_mcr(void)
 {
 	u32 lo, hi;
 	u32 key;
@@ -229,7 +229,7 @@ static void __cpuinit winchip2_unprotect_mcr(void)
 	wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
 }

-static void __cpuinit winchip2_protect_mcr(void)
+static void winchip2_protect_mcr(void)
 {
 	u32 lo, hi;

@@ -247,7 +247,7 @@ static void __cpuinit winchip2_protect_mcr(void)
 #define RNG_ENABLED (1 << 3)
 #define RNG_ENABLE (1 << 6) /* MSR_VIA_RNG */

-static void __cpuinit init_c3(struct cpuinfo_x86 *c)
+static void init_c3(struct cpuinfo_x86 *c)
 {
 	u32 lo, hi;

@@ -318,7 +318,7 @@ enum {
 	EAMD3D = 1<<20,
 };

-static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c)
+static void early_init_centaur(struct cpuinfo_x86 *c)
 {
 	switch (c->x86) {
 #ifdef CONFIG_X86_32
@@ -337,7 +337,7 @@ static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c)
 #endif
 }

-static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
+static void init_centaur(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_32
 	char *name;
@@ -468,7 +468,7 @@ static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
 #endif
 }

-static unsigned int __cpuinit
+static unsigned int
 centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 {
 #ifdef CONFIG_X86_32
@@ -488,7 +488,7 @@ centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 	return size;
 }

-static const struct cpu_dev __cpuinitconst centaur_cpu_dev = {
+static const struct cpu_dev centaur_cpu_dev = {
 	.c_vendor = "Centaur",
 	.c_ident = { "CentaurHauls" },
 	.c_early_init = early_init_centaur,
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 548bd039784e..25eb2747b063 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -63,7 +63,7 @@ void __init setup_cpu_local_masks(void)
 	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
 }

-static void __cpuinit default_init(struct cpuinfo_x86 *c)
+static void default_init(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_64
 	cpu_detect_cache_sizes(c);
@@ -80,13 +80,13 @@ static void __cpuinit default_init(struct cpuinfo_x86 *c)
 #endif
 }

-static const struct cpu_dev __cpuinitconst default_cpu = {
+static const struct cpu_dev default_cpu = {
 	.c_init = default_init,
 	.c_vendor = "Unknown",
 	.c_x86_vendor = X86_VENDOR_UNKNOWN,
 };

-static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
+static const struct cpu_dev *this_cpu = &default_cpu;

 DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 #ifdef CONFIG_X86_64
@@ -160,8 +160,8 @@ static int __init x86_xsaveopt_setup(char *s)
 __setup("noxsaveopt", x86_xsaveopt_setup);

 #ifdef CONFIG_X86_32
-static int cachesize_override __cpuinitdata = -1;
-static int disable_x86_serial_nr __cpuinitdata = 1;
+static int cachesize_override = -1;
+static int disable_x86_serial_nr = 1;

 static int __init cachesize_setup(char *str)
 {
@@ -215,12 +215,12 @@ static inline int flag_is_changeable_p(u32 flag)
 }

 /* Probe for the CPUID instruction */
-int __cpuinit have_cpuid_p(void)
+int have_cpuid_p(void)
 {
 	return flag_is_changeable_p(X86_EFLAGS_ID);
 }

-static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
+static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 {
 	unsigned long lo, hi;

@@ -298,7 +298,7 @@ struct cpuid_dependent_feature {
 	u32 level;
 };

-static const struct cpuid_dependent_feature __cpuinitconst
+static const struct cpuid_dependent_feature
 cpuid_dependent_features[] = {
 	{ X86_FEATURE_MWAIT, 0x00000005 },
 	{ X86_FEATURE_DCA, 0x00000009 },
@@ -306,7 +306,7 @@ cpuid_dependent_features[] = {
 	{ 0, 0 }
 };

-static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
+static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
 {
 	const struct cpuid_dependent_feature *df;

@@ -344,7 +344,7 @@ static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
  */

 /* Look up CPU names by table lookup. */
-static const char *__cpuinit table_lookup_model(struct cpuinfo_x86 *c)
+static const char *table_lookup_model(struct cpuinfo_x86 *c)
 {
 	const struct cpu_model_info *info;

@@ -364,8 +364,8 @@ static const char *__cpuinit table_lookup_model(struct cpuinfo_x86 *c)
 	return NULL; /* Not found */
 }

-__u32 cpu_caps_cleared[NCAPINTS] __cpuinitdata;
-__u32 cpu_caps_set[NCAPINTS] __cpuinitdata;
+__u32 cpu_caps_cleared[NCAPINTS];
+__u32 cpu_caps_set[NCAPINTS];

 void load_percpu_segment(int cpu)
 {
@@ -394,9 +394,9 @@ void switch_to_new_gdt(int cpu)
 	load_percpu_segment(cpu);
 }

-static const struct cpu_dev *__cpuinitdata cpu_devs[X86_VENDOR_NUM] = {};
+static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};

-static void __cpuinit get_model_name(struct cpuinfo_x86 *c)
+static void get_model_name(struct cpuinfo_x86 *c)
 {
 	unsigned int *v;
 	char *p, *q;
@@ -425,7 +425,7 @@ static void __cpuinit get_model_name(struct cpuinfo_x86 *c)
 	}
 }

-void __cpuinit cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
+void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
 {
 	unsigned int n, dummy, ebx, ecx, edx, l2size;

@@ -479,7 +479,7 @@ u16 __read_mostly tlb_lld_4m[NR_INFO];
  */
 s8 __read_mostly tlb_flushall_shift = -1;

-void __cpuinit cpu_detect_tlb(struct cpuinfo_x86 *c)
+void cpu_detect_tlb(struct cpuinfo_x86 *c)
 {
 	if (this_cpu->c_detect_tlb)
 		this_cpu->c_detect_tlb(c);
@@ -493,7 +493,7 @@ void __cpuinit cpu_detect_tlb(struct cpuinfo_x86 *c)
 		tlb_flushall_shift);
 }

-void __cpuinit detect_ht(struct cpuinfo_x86 *c)
+void detect_ht(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_HT
 	u32 eax, ebx, ecx, edx;
@@ -544,7 +544,7 @@ out:
 #endif
 }

-static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
+static void get_cpu_vendor(struct cpuinfo_x86 *c)
 {
 	char *v = c->x86_vendor_id;
 	int i;
@@ -571,7 +571,7 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 	this_cpu = &default_cpu;
 }

-void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
+void cpu_detect(struct cpuinfo_x86 *c)
 {
 	/* Get vendor name */
 	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
@@ -601,7 +601,7 @@ void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
 	}
 }

-void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
+void get_cpu_cap(struct cpuinfo_x86 *c)
 {
 	u32 tfms, xlvl;
 	u32 ebx;
@@ -652,7 +652,7 @@ void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
 	init_scattered_cpuid_features(c);
 }

-static void __cpuinit identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
+static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_32
 	int i;
@@ -769,7 +769,7 @@ void __init early_cpu_init(void)
  * unless we can find a reliable way to detect all the broken cases.
  * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
  */
-static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
+static void detect_nopl(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_32
 	clear_cpu_cap(c, X86_FEATURE_NOPL);
@@ -778,7 +778,7 @@ static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
 #endif
 }

-static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
+static void generic_identify(struct cpuinfo_x86 *c)
 {
 	c->extended_cpuid_level = 0;

@@ -815,7 +815,7 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
 /*
  * This does the hard work of actually picking apart the CPU stuff...
  */
-static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
+static void identify_cpu(struct cpuinfo_x86 *c)
 {
 	int i;

@@ -960,7 +960,7 @@ void __init identify_boot_cpu(void)
 	cpu_detect_tlb(&boot_cpu_data);
 }

-void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
+void identify_secondary_cpu(struct cpuinfo_x86 *c)
 {
 	BUG_ON(c == &boot_cpu_data);
 	identify_cpu(c);
@@ -975,14 +975,14 @@ struct msr_range {
 	unsigned max;
 };

-static const struct msr_range msr_range_array[] __cpuinitconst = {
+static const struct msr_range msr_range_array[] = {
 	{ 0x00000000, 0x00000418},
 	{ 0xc0000000, 0xc000040b},
 	{ 0xc0010000, 0xc0010142},
 	{ 0xc0011000, 0xc001103b},
 };

-static void __cpuinit __print_cpu_msr(void)
+static void __print_cpu_msr(void)
 {
 	unsigned index_min, index_max;
 	unsigned index;
@@ -1001,7 +1001,7 @@ static void __cpuinit __print_cpu_msr(void)
 	}
 }

-static int show_msr __cpuinitdata;
+static int show_msr;

 static __init int setup_show_msr(char *arg)
 {
@@ -1022,7 +1022,7 @@ static __init int setup_noclflush(char *arg)
 }
 __setup("noclflush", setup_noclflush);

-void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
+void print_cpu_info(struct cpuinfo_x86 *c)
 {
 	const char *vendor = NULL;

@@ -1051,7 +1051,7 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 	print_cpu_msr(c);
 }

-void __cpuinit print_cpu_msr(struct cpuinfo_x86 *c)
+void print_cpu_msr(struct cpuinfo_x86 *c)
 {
 	if (c->cpu_index < show_msr)
 		__print_cpu_msr();
@@ -1216,7 +1216,7 @@ static void dbg_restore_debug_regs(void)
  */
 #ifdef CONFIG_X86_64

-void __cpuinit cpu_init(void)
+void cpu_init(void)
 {
 	struct orig_ist *oist;
 	struct task_struct *me;
@@ -1315,7 +1315,7 @@ void __cpuinit cpu_init(void)

 #else

-void __cpuinit cpu_init(void)
+void cpu_init(void)
 {
 	int cpu = smp_processor_id();
 	struct task_struct *curr = current;
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index 7582f475b163..d0969c75ab54 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -15,7 +15,7 @@
 /*
  * Read NSC/Cyrix DEVID registers (DIR) to get more detailed info. about the CPU
  */
-static void __cpuinit __do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
+static void __do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
 {
 	unsigned char ccr2, ccr3;

@@ -44,7 +44,7 @@ static void __cpuinit __do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
 	}
 }

-static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
+static void do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
 {
 	unsigned long flags;

@@ -59,25 +59,25 @@ static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
  * Actually since bugs.h doesn't even reference this perhaps someone should
  * fix the documentation ???
  */
-static unsigned char Cx86_dir0_msb __cpuinitdata = 0;
+static unsigned char Cx86_dir0_msb = 0;

-static const char __cpuinitconst Cx86_model[][9] = {
+static const char Cx86_model[][9] = {
 	"Cx486", "Cx486", "5x86 ", "6x86", "MediaGX ", "6x86MX ",
 	"M II ", "Unknown"
 };
-static const char __cpuinitconst Cx486_name[][5] = {
+static const char Cx486_name[][5] = {
 	"SLC", "DLC", "SLC2", "DLC2", "SRx", "DRx",
 	"SRx2", "DRx2"
 };
-static const char __cpuinitconst Cx486S_name[][4] = {
+static const char Cx486S_name[][4] = {
 	"S", "S2", "Se", "S2e"
 };
-static const char __cpuinitconst Cx486D_name[][4] = {
+static const char Cx486D_name[][4] = {
 	"DX", "DX2", "?", "?", "?", "DX4"
 };
-static char Cx86_cb[] __cpuinitdata = "?.5x Core/Bus Clock";
-static const char __cpuinitconst cyrix_model_mult1[] = "12??43";
-static const char __cpuinitconst cyrix_model_mult2[] = "12233445";
+static char Cx86_cb[] = "?.5x Core/Bus Clock";
+static const char cyrix_model_mult1[] = "12??43";
+static const char cyrix_model_mult2[] = "12233445";

 /*
  * Reset the slow-loop (SLOP) bit on the 686(L) which is set by some old
@@ -87,7 +87,7 @@ static const char __cpuinitconst cyrix_model_mult2[] = "12233445";
  * FIXME: our newer udelay uses the tsc. We don't need to frob with SLOP
  */

-static void __cpuinit check_cx686_slop(struct cpuinfo_x86 *c)
+static void check_cx686_slop(struct cpuinfo_x86 *c)
 {
 	unsigned long flags;

@@ -112,7 +112,7 @@ static void __cpuinit check_cx686_slop(struct cpuinfo_x86 *c)
 }


-static void __cpuinit set_cx86_reorder(void)
+static void set_cx86_reorder(void)
 {
 	u8 ccr3;

@@ -127,7 +127,7 @@ static void __cpuinit set_cx86_reorder(void)
 	setCx86(CX86_CCR3, ccr3);
 }

-static void __cpuinit set_cx86_memwb(void)
+static void set_cx86_memwb(void)
 {
 	printk(KERN_INFO "Enable Memory-Write-back mode on Cyrix/NSC processor.\n");

@@ -143,7 +143,7 @@ static void __cpuinit set_cx86_memwb(void)
 * Configure later MediaGX and/or Geode processor.
 */

-static void __cpuinit geode_configure(void)
+static void geode_configure(void)
 {
 	unsigned long flags;
 	u8 ccr3;
@@ -166,7 +166,7 @@ static void __cpuinit geode_configure(void)
 	local_irq_restore(flags);
 }

-static void __cpuinit early_init_cyrix(struct cpuinfo_x86 *c)
+static void early_init_cyrix(struct cpuinfo_x86 *c)
 {
 	unsigned char dir0, dir0_msn, dir1 = 0;

@@ -185,7 +185,7 @@ static void __cpuinit early_init_cyrix(struct cpuinfo_x86 *c)
 	}
 }

-static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
+static void init_cyrix(struct cpuinfo_x86 *c)
 {
 	unsigned char dir0, dir0_msn, dir0_lsn, dir1 = 0;
 	char *buf = c->x86_model_id;
@@ -356,7 +356,7 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
 /*
  * Handle National Semiconductor branded processors
  */
-static void __cpuinit init_nsc(struct cpuinfo_x86 *c)
+static void init_nsc(struct cpuinfo_x86 *c)
 {
 	/*
 	 * There may be GX1 processors in the wild that are branded
@@ -405,7 +405,7 @@ static inline int test_cyrix_52div(void)
 	return (unsigned char) (test >> 8) == 0x02;
 }

-static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c)
+static void cyrix_identify(struct cpuinfo_x86 *c)
 {
 	/* Detect Cyrix with disabled CPUID */
 	if (c->x86 == 4 && test_cyrix_52div()) {
@@ -441,7 +441,7 @@ static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c)
 	}
 }

-static const struct cpu_dev __cpuinitconst cyrix_cpu_dev = {
+static const struct cpu_dev cyrix_cpu_dev = {
 	.c_vendor = "Cyrix",
 	.c_ident = { "CyrixInstead" },
 	.c_early_init = early_init_cyrix,
@@ -452,7 +452,7 @@ static const struct cpu_dev __cpuinitconst cyrix_cpu_dev = {

 cpu_dev_register(cyrix_cpu_dev);

-static const struct cpu_dev __cpuinitconst nsc_cpu_dev = {
+static const struct cpu_dev nsc_cpu_dev = {
 	.c_vendor = "NSC",
 	.c_ident = { "Geode by NSC" },
 	.c_init = init_nsc,
diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c
index 1e7e84a02eba..87279212d318 100644
--- a/arch/x86/kernel/cpu/hypervisor.c
+++ b/arch/x86/kernel/cpu/hypervisor.c
@@ -60,7 +60,7 @@ detect_hypervisor_vendor(void)
 	}
 }

-void __cpuinit init_hypervisor(struct cpuinfo_x86 *c)
+void init_hypervisor(struct cpuinfo_x86 *c)
 {
 	if (x86_hyper && x86_hyper->set_cpu_features)
 		x86_hyper->set_cpu_features(c);
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 9b0c441c03f5..ec7299566f79 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -26,7 +26,7 @@
 #include <asm/apic.h>
 #endif

-static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
+static void early_init_intel(struct cpuinfo_x86 *c)
 {
 	u64 misc_enable;

@@ -163,7 +163,7 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
  * This is called before we do cpu ident work
  */

-int __cpuinit ppro_with_ram_bug(void)
+int ppro_with_ram_bug(void)
 {
 	/* Uses data from early_cpu_detect now */
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
@@ -176,7 +176,7 @@ int __cpuinit ppro_with_ram_bug(void)
 	return 0;
 }

-static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c)
+static void intel_smp_check(struct cpuinfo_x86 *c)
 {
 	/* calling is from identify_secondary_cpu() ? */
 	if (!c->cpu_index)
@@ -196,7 +196,7 @@ static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c)
 	}
 }

-static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
+static void intel_workarounds(struct cpuinfo_x86 *c)
 {
 	unsigned long lo, hi;

@@ -275,12 +275,12 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
 	intel_smp_check(c);
 }
 #else
-static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
+static void intel_workarounds(struct cpuinfo_x86 *c)
 {
 }
 #endif

-static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
+static void srat_detect_node(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_NUMA
 	unsigned node;
@@ -300,7 +300,7 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
 /*
  * find out the number of processor cores on the die
  */
-static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
+static int intel_num_cpu_cores(struct cpuinfo_x86 *c)
 {
 	unsigned int eax, ebx, ecx, edx;

@@ -315,7 +315,7 @@ static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
 	return 1;
 }

-static void __cpuinit detect_vmx_virtcap(struct cpuinfo_x86 *c)
+static void detect_vmx_virtcap(struct cpuinfo_x86 *c)
 {
 	/* Intel VMX MSR indicated features */
 #define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW 0x00200000
@@ -353,7 +353,7 @@ static void __cpuinit detect_vmx_virtcap(struct cpuinfo_x86 *c)
 	}
 }

-static void __cpuinit init_intel(struct cpuinfo_x86 *c)
+static void init_intel(struct cpuinfo_x86 *c)
 {
 	unsigned int l2 = 0;

@@ -472,7 +472,7 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 }

 #ifdef CONFIG_X86_32
-static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
+static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 {
 	/*
 	 * Intel PIII Tualatin. This comes in two flavours.
@@ -506,7 +506,7 @@ static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned i

 #define STLB_4K 0x41

-static const struct _tlb_table intel_tlb_table[] __cpuinitconst = {
+static const struct _tlb_table intel_tlb_table[] = {
 	{ 0x01, TLB_INST_4K, 32, " TLB_INST 4 KByte pages, 4-way set associative" },
 	{ 0x02, TLB_INST_4M, 2, " TLB_INST 4 MByte pages, full associative" },
 	{ 0x03, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way set associative" },
@@ -536,7 +536,7 @@ static const struct _tlb_table intel_tlb_table[] __cpuinitconst = {
 	{ 0x00, 0, 0 }
 };

-static void __cpuinit intel_tlb_lookup(const unsigned char desc)
+static void intel_tlb_lookup(const unsigned char desc)
 {
 	unsigned char k;
 	if (desc == 0)
@@ -605,7 +605,7 @@ static void __cpuinit intel_tlb_lookup(const unsigned char desc)
 	}
 }

-static void __cpuinit intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c)
+static void intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c)
 {
 	switch ((c->x86 << 8) + c->x86_model) {
 	case 0x60f: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
@@ -634,7 +634,7 @@ static void __cpuinit intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c)
 	}
 }

-static void __cpuinit intel_detect_tlb(struct cpuinfo_x86 *c)
+static void intel_detect_tlb(struct cpuinfo_x86 *c)
 {
 	int i, j, n;
 	unsigned int regs[4];
@@ -661,7 +661,7 @@ static void __cpuinit intel_detect_tlb(struct cpuinfo_x86 *c)
 	intel_tlb_flushall_shift_set(c);
 }

-static const struct cpu_dev __cpuinitconst intel_cpu_dev = {
+static const struct cpu_dev intel_cpu_dev = {
 	.c_vendor = "Intel",
 	.c_ident = { "GenuineIntel" },
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 8dc72dda66fe..1414c90feaba 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -37,7 +37,7 @@ struct _cache_table {
 /* All the cache descriptor types we care about (no TLB or
    trace cache entries) */

-static const struct _cache_table __cpuinitconst cache_table[] =
+static const struct _cache_table cache_table[] =
 {
 	{ 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */
 	{ 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */
@@ -203,7 +203,7 @@ union l3_cache {
 	unsigned val;
 };

-static const unsigned short __cpuinitconst assocs[] = {
+static const unsigned short assocs[] = {
 	[1] = 1,
 	[2] = 2,
 	[4] = 4,
@@ -217,10 +217,10 @@ static const unsigned short __cpuinitconst assocs[] = {
 	[0xf] = 0xffff /* fully associative - no way to show this currently */
 };

-static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 };
-static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 };
+static const unsigned char levels[] = { 1, 1, 2, 3 };
+static const unsigned char types[] = { 1, 2, 3, 3 };

-static void __cpuinit
+static void
 amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
 		     union _cpuid4_leaf_ebx *ebx,
 		     union _cpuid4_leaf_ecx *ecx)
@@ -302,7 +302,7 @@ struct _cache_attr {
 /*
  * L3 cache descriptors
  */
-static void __cpuinit amd_calc_l3_indices(struct amd_northbridge *nb)
+static void amd_calc_l3_indices(struct amd_northbridge *nb)
 {
 	struct amd_l3_cache *l3 = &nb->l3_cache;
 	unsigned int sc0, sc1, sc2, sc3;
@@ -325,7 +325,7 @@ static void __cpuinit amd_calc_l3_indices(struct amd_northbridge *nb)
 	l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
 }

-static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
+static void amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
 {
 	int node;

@@ -528,8 +528,7 @@ static struct _cache_attr subcaches =
 #endif /* CONFIG_AMD_NB && CONFIG_SYSFS */

 static int
-__cpuinit cpuid4_cache_lookup_regs(int index,
-				   struct _cpuid4_info_regs *this_leaf)
+cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *this_leaf)
 {
 	union _cpuid4_leaf_eax eax;
 	union _cpuid4_leaf_ebx ebx;
@@ -560,7 +559,7 @@ __cpuinit cpuid4_cache_lookup_regs(int index,
 	return 0;
 }

-static int __cpuinit find_num_cache_leaves(struct cpuinfo_x86 *c)
+static int find_num_cache_leaves(struct cpuinfo_x86 *c)
 {
 	unsigned int eax, ebx, ecx, edx, op;
 	union _cpuid4_leaf_eax cache_eax;
@@ -580,7 +579,7 @@ static int __cpuinit find_num_cache_leaves(struct cpuinfo_x86 *c)
 	return i;
 }

-void __cpuinit init_amd_cacheinfo(struct cpuinfo_x86 *c)
+void init_amd_cacheinfo(struct cpuinfo_x86 *c)
 {

 	if (cpu_has_topoext) {
@@ -593,7 +592,7 @@ void __cpuinit init_amd_cacheinfo(struct cpuinfo_x86 *c)
 	}
 }

-unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
+unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c)
 {
 	/* Cache sizes */
 	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
@@ -744,7 +743,7 @@ static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);

 #ifdef CONFIG_SMP

-static int __cpuinit cache_shared_amd_cpu_map_setup(unsigned int cpu, int index)
+static int cache_shared_amd_cpu_map_setup(unsigned int cpu, int index)
 {
 	struct _cpuid4_info *this_leaf;
 	int i, sibling;
@@ -793,7 +792,7 @@ static int __cpuinit cache_shared_amd_cpu_map_setup(unsigned int cpu, int index)
 	return 1;
 }

-static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
+static void cache_shared_cpu_map_setup(unsigned int cpu, int index)
 {
 	struct _cpuid4_info *this_leaf, *sibling_leaf;
 	unsigned long num_threads_sharing;
@@ -828,7 +827,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
 		}
 	}
 }
-static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
+static void cache_remove_shared_cpu_map(unsigned int cpu, int index)
 {
 	struct _cpuid4_info *this_leaf, *sibling_leaf;
 	int sibling;
@@ -841,16 +840,16 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
 	}
 }
 #else
-static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
+static void cache_shared_cpu_map_setup(unsigned int cpu, int index)
 {
 }

-static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
+static void cache_remove_shared_cpu_map(unsigned int cpu, int index)
 {
 }
 #endif

-static void __cpuinit free_cache_attributes(unsigned int cpu)
+static void free_cache_attributes(unsigned int cpu)
 {
 	int i;

@@ -861,7 +860,7 @@ static void __cpuinit free_cache_attributes(unsigned int cpu)
 	per_cpu(ici_cpuid4_info, cpu) = NULL;
 }

-static void __cpuinit get_cpu_leaves(void *_retval)
+static void get_cpu_leaves(void *_retval)
 {
 	int j, *retval = _retval, cpu = smp_processor_id();

@@ -881,7 +880,7 @@ static void __cpuinit get_cpu_leaves(void *_retval)
 	}
 }

-static int __cpuinit detect_cache_attributes(unsigned int cpu)
+static int detect_cache_attributes(unsigned int cpu)
 {
 	int retval;

@@ -1015,7 +1014,7 @@ static struct attribute *default_attrs[] = {
 };

 #ifdef CONFIG_AMD_NB
-static struct attribute ** __cpuinit amd_l3_attrs(void)
+static struct attribute **amd_l3_attrs(void)
 {
 	static struct attribute **attrs;
 	int n;
@@ -1091,7 +1090,7 @@ static struct kobj_type ktype_percpu_entry = {
 	.sysfs_ops = &sysfs_ops,
 };

-static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
+static void cpuid4_cache_sysfs_exit(unsigned int cpu)
 {
 	kfree(per_cpu(ici_cache_kobject, cpu));
 	kfree(per_cpu(ici_index_kobject, cpu));
@@ -1100,7 +1099,7 @@ static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
 	free_cache_attributes(cpu);
 }

-static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
+static int cpuid4_cache_sysfs_init(unsigned int cpu)
 {
 	int err;

@@ -1132,7 +1131,7 @@ err_out:
 static DECLARE_BITMAP(cache_dev_map, NR_CPUS);

 /* Add/Remove cache interface for CPU device */
-static int __cpuinit cache_add_dev(struct device *dev)
+static int cache_add_dev(struct device *dev)
 {
 	unsigned int cpu = dev->id;
 	unsigned long i, j;
@@ -1183,7 +1182,7 @@ static int __cpuinit cache_add_dev(struct device *dev)
 	return 0;
 }

-static void __cpuinit cache_remove_dev(struct device *dev)
+static void cache_remove_dev(struct device *dev)
 {
 	unsigned int cpu = dev->id;
 	unsigned long i;
@@ -1200,8 +1199,8 @@ static void __cpuinit cache_remove_dev(struct device *dev)
 	cpuid4_cache_sysfs_exit(cpu);
 }

-static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
-					    unsigned long action, void *hcpu)
+static int cacheinfo_cpu_callback(struct notifier_block *nfb,
+				  unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
 	struct device *dev;
@@ -1220,7 +1219,7 @@ static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
 	return NOTIFY_OK;
 }

-static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
+static struct notifier_block cacheinfo_cpu_notifier = {
 	.notifier_call = cacheinfo_cpu_callback,
 };

diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index bf49cdbb010f..87a65c939bcd 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1363,7 +1363,7 @@ int mce_notify_irq(void)
 }
 EXPORT_SYMBOL_GPL(mce_notify_irq);

-static int __cpuinit __mcheck_cpu_mce_banks_init(void)
+static int __mcheck_cpu_mce_banks_init(void)
 {
 	int i;
 	u8 num_banks = mca_cfg.banks;
@@ -1384,7 +1384,7 @@ static int __cpuinit __mcheck_cpu_mce_banks_init(void)
 /*
  * Initialize Machine Checks for a CPU.
  */
-static int __cpuinit __mcheck_cpu_cap_init(void)
+static int __mcheck_cpu_cap_init(void)
 {
 	unsigned b;
 	u64 cap;
@@ -1483,7 +1483,7 @@ static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs)
 }

 /* Add per CPU specific workarounds here */
-static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
+static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
 {
 	struct mca_config *cfg = &mca_cfg;

@@ -1593,7 +1593,7 @@ static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
 	return 0;
 }

-static int __cpuinit __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
+static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
 {
 	if (c->x86 != 5)
 		return 0;
@@ -1664,7 +1664,7 @@ void (*machine_check_vector)(struct pt_regs *, long error_code) =
  * Called for each booted CPU to set up machine checks.
  * Must be called with preempt off:
  */
-void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
+void mcheck_cpu_init(struct cpuinfo_x86 *c)
 {
 	if (mca_cfg.disabled)
 		return;
@@ -2082,7 +2082,6 @@ static struct bus_type mce_subsys = {

 DEFINE_PER_CPU(struct device *, mce_device);

-__cpuinitdata
 void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);

 static inline struct mce_bank *attr_to_bank(struct device_attribute *attr)
@@ -2228,7 +2227,7 @@ static void mce_device_release(struct device *dev)
 }

 /* Per cpu device init. All of the cpus still share the same ctrl bank: */
-static __cpuinit int mce_device_create(unsigned int cpu)
+static int mce_device_create(unsigned int cpu)
 {
 	struct device *dev;
 	int err;
@@ -2274,7 +2273,7 @@ error:
 	return err;
 }

-static __cpuinit void mce_device_remove(unsigned int cpu)
+static void mce_device_remove(unsigned int cpu)
 {
 	struct device *dev = per_cpu(mce_device, cpu);
 	int i;
@@ -2294,7 +2293,7 @@ static __cpuinit void mce_device_remove(unsigned int cpu)
 }

 /* Make sure there are no machine checks on offlined CPUs. */
-static void __cpuinit mce_disable_cpu(void *h)
+static void mce_disable_cpu(void *h)
 {
 	unsigned long action = *(unsigned long *)h;
 	int i;
@@ -2312,7 +2311,7 @@ static void __cpuinit mce_disable_cpu(void *h)
 	}
 }

-static void __cpuinit mce_reenable_cpu(void *h)
+static void mce_reenable_cpu(void *h)
 {
 	unsigned long action = *(unsigned long *)h;
 	int i;
@@ -2331,7 +2330,7 @@ static void __cpuinit mce_reenable_cpu(void *h)
 }

 /* Get notified when a cpu comes on/off. Be hotplug friendly. */
-static int __cpuinit
+static int
 mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
@@ -2367,7 +2366,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 	return NOTIFY_OK;
 }

-static struct notifier_block mce_cpu_notifier __cpuinitdata = {
+static struct notifier_block mce_cpu_notifier = {
 	.notifier_call = mce_cpu_callback,
 };

diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 9cb52767999a..603df4f74640 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -458,10 +458,8 @@ static struct kobj_type threshold_ktype = {
 	.default_attrs = default_attrs,
 };

-static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
-					       unsigned int bank,
-					       unsigned int block,
-					       u32 address)
+static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
+				     unsigned int block, u32 address)
 {
 	struct threshold_block *b = NULL;
 	u32 low, high;
@@ -543,7 +541,7 @@ out_free:
 	return err;
 }

-static __cpuinit int __threshold_add_blocks(struct threshold_bank *b)
+static int __threshold_add_blocks(struct threshold_bank *b)
 {
 	struct list_head *head = &b->blocks->miscj;
 	struct threshold_block *pos = NULL;
@@ -567,7 +565,7 @@ static __cpuinit int __threshold_add_blocks(struct threshold_bank *b)
 	return err;
 }

-static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
+static int threshold_create_bank(unsigned int cpu, unsigned int bank)
 {
 	struct device *dev = per_cpu(mce_device, cpu);
 	struct amd_northbridge *nb = NULL;
@@ -632,7 +630,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 }

 /* create dir/files for all valid threshold banks */
-static __cpuinit int threshold_create_device(unsigned int cpu)
+static int threshold_create_device(unsigned int cpu)
 {
 	unsigned int bank;
 	struct threshold_bank **bp;
@@ -736,7 +734,7 @@ static void threshold_remove_device(unsigned int cpu)
 }

 /* get notified when a cpu comes on/off */
-static void __cpuinit
+static void
 amd_64_threshold_cpu_callback(unsigned long action, unsigned int cpu)
 {
 	switch (action) {
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 41e8e00a6637..3eec7de76efb 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -240,8 +240,7 @@ __setup("int_pln_enable", int_pln_enable_setup);
240 240
241#ifdef CONFIG_SYSFS 241#ifdef CONFIG_SYSFS
242/* Add/Remove thermal_throttle interface for CPU device: */ 242/* Add/Remove thermal_throttle interface for CPU device: */
243static __cpuinit int thermal_throttle_add_dev(struct device *dev, 243static int thermal_throttle_add_dev(struct device *dev, unsigned int cpu)
244 unsigned int cpu)
245{ 244{
246 int err; 245 int err;
247 struct cpuinfo_x86 *c = &cpu_data(cpu); 246 struct cpuinfo_x86 *c = &cpu_data(cpu);
@@ -267,7 +266,7 @@ static __cpuinit int thermal_throttle_add_dev(struct device *dev,
267 return err; 266 return err;
268} 267}
269 268
270static __cpuinit void thermal_throttle_remove_dev(struct device *dev) 269static void thermal_throttle_remove_dev(struct device *dev)
271{ 270{
272 sysfs_remove_group(&dev->kobj, &thermal_attr_group); 271 sysfs_remove_group(&dev->kobj, &thermal_attr_group);
273} 272}
@@ -276,7 +275,7 @@ static __cpuinit void thermal_throttle_remove_dev(struct device *dev)
276static DEFINE_MUTEX(therm_cpu_lock); 275static DEFINE_MUTEX(therm_cpu_lock);
277 276
278/* Get notified when a cpu comes on/off. Be hotplug friendly. */ 277/* Get notified when a cpu comes on/off. Be hotplug friendly. */
279static __cpuinit int 278static int
280thermal_throttle_cpu_callback(struct notifier_block *nfb, 279thermal_throttle_cpu_callback(struct notifier_block *nfb,
281 unsigned long action, 280 unsigned long action,
282 void *hcpu) 281 void *hcpu)
@@ -307,7 +306,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb,
307 return notifier_from_errno(err); 306 return notifier_from_errno(err);
308} 307}
309 308
310static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata = 309static struct notifier_block thermal_throttle_cpu_notifier =
311{ 310{
312 .notifier_call = thermal_throttle_cpu_callback, 311 .notifier_call = thermal_throttle_cpu_callback,
313}; 312};
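
For context (not part of the patch): the *_cpu_callback functions touched above all follow the CPU-hotplug notifier pattern of this kernel generation, which is why they can lose the __cpuinit annotation without other changes. A minimal, hypothetical sketch of that pattern is shown here; the example_* names are invented for illustration.

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/printk.h>

/* Invoked by the hotplug core with an action code and the CPU number. */
static int example_cpu_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		pr_info("example: cpu %u is now online\n", cpu);
		break;
	case CPU_DEAD:
		pr_info("example: cpu %u has gone offline\n", cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_notifier = {
	.notifier_call = example_cpu_callback,
};

static int __init example_init(void)
{
	/* After registration the callback fires on every hotplug event. */
	register_hotcpu_notifier(&example_cpu_notifier);
	return 0;
}
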
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 9e581c5cf6d0..a7c7305030cc 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1295,7 +1295,7 @@ perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
1295struct event_constraint emptyconstraint; 1295struct event_constraint emptyconstraint;
1296struct event_constraint unconstrained; 1296struct event_constraint unconstrained;
1297 1297
1298static int __cpuinit 1298static int
1299x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) 1299x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
1300{ 1300{
1301 unsigned int cpu = (long)hcpu; 1301 unsigned int cpu = (long)hcpu;
diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
index 5f0581e713c2..e09f0bfb7b8f 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
@@ -851,7 +851,7 @@ static void clear_APIC_ibs(void *dummy)
851 setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1); 851 setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
852} 852}
853 853
854static int __cpuinit 854static int
855perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) 855perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
856{ 856{
857 switch (action & ~CPU_TASKS_FROZEN) { 857 switch (action & ~CPU_TASKS_FROZEN) {
diff --git a/arch/x86/kernel/cpu/perf_event_amd_uncore.c b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
index c0c661adf03e..754291adec33 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
@@ -288,13 +288,13 @@ static struct pmu amd_l2_pmu = {
288 .read = amd_uncore_read, 288 .read = amd_uncore_read,
289}; 289};
290 290
291static struct amd_uncore * __cpuinit amd_uncore_alloc(unsigned int cpu) 291static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
292{ 292{
293 return kzalloc_node(sizeof(struct amd_uncore), GFP_KERNEL, 293 return kzalloc_node(sizeof(struct amd_uncore), GFP_KERNEL,
294 cpu_to_node(cpu)); 294 cpu_to_node(cpu));
295} 295}
296 296
297static void __cpuinit amd_uncore_cpu_up_prepare(unsigned int cpu) 297static void amd_uncore_cpu_up_prepare(unsigned int cpu)
298{ 298{
299 struct amd_uncore *uncore; 299 struct amd_uncore *uncore;
300 300
@@ -322,8 +322,8 @@ static void __cpuinit amd_uncore_cpu_up_prepare(unsigned int cpu)
322} 322}
323 323
324static struct amd_uncore * 324static struct amd_uncore *
325__cpuinit amd_uncore_find_online_sibling(struct amd_uncore *this, 325amd_uncore_find_online_sibling(struct amd_uncore *this,
326 struct amd_uncore * __percpu *uncores) 326 struct amd_uncore * __percpu *uncores)
327{ 327{
328 unsigned int cpu; 328 unsigned int cpu;
329 struct amd_uncore *that; 329 struct amd_uncore *that;
@@ -348,7 +348,7 @@ __cpuinit amd_uncore_find_online_sibling(struct amd_uncore *this,
348 return this; 348 return this;
349} 349}
350 350
351static void __cpuinit amd_uncore_cpu_starting(unsigned int cpu) 351static void amd_uncore_cpu_starting(unsigned int cpu)
352{ 352{
353 unsigned int eax, ebx, ecx, edx; 353 unsigned int eax, ebx, ecx, edx;
354 struct amd_uncore *uncore; 354 struct amd_uncore *uncore;
@@ -376,8 +376,8 @@ static void __cpuinit amd_uncore_cpu_starting(unsigned int cpu)
376 } 376 }
377} 377}
378 378
379static void __cpuinit uncore_online(unsigned int cpu, 379static void uncore_online(unsigned int cpu,
380 struct amd_uncore * __percpu *uncores) 380 struct amd_uncore * __percpu *uncores)
381{ 381{
382 struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu); 382 struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);
383 383
@@ -388,7 +388,7 @@ static void __cpuinit uncore_online(unsigned int cpu,
388 cpumask_set_cpu(cpu, uncore->active_mask); 388 cpumask_set_cpu(cpu, uncore->active_mask);
389} 389}
390 390
391static void __cpuinit amd_uncore_cpu_online(unsigned int cpu) 391static void amd_uncore_cpu_online(unsigned int cpu)
392{ 392{
393 if (amd_uncore_nb) 393 if (amd_uncore_nb)
394 uncore_online(cpu, amd_uncore_nb); 394 uncore_online(cpu, amd_uncore_nb);
@@ -397,8 +397,8 @@ static void __cpuinit amd_uncore_cpu_online(unsigned int cpu)
397 uncore_online(cpu, amd_uncore_l2); 397 uncore_online(cpu, amd_uncore_l2);
398} 398}
399 399
400static void __cpuinit uncore_down_prepare(unsigned int cpu, 400static void uncore_down_prepare(unsigned int cpu,
401 struct amd_uncore * __percpu *uncores) 401 struct amd_uncore * __percpu *uncores)
402{ 402{
403 unsigned int i; 403 unsigned int i;
404 struct amd_uncore *this = *per_cpu_ptr(uncores, cpu); 404 struct amd_uncore *this = *per_cpu_ptr(uncores, cpu);
@@ -423,7 +423,7 @@ static void __cpuinit uncore_down_prepare(unsigned int cpu,
423 } 423 }
424} 424}
425 425
426static void __cpuinit amd_uncore_cpu_down_prepare(unsigned int cpu) 426static void amd_uncore_cpu_down_prepare(unsigned int cpu)
427{ 427{
428 if (amd_uncore_nb) 428 if (amd_uncore_nb)
429 uncore_down_prepare(cpu, amd_uncore_nb); 429 uncore_down_prepare(cpu, amd_uncore_nb);
@@ -432,8 +432,7 @@ static void __cpuinit amd_uncore_cpu_down_prepare(unsigned int cpu)
 		uncore_down_prepare(cpu, amd_uncore_l2);
 }
 
-static void __cpuinit uncore_dead(unsigned int cpu,
-				  struct amd_uncore * __percpu *uncores)
+static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores)
 {
 	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);
 
@@ -445,7 +444,7 @@ static void __cpuinit uncore_dead(unsigned int cpu,
445 *per_cpu_ptr(amd_uncore_nb, cpu) = NULL; 444 *per_cpu_ptr(amd_uncore_nb, cpu) = NULL;
446} 445}
447 446
448static void __cpuinit amd_uncore_cpu_dead(unsigned int cpu) 447static void amd_uncore_cpu_dead(unsigned int cpu)
449{ 448{
450 if (amd_uncore_nb) 449 if (amd_uncore_nb)
451 uncore_dead(cpu, amd_uncore_nb); 450 uncore_dead(cpu, amd_uncore_nb);
@@ -454,7 +453,7 @@ static void __cpuinit amd_uncore_cpu_dead(unsigned int cpu)
454 uncore_dead(cpu, amd_uncore_l2); 453 uncore_dead(cpu, amd_uncore_l2);
455} 454}
456 455
457static int __cpuinit 456static int
458amd_uncore_cpu_notifier(struct notifier_block *self, unsigned long action, 457amd_uncore_cpu_notifier(struct notifier_block *self, unsigned long action,
459 void *hcpu) 458 void *hcpu)
460{ 459{
@@ -489,7 +488,7 @@ amd_uncore_cpu_notifier(struct notifier_block *self, unsigned long action,
489 return NOTIFY_OK; 488 return NOTIFY_OK;
490} 489}
491 490
492static struct notifier_block amd_uncore_cpu_notifier_block __cpuinitdata = { 491static struct notifier_block amd_uncore_cpu_notifier_block = {
493 .notifier_call = amd_uncore_cpu_notifier, 492 .notifier_call = amd_uncore_cpu_notifier,
494 .priority = CPU_PRI_PERF + 1, 493 .priority = CPU_PRI_PERF + 1,
495}; 494};
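
For context (not part of the patch): amd_uncore_alloc() in the hunks above allocates each CPU's uncore structure on that CPU's local NUMA node via kzalloc_node(). A stripped-down, hypothetical sketch of the same idiom, with invented example_* names:

#include <linux/slab.h>
#include <linux/topology.h>

struct example_percpu_data {
	unsigned int cpu;
};

static struct example_percpu_data *example_alloc(unsigned int cpu)
{
	/* Keep the allocation on the memory node that owns this CPU. */
	return kzalloc_node(sizeof(struct example_percpu_data), GFP_KERNEL,
			    cpu_to_node(cpu));
}
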
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 9dd99751ccf9..cad791dbde95 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -3297,7 +3297,7 @@ static void __init uncore_pci_exit(void)
3297/* CPU hot plug/unplug are serialized by cpu_add_remove_lock mutex */ 3297/* CPU hot plug/unplug are serialized by cpu_add_remove_lock mutex */
3298static LIST_HEAD(boxes_to_free); 3298static LIST_HEAD(boxes_to_free);
3299 3299
3300static void __cpuinit uncore_kfree_boxes(void) 3300static void uncore_kfree_boxes(void)
3301{ 3301{
3302 struct intel_uncore_box *box; 3302 struct intel_uncore_box *box;
3303 3303
@@ -3309,7 +3309,7 @@ static void __cpuinit uncore_kfree_boxes(void)
3309 } 3309 }
3310} 3310}
3311 3311
3312static void __cpuinit uncore_cpu_dying(int cpu) 3312static void uncore_cpu_dying(int cpu)
3313{ 3313{
3314 struct intel_uncore_type *type; 3314 struct intel_uncore_type *type;
3315 struct intel_uncore_pmu *pmu; 3315 struct intel_uncore_pmu *pmu;
@@ -3328,7 +3328,7 @@ static void __cpuinit uncore_cpu_dying(int cpu)
3328 } 3328 }
3329} 3329}
3330 3330
3331static int __cpuinit uncore_cpu_starting(int cpu) 3331static int uncore_cpu_starting(int cpu)
3332{ 3332{
3333 struct intel_uncore_type *type; 3333 struct intel_uncore_type *type;
3334 struct intel_uncore_pmu *pmu; 3334 struct intel_uncore_pmu *pmu;
@@ -3371,7 +3371,7 @@ static int __cpuinit uncore_cpu_starting(int cpu)
3371 return 0; 3371 return 0;
3372} 3372}
3373 3373
3374static int __cpuinit uncore_cpu_prepare(int cpu, int phys_id) 3374static int uncore_cpu_prepare(int cpu, int phys_id)
3375{ 3375{
3376 struct intel_uncore_type *type; 3376 struct intel_uncore_type *type;
3377 struct intel_uncore_pmu *pmu; 3377 struct intel_uncore_pmu *pmu;
@@ -3397,7 +3397,7 @@ static int __cpuinit uncore_cpu_prepare(int cpu, int phys_id)
3397 return 0; 3397 return 0;
3398} 3398}
3399 3399
3400static void __cpuinit 3400static void
3401uncore_change_context(struct intel_uncore_type **uncores, int old_cpu, int new_cpu) 3401uncore_change_context(struct intel_uncore_type **uncores, int old_cpu, int new_cpu)
3402{ 3402{
3403 struct intel_uncore_type *type; 3403 struct intel_uncore_type *type;
@@ -3435,7 +3435,7 @@ uncore_change_context(struct intel_uncore_type **uncores, int old_cpu, int new_c
3435 } 3435 }
3436} 3436}
3437 3437
3438static void __cpuinit uncore_event_exit_cpu(int cpu) 3438static void uncore_event_exit_cpu(int cpu)
3439{ 3439{
3440 int i, phys_id, target; 3440 int i, phys_id, target;
3441 3441
@@ -3463,7 +3463,7 @@ static void __cpuinit uncore_event_exit_cpu(int cpu)
3463 uncore_change_context(pci_uncores, cpu, target); 3463 uncore_change_context(pci_uncores, cpu, target);
3464} 3464}
3465 3465
3466static void __cpuinit uncore_event_init_cpu(int cpu) 3466static void uncore_event_init_cpu(int cpu)
3467{ 3467{
3468 int i, phys_id; 3468 int i, phys_id;
3469 3469
@@ -3479,8 +3479,8 @@ static void __cpuinit uncore_event_init_cpu(int cpu)
 	uncore_change_context(pci_uncores, -1, cpu);
 }
 
-static int
- __cpuinit uncore_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
+static int uncore_cpu_notifier(struct notifier_block *self,
+			       unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (long)hcpu;
 
@@ -3520,7 +3520,7 @@ static int
3520 return NOTIFY_OK; 3520 return NOTIFY_OK;
3521} 3521}
3522 3522
3523static struct notifier_block uncore_cpu_nb __cpuinitdata = { 3523static struct notifier_block uncore_cpu_nb = {
3524 .notifier_call = uncore_cpu_notifier, 3524 .notifier_call = uncore_cpu_notifier,
3525 /* 3525 /*
3526 * to migrate uncore events, our notifier should be executed 3526 * to migrate uncore events, our notifier should be executed
diff --git a/arch/x86/kernel/cpu/rdrand.c b/arch/x86/kernel/cpu/rdrand.c
index feca286c2bb4..88db010845cb 100644
--- a/arch/x86/kernel/cpu/rdrand.c
+++ b/arch/x86/kernel/cpu/rdrand.c
@@ -52,7 +52,7 @@ static inline int rdrand_long(unsigned long *v)
52 */ 52 */
53#define RESEED_LOOP ((512*128)/sizeof(unsigned long)) 53#define RESEED_LOOP ((512*128)/sizeof(unsigned long))
54 54
55void __cpuinit x86_init_rdrand(struct cpuinfo_x86 *c) 55void x86_init_rdrand(struct cpuinfo_x86 *c)
56{ 56{
57#ifdef CONFIG_ARCH_RANDOM 57#ifdef CONFIG_ARCH_RANDOM
58 unsigned long tmp; 58 unsigned long tmp;
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index d92b5dad15dd..f2cc63e9cf08 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -24,13 +24,13 @@ enum cpuid_regs {
24 CR_EBX 24 CR_EBX
25}; 25};
26 26
27void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c) 27void init_scattered_cpuid_features(struct cpuinfo_x86 *c)
28{ 28{
29 u32 max_level; 29 u32 max_level;
30 u32 regs[4]; 30 u32 regs[4];
31 const struct cpuid_bit *cb; 31 const struct cpuid_bit *cb;
32 32
33 static const struct cpuid_bit __cpuinitconst cpuid_bits[] = { 33 static const struct cpuid_bit cpuid_bits[] = {
34 { X86_FEATURE_DTHERM, CR_EAX, 0, 0x00000006, 0 }, 34 { X86_FEATURE_DTHERM, CR_EAX, 0, 0x00000006, 0 },
35 { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006, 0 }, 35 { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006, 0 },
36 { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006, 0 }, 36 { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006, 0 },
diff --git a/arch/x86/kernel/cpu/topology.c b/arch/x86/kernel/cpu/topology.c
index 4397e987a1cf..4c60eaf0571c 100644
--- a/arch/x86/kernel/cpu/topology.c
+++ b/arch/x86/kernel/cpu/topology.c
@@ -26,7 +26,7 @@
26 * exists, use it for populating initial_apicid and cpu topology 26 * exists, use it for populating initial_apicid and cpu topology
27 * detection. 27 * detection.
28 */ 28 */
29void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c) 29void detect_extended_topology(struct cpuinfo_x86 *c)
30{ 30{
31#ifdef CONFIG_SMP 31#ifdef CONFIG_SMP
32 unsigned int eax, ebx, ecx, edx, sub_index; 32 unsigned int eax, ebx, ecx, edx, sub_index;
diff --git a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c
index 28000743bbb0..aa0430d69b90 100644
--- a/arch/x86/kernel/cpu/transmeta.c
+++ b/arch/x86/kernel/cpu/transmeta.c
@@ -5,7 +5,7 @@
5#include <asm/msr.h> 5#include <asm/msr.h>
6#include "cpu.h" 6#include "cpu.h"
7 7
8static void __cpuinit early_init_transmeta(struct cpuinfo_x86 *c) 8static void early_init_transmeta(struct cpuinfo_x86 *c)
9{ 9{
10 u32 xlvl; 10 u32 xlvl;
11 11
@@ -17,7 +17,7 @@ static void __cpuinit early_init_transmeta(struct cpuinfo_x86 *c)
17 } 17 }
18} 18}
19 19
20static void __cpuinit init_transmeta(struct cpuinfo_x86 *c) 20static void init_transmeta(struct cpuinfo_x86 *c)
21{ 21{
22 unsigned int cap_mask, uk, max, dummy; 22 unsigned int cap_mask, uk, max, dummy;
23 unsigned int cms_rev1, cms_rev2; 23 unsigned int cms_rev1, cms_rev2;
@@ -98,7 +98,7 @@ static void __cpuinit init_transmeta(struct cpuinfo_x86 *c)
98#endif 98#endif
99} 99}
100 100
101static const struct cpu_dev __cpuinitconst transmeta_cpu_dev = { 101static const struct cpu_dev transmeta_cpu_dev = {
102 .c_vendor = "Transmeta", 102 .c_vendor = "Transmeta",
103 .c_ident = { "GenuineTMx86", "TransmetaCPU" }, 103 .c_ident = { "GenuineTMx86", "TransmetaCPU" },
104 .c_early_init = early_init_transmeta, 104 .c_early_init = early_init_transmeta,
diff --git a/arch/x86/kernel/cpu/umc.c b/arch/x86/kernel/cpu/umc.c
index fd2c37bf7acb..202759a14121 100644
--- a/arch/x86/kernel/cpu/umc.c
+++ b/arch/x86/kernel/cpu/umc.c
@@ -8,7 +8,7 @@
8 * so no special init takes place. 8 * so no special init takes place.
9 */ 9 */
10 10
11static const struct cpu_dev __cpuinitconst umc_cpu_dev = { 11static const struct cpu_dev umc_cpu_dev = {
12 .c_vendor = "UMC", 12 .c_vendor = "UMC",
13 .c_ident = { "UMC UMC UMC" }, 13 .c_ident = { "UMC UMC UMC" },
14 .c_models = { 14 .c_models = {
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index 03a36321ec54..7076878404ec 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -122,7 +122,7 @@ static bool __init vmware_platform(void)
122 * so that the kernel could just trust the hypervisor with providing a 122 * so that the kernel could just trust the hypervisor with providing a
123 * reliable virtual TSC that is suitable for timekeeping. 123 * reliable virtual TSC that is suitable for timekeeping.
124 */ 124 */
125static void __cpuinit vmware_set_cpu_features(struct cpuinfo_x86 *c) 125static void vmware_set_cpu_features(struct cpuinfo_x86 *c)
126{ 126{
127 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); 127 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
128 set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE); 128 set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE);
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 1e4dbcfe6d31..7d9481c743f8 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -137,7 +137,7 @@ static const struct file_operations cpuid_fops = {
137 .open = cpuid_open, 137 .open = cpuid_open,
138}; 138};
139 139
140static __cpuinit int cpuid_device_create(int cpu) 140static int cpuid_device_create(int cpu)
141{ 141{
142 struct device *dev; 142 struct device *dev;
143 143
@@ -151,9 +151,8 @@ static void cpuid_device_destroy(int cpu)
 	device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu));
 }
 
-static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb,
-					      unsigned long action,
-					      void *hcpu)
+static int cpuid_class_cpu_callback(struct notifier_block *nfb,
+				    unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
 	int err = 0;
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
index 4934890e4db2..69eb2fa25494 100644
--- a/arch/x86/kernel/devicetree.c
+++ b/arch/x86/kernel/devicetree.c
@@ -133,7 +133,7 @@ static void x86_of_pci_irq_disable(struct pci_dev *dev)
133{ 133{
134} 134}
135 135
136void __cpuinit x86_of_pci_init(void) 136void x86_of_pci_init(void)
137{ 137{
138 pcibios_enable_irq = x86_of_pci_irq_enable; 138 pcibios_enable_irq = x86_of_pci_irq_enable;
139 pcibios_disable_irq = x86_of_pci_irq_disable; 139 pcibios_disable_irq = x86_of_pci_irq_disable;
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index e65ddc62e113..5dd87a89f011 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -292,7 +292,6 @@ ENDPROC(start_cpu0)
292 * If cpu hotplug is not supported then this code can go in init section 292 * If cpu hotplug is not supported then this code can go in init section
293 * which will be freed later 293 * which will be freed later
294 */ 294 */
295__CPUINIT
296ENTRY(startup_32_smp) 295ENTRY(startup_32_smp)
297 cld 296 cld
298 movl $(__BOOT_DS),%eax 297 movl $(__BOOT_DS),%eax
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 5e4d8a8a5c40..e1aabdb314c8 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -512,21 +512,6 @@ ENTRY(phys_base)
512 512
513#include "../../x86/xen/xen-head.S" 513#include "../../x86/xen/xen-head.S"
514 514
515 .section .bss, "aw", @nobits
516 .align L1_CACHE_BYTES
517ENTRY(idt_table)
518 .skip IDT_ENTRIES * 16
519
520 .align L1_CACHE_BYTES
521ENTRY(debug_idt_table)
522 .skip IDT_ENTRIES * 16
523
524#ifdef CONFIG_TRACING
525 .align L1_CACHE_BYTES
526ENTRY(trace_idt_table)
527 .skip IDT_ENTRIES * 16
528#endif
529
530 __PAGE_ALIGNED_BSS 515 __PAGE_ALIGNED_BSS
531NEXT_PAGE(empty_zero_page) 516NEXT_PAGE(empty_zero_page)
532 .skip PAGE_SIZE 517 .skip PAGE_SIZE
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index b627746f6b1a..202d24f0f7e7 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -108,9 +108,9 @@ EXPORT_SYMBOL(unlazy_fpu);
108unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu; 108unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
109unsigned int xstate_size; 109unsigned int xstate_size;
110EXPORT_SYMBOL_GPL(xstate_size); 110EXPORT_SYMBOL_GPL(xstate_size);
111static struct i387_fxsave_struct fx_scratch __cpuinitdata; 111static struct i387_fxsave_struct fx_scratch;
112 112
113static void __cpuinit mxcsr_feature_mask_init(void) 113static void mxcsr_feature_mask_init(void)
114{ 114{
115 unsigned long mask = 0; 115 unsigned long mask = 0;
116 116
@@ -124,7 +124,7 @@ static void __cpuinit mxcsr_feature_mask_init(void)
124 mxcsr_feature_mask &= mask; 124 mxcsr_feature_mask &= mask;
125} 125}
126 126
127static void __cpuinit init_thread_xstate(void) 127static void init_thread_xstate(void)
128{ 128{
129 /* 129 /*
130 * Note that xstate_size might be overwriten later during 130 * Note that xstate_size might be overwriten later during
@@ -153,7 +153,7 @@ static void __cpuinit init_thread_xstate(void)
153 * into all processes. 153 * into all processes.
154 */ 154 */
155 155
156void __cpuinit fpu_init(void) 156void fpu_init(void)
157{ 157{
158 unsigned long cr0; 158 unsigned long cr0;
159 unsigned long cr4_mask = 0; 159 unsigned long cr4_mask = 0;
@@ -608,7 +608,7 @@ static int __init no_387(char *s)
608 608
609__setup("no387", no_387); 609__setup("no387", no_387);
610 610
611void __cpuinit fpu_detect(struct cpuinfo_x86 *c) 611void fpu_detect(struct cpuinfo_x86 *c)
612{ 612{
613 unsigned long cr0; 613 unsigned long cr0;
614 u16 fsw, fcw; 614 u16 fsw, fcw;
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 344faf8d0d62..4186755f1d7c 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -119,7 +119,7 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
119/* 119/*
120 * allocate per-cpu stacks for hardirq and for softirq processing 120 * allocate per-cpu stacks for hardirq and for softirq processing
121 */ 121 */
122void __cpuinit irq_ctx_init(int cpu) 122void irq_ctx_init(int cpu)
123{ 123{
124 union irq_ctx *irqctx; 124 union irq_ctx *irqctx;
125 125
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index cd6d9a5a42f6..a96d32cc55b8 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -320,7 +320,7 @@ static void kvm_guest_apic_eoi_write(u32 reg, u32 val)
320 apic_write(APIC_EOI, APIC_EOI_ACK); 320 apic_write(APIC_EOI, APIC_EOI_ACK);
321} 321}
322 322
323void __cpuinit kvm_guest_cpu_init(void) 323void kvm_guest_cpu_init(void)
324{ 324{
325 if (!kvm_para_available()) 325 if (!kvm_para_available())
326 return; 326 return;
@@ -421,7 +421,7 @@ static void __init kvm_smp_prepare_boot_cpu(void)
421 native_smp_prepare_boot_cpu(); 421 native_smp_prepare_boot_cpu();
422} 422}
423 423
424static void __cpuinit kvm_guest_cpu_online(void *dummy) 424static void kvm_guest_cpu_online(void *dummy)
425{ 425{
426 kvm_guest_cpu_init(); 426 kvm_guest_cpu_init();
427} 427}
@@ -435,8 +435,8 @@ static void kvm_guest_cpu_offline(void *dummy)
 	apf_task_wake_all();
 }
 
-static int __cpuinit kvm_cpu_notify(struct notifier_block *self,
-				    unsigned long action, void *hcpu)
+static int kvm_cpu_notify(struct notifier_block *self, unsigned long action,
+			  void *hcpu)
 {
 	int cpu = (unsigned long)hcpu;
 	switch (action) {
@@ -455,7 +455,7 @@ static int __cpuinit kvm_cpu_notify(struct notifier_block *self,
455 return NOTIFY_OK; 455 return NOTIFY_OK;
456} 456}
457 457
458static struct notifier_block __cpuinitdata kvm_cpu_notifier = { 458static struct notifier_block kvm_cpu_notifier = {
459 .notifier_call = kvm_cpu_notify, 459 .notifier_call = kvm_cpu_notify,
460}; 460};
461#endif 461#endif
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 1f354f4b602b..1570e0741344 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -182,7 +182,7 @@ static void kvm_restore_sched_clock_state(void)
182} 182}
183 183
184#ifdef CONFIG_X86_LOCAL_APIC 184#ifdef CONFIG_X86_LOCAL_APIC
185static void __cpuinit kvm_setup_secondary_clock(void) 185static void kvm_setup_secondary_clock(void)
186{ 186{
187 /* 187 /*
188 * Now that the first cpu already had this clocksource initialized, 188 * Now that the first cpu already had this clocksource initialized,
diff --git a/arch/x86/kernel/microcode_amd_early.c b/arch/x86/kernel/microcode_amd_early.c
index 1ac6e9aee766..1d14ffee5749 100644
--- a/arch/x86/kernel/microcode_amd_early.c
+++ b/arch/x86/kernel/microcode_amd_early.c
@@ -82,7 +82,7 @@ static struct cpio_data __init find_ucode_in_initrd(void)
82 * load_microcode_amd() to save equivalent cpu table and microcode patches in 82 * load_microcode_amd() to save equivalent cpu table and microcode patches in
83 * kernel heap memory. 83 * kernel heap memory.
84 */ 84 */
85static void __cpuinit apply_ucode_in_initrd(void *ucode, size_t size) 85static void apply_ucode_in_initrd(void *ucode, size_t size)
86{ 86{
87 struct equiv_cpu_entry *eq; 87 struct equiv_cpu_entry *eq;
88 u32 *header; 88 u32 *header;
@@ -206,7 +206,7 @@ u8 amd_bsp_mpb[MPB_MAX_SIZE];
206 * save_microcode_in_initrd_amd() BSP's patch is copied to amd_bsp_mpb, which 206 * save_microcode_in_initrd_amd() BSP's patch is copied to amd_bsp_mpb, which
207 * is used upon resume from suspend. 207 * is used upon resume from suspend.
208 */ 208 */
209void __cpuinit load_ucode_amd_ap(void) 209void load_ucode_amd_ap(void)
210{ 210{
211 struct microcode_amd *mc; 211 struct microcode_amd *mc;
212 unsigned long *initrd; 212 unsigned long *initrd;
@@ -238,7 +238,7 @@ static void __init collect_cpu_sig_on_bsp(void *arg)
238 uci->cpu_sig.sig = cpuid_eax(0x00000001); 238 uci->cpu_sig.sig = cpuid_eax(0x00000001);
239} 239}
240#else 240#else
241static void __cpuinit collect_cpu_info_amd_early(struct cpuinfo_x86 *c, 241static void collect_cpu_info_amd_early(struct cpuinfo_x86 *c,
242 struct ucode_cpu_info *uci) 242 struct ucode_cpu_info *uci)
243{ 243{
244 u32 rev, eax; 244 u32 rev, eax;
@@ -252,7 +252,7 @@ static void __cpuinit collect_cpu_info_amd_early(struct cpuinfo_x86 *c,
252 c->x86 = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff); 252 c->x86 = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
253} 253}
254 254
255void __cpuinit load_ucode_amd_ap(void) 255void load_ucode_amd_ap(void)
256{ 256{
257 unsigned int cpu = smp_processor_id(); 257 unsigned int cpu = smp_processor_id();
258 258
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index 22db92bbdf1a..15c987698b0f 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -468,7 +468,7 @@ static struct syscore_ops mc_syscore_ops = {
468 .resume = mc_bp_resume, 468 .resume = mc_bp_resume,
469}; 469};
470 470
471static __cpuinit int 471static int
472mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu) 472mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
473{ 473{
474 unsigned int cpu = (unsigned long)hcpu; 474 unsigned int cpu = (unsigned long)hcpu;
diff --git a/arch/x86/kernel/microcode_core_early.c b/arch/x86/kernel/microcode_core_early.c
index 86119f63db0c..be7f8514f577 100644
--- a/arch/x86/kernel/microcode_core_early.c
+++ b/arch/x86/kernel/microcode_core_early.c
@@ -41,7 +41,7 @@
41 * 41 *
42 * x86_vendor() gets vendor information directly through cpuid. 42 * x86_vendor() gets vendor information directly through cpuid.
43 */ 43 */
44static int __cpuinit x86_vendor(void) 44static int x86_vendor(void)
45{ 45{
46 u32 eax = 0x00000000; 46 u32 eax = 0x00000000;
47 u32 ebx, ecx = 0, edx; 47 u32 ebx, ecx = 0, edx;
@@ -57,7 +57,7 @@ static int __cpuinit x86_vendor(void)
57 return X86_VENDOR_UNKNOWN; 57 return X86_VENDOR_UNKNOWN;
58} 58}
59 59
60static int __cpuinit x86_family(void) 60static int x86_family(void)
61{ 61{
62 u32 eax = 0x00000001; 62 u32 eax = 0x00000001;
63 u32 ebx, ecx = 0, edx; 63 u32 ebx, ecx = 0, edx;
@@ -96,7 +96,7 @@ void __init load_ucode_bsp(void)
96 } 96 }
97} 97}
98 98
99void __cpuinit load_ucode_ap(void) 99void load_ucode_ap(void)
100{ 100{
101 int vendor, x86; 101 int vendor, x86;
102 102
diff --git a/arch/x86/kernel/microcode_intel_early.c b/arch/x86/kernel/microcode_intel_early.c
index dabef95506f3..1575deb2e636 100644
--- a/arch/x86/kernel/microcode_intel_early.c
+++ b/arch/x86/kernel/microcode_intel_early.c
@@ -34,7 +34,7 @@ struct mc_saved_data {
34 struct microcode_intel **mc_saved; 34 struct microcode_intel **mc_saved;
35} mc_saved_data; 35} mc_saved_data;
36 36
37static enum ucode_state __cpuinit 37static enum ucode_state
38generic_load_microcode_early(struct microcode_intel **mc_saved_p, 38generic_load_microcode_early(struct microcode_intel **mc_saved_p,
39 unsigned int mc_saved_count, 39 unsigned int mc_saved_count,
40 struct ucode_cpu_info *uci) 40 struct ucode_cpu_info *uci)
@@ -69,7 +69,7 @@ out:
69 return state; 69 return state;
70} 70}
71 71
72static void __cpuinit 72static void
73microcode_pointer(struct microcode_intel **mc_saved, 73microcode_pointer(struct microcode_intel **mc_saved,
74 unsigned long *mc_saved_in_initrd, 74 unsigned long *mc_saved_in_initrd,
75 unsigned long initrd_start, int mc_saved_count) 75 unsigned long initrd_start, int mc_saved_count)
@@ -82,7 +82,7 @@ microcode_pointer(struct microcode_intel **mc_saved,
82} 82}
83 83
84#ifdef CONFIG_X86_32 84#ifdef CONFIG_X86_32
85static void __cpuinit 85static void
86microcode_phys(struct microcode_intel **mc_saved_tmp, 86microcode_phys(struct microcode_intel **mc_saved_tmp,
87 struct mc_saved_data *mc_saved_data) 87 struct mc_saved_data *mc_saved_data)
88{ 88{
@@ -101,7 +101,7 @@ microcode_phys(struct microcode_intel **mc_saved_tmp,
101} 101}
102#endif 102#endif
103 103
104static enum ucode_state __cpuinit 104static enum ucode_state
105load_microcode(struct mc_saved_data *mc_saved_data, 105load_microcode(struct mc_saved_data *mc_saved_data,
106 unsigned long *mc_saved_in_initrd, 106 unsigned long *mc_saved_in_initrd,
107 unsigned long initrd_start, 107 unsigned long initrd_start,
@@ -375,7 +375,7 @@ do { \
375#define native_wrmsr(msr, low, high) \ 375#define native_wrmsr(msr, low, high) \
376 native_write_msr(msr, low, high); 376 native_write_msr(msr, low, high);
377 377
378static int __cpuinit collect_cpu_info_early(struct ucode_cpu_info *uci) 378static int collect_cpu_info_early(struct ucode_cpu_info *uci)
379{ 379{
380 unsigned int val[2]; 380 unsigned int val[2];
381 u8 x86, x86_model; 381 u8 x86, x86_model;
@@ -584,7 +584,7 @@ scan_microcode(unsigned long start, unsigned long end,
584/* 584/*
585 * Print ucode update info. 585 * Print ucode update info.
586 */ 586 */
587static void __cpuinit 587static void
588print_ucode_info(struct ucode_cpu_info *uci, unsigned int date) 588print_ucode_info(struct ucode_cpu_info *uci, unsigned int date)
589{ 589{
590 int cpu = smp_processor_id(); 590 int cpu = smp_processor_id();
@@ -605,7 +605,7 @@ static int current_mc_date;
605/* 605/*
606 * Print early updated ucode info after printk works. This is delayed info dump. 606 * Print early updated ucode info after printk works. This is delayed info dump.
607 */ 607 */
608void __cpuinit show_ucode_info_early(void) 608void show_ucode_info_early(void)
609{ 609{
610 struct ucode_cpu_info uci; 610 struct ucode_cpu_info uci;
611 611
@@ -621,7 +621,7 @@ void __cpuinit show_ucode_info_early(void)
621 * mc_saved_data.mc_saved and delay printing microcode info in 621 * mc_saved_data.mc_saved and delay printing microcode info in
622 * show_ucode_info_early() until printk() works. 622 * show_ucode_info_early() until printk() works.
623 */ 623 */
624static void __cpuinit print_ucode(struct ucode_cpu_info *uci) 624static void print_ucode(struct ucode_cpu_info *uci)
625{ 625{
626 struct microcode_intel *mc_intel; 626 struct microcode_intel *mc_intel;
627 int *delay_ucode_info_p; 627 int *delay_ucode_info_p;
@@ -643,12 +643,12 @@ static void __cpuinit print_ucode(struct ucode_cpu_info *uci)
643 * Flush global tlb. We only do this in x86_64 where paging has been enabled 643 * Flush global tlb. We only do this in x86_64 where paging has been enabled
644 * already and PGE should be enabled as well. 644 * already and PGE should be enabled as well.
645 */ 645 */
646static inline void __cpuinit flush_tlb_early(void) 646static inline void flush_tlb_early(void)
647{ 647{
648 __native_flush_tlb_global_irq_disabled(); 648 __native_flush_tlb_global_irq_disabled();
649} 649}
650 650
651static inline void __cpuinit print_ucode(struct ucode_cpu_info *uci) 651static inline void print_ucode(struct ucode_cpu_info *uci)
652{ 652{
653 struct microcode_intel *mc_intel; 653 struct microcode_intel *mc_intel;
654 654
@@ -660,8 +660,8 @@ static inline void __cpuinit print_ucode(struct ucode_cpu_info *uci)
660} 660}
661#endif 661#endif
662 662
663static int __cpuinit apply_microcode_early(struct mc_saved_data *mc_saved_data, 663static int apply_microcode_early(struct mc_saved_data *mc_saved_data,
664 struct ucode_cpu_info *uci) 664 struct ucode_cpu_info *uci)
665{ 665{
666 struct microcode_intel *mc_intel; 666 struct microcode_intel *mc_intel;
667 unsigned int val[2]; 667 unsigned int val[2];
@@ -763,7 +763,7 @@ load_ucode_intel_bsp(void)
763#endif 763#endif
764} 764}
765 765
766void __cpuinit load_ucode_intel_ap(void) 766void load_ucode_intel_ap(void)
767{ 767{
768 struct mc_saved_data *mc_saved_data_p; 768 struct mc_saved_data *mc_saved_data_p;
769 struct ucode_cpu_info uci; 769 struct ucode_cpu_info uci;
diff --git a/arch/x86/kernel/mmconf-fam10h_64.c b/arch/x86/kernel/mmconf-fam10h_64.c
index ac861b8348e2..f4c886d9165c 100644
--- a/arch/x86/kernel/mmconf-fam10h_64.c
+++ b/arch/x86/kernel/mmconf-fam10h_64.c
@@ -24,14 +24,14 @@ struct pci_hostbridge_probe {
24 u32 device; 24 u32 device;
25}; 25};
26 26
27static u64 __cpuinitdata fam10h_pci_mmconf_base; 27static u64 fam10h_pci_mmconf_base;
28 28
29static struct pci_hostbridge_probe pci_probes[] __cpuinitdata = { 29static struct pci_hostbridge_probe pci_probes[] = {
30 { 0, 0x18, PCI_VENDOR_ID_AMD, 0x1200 }, 30 { 0, 0x18, PCI_VENDOR_ID_AMD, 0x1200 },
31 { 0xff, 0, PCI_VENDOR_ID_AMD, 0x1200 }, 31 { 0xff, 0, PCI_VENDOR_ID_AMD, 0x1200 },
32}; 32};
33 33
34static int __cpuinit cmp_range(const void *x1, const void *x2) 34static int cmp_range(const void *x1, const void *x2)
35{ 35{
36 const struct range *r1 = x1; 36 const struct range *r1 = x1;
37 const struct range *r2 = x2; 37 const struct range *r2 = x2;
@@ -49,7 +49,7 @@ static int __cpuinit cmp_range(const void *x1, const void *x2)
49/* need to avoid (0xfd<<32), (0xfe<<32), and (0xff<<32), ht used space */ 49/* need to avoid (0xfd<<32), (0xfe<<32), and (0xff<<32), ht used space */
50#define FAM10H_PCI_MMCONF_BASE (0xfcULL<<32) 50#define FAM10H_PCI_MMCONF_BASE (0xfcULL<<32)
51#define BASE_VALID(b) ((b) + MMCONF_SIZE <= (0xfdULL<<32) || (b) >= (1ULL<<40)) 51#define BASE_VALID(b) ((b) + MMCONF_SIZE <= (0xfdULL<<32) || (b) >= (1ULL<<40))
52static void __cpuinit get_fam10h_pci_mmconf_base(void) 52static void get_fam10h_pci_mmconf_base(void)
53{ 53{
54 int i; 54 int i;
55 unsigned bus; 55 unsigned bus;
@@ -166,7 +166,7 @@ out:
166 fam10h_pci_mmconf_base = base; 166 fam10h_pci_mmconf_base = base;
167} 167}
168 168
169void __cpuinit fam10h_check_enable_mmcfg(void) 169void fam10h_check_enable_mmcfg(void)
170{ 170{
171 u64 val; 171 u64 val;
172 u32 address; 172 u32 address;
@@ -230,7 +230,7 @@ static const struct dmi_system_id __initconst mmconf_dmi_table[] = {
230 {} 230 {}
231}; 231};
232 232
233/* Called from a __cpuinit function, but only on the BSP. */ 233/* Called from a non __init function, but only on the BSP. */
234void __ref check_enable_amd_mmconf_dmi(void) 234void __ref check_enable_amd_mmconf_dmi(void)
235{ 235{
236 dmi_check_system(mmconf_dmi_table); 236 dmi_check_system(mmconf_dmi_table);
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index ce130493b802..88458faea2f8 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -200,7 +200,7 @@ static const struct file_operations msr_fops = {
200 .compat_ioctl = msr_ioctl, 200 .compat_ioctl = msr_ioctl,
201}; 201};
202 202
203static int __cpuinit msr_device_create(int cpu) 203static int msr_device_create(int cpu)
204{ 204{
205 struct device *dev; 205 struct device *dev;
206 206
@@ -214,8 +214,8 @@ static void msr_device_destroy(int cpu)
214 device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu)); 214 device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu));
215} 215}
216 216
217static int __cpuinit msr_class_cpu_callback(struct notifier_block *nfb, 217static int msr_class_cpu_callback(struct notifier_block *nfb,
218 unsigned long action, void *hcpu) 218 unsigned long action, void *hcpu)
219{ 219{
220 unsigned int cpu = (unsigned long)hcpu; 220 unsigned int cpu = (unsigned long)hcpu;
221 int err = 0; 221 int err = 0;
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 81a5f5e8f142..83369e5a1d27 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -398,7 +398,7 @@ static void amd_e400_idle(void)
398 default_idle(); 398 default_idle();
399} 399}
400 400
401void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c) 401void select_idle_routine(const struct cpuinfo_x86 *c)
402{ 402{
403#ifdef CONFIG_SMP 403#ifdef CONFIG_SMP
404 if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1) 404 if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index e68709da8251..f8ec57815c05 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -170,7 +170,7 @@ static struct resource bss_resource = {
170 170
171#ifdef CONFIG_X86_32 171#ifdef CONFIG_X86_32
172/* cpu data as detected by the assembly code in head.S */ 172/* cpu data as detected by the assembly code in head.S */
173struct cpuinfo_x86 new_cpu_data __cpuinitdata = { 173struct cpuinfo_x86 new_cpu_data = {
174 .wp_works_ok = -1, 174 .wp_works_ok = -1,
175}; 175};
176/* common cpu data for all cpus */ 176/* common cpu data for all cpus */
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index bfd348e99369..aecc98a93d1b 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -130,7 +130,7 @@ atomic_t init_deasserted;
130 * Report back to the Boot Processor during boot time or to the caller processor 130 * Report back to the Boot Processor during boot time or to the caller processor
131 * during CPU online. 131 * during CPU online.
132 */ 132 */
133static void __cpuinit smp_callin(void) 133static void smp_callin(void)
134{ 134{
135 int cpuid, phys_id; 135 int cpuid, phys_id;
136 unsigned long timeout; 136 unsigned long timeout;
@@ -237,7 +237,7 @@ static int enable_start_cpu0;
237/* 237/*
238 * Activate a secondary processor. 238 * Activate a secondary processor.
239 */ 239 */
240notrace static void __cpuinit start_secondary(void *unused) 240static void notrace start_secondary(void *unused)
241{ 241{
242 /* 242 /*
243 * Don't put *anything* before cpu_init(), SMP booting is too 243 * Don't put *anything* before cpu_init(), SMP booting is too
@@ -300,7 +300,7 @@ void __init smp_store_boot_cpu_info(void)
300 * The bootstrap kernel entry code has set these up. Save them for 300 * The bootstrap kernel entry code has set these up. Save them for
301 * a given CPU 301 * a given CPU
302 */ 302 */
303void __cpuinit smp_store_cpu_info(int id) 303void smp_store_cpu_info(int id)
304{ 304{
305 struct cpuinfo_x86 *c = &cpu_data(id); 305 struct cpuinfo_x86 *c = &cpu_data(id);
306 306
@@ -313,7 +313,7 @@ void __cpuinit smp_store_cpu_info(int id)
313 identify_secondary_cpu(c); 313 identify_secondary_cpu(c);
314} 314}
315 315
316static bool __cpuinit 316static bool
317topology_sane(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o, const char *name) 317topology_sane(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o, const char *name)
318{ 318{
319 int cpu1 = c->cpu_index, cpu2 = o->cpu_index; 319 int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
@@ -330,7 +330,7 @@ do { \
330 cpumask_set_cpu((c2), cpu_##_m##_mask(c1)); \ 330 cpumask_set_cpu((c2), cpu_##_m##_mask(c1)); \
331} while (0) 331} while (0)
332 332
333static bool __cpuinit match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) 333static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
334{ 334{
335 if (cpu_has_topoext) { 335 if (cpu_has_topoext) {
336 int cpu1 = c->cpu_index, cpu2 = o->cpu_index; 336 int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
@@ -348,7 +348,7 @@ static bool __cpuinit match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
348 return false; 348 return false;
349} 349}
350 350
351static bool __cpuinit match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) 351static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
352{ 352{
353 int cpu1 = c->cpu_index, cpu2 = o->cpu_index; 353 int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
354 354
@@ -359,7 +359,7 @@ static bool __cpuinit match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
359 return false; 359 return false;
360} 360}
361 361
362static bool __cpuinit match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) 362static bool match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
363{ 363{
364 if (c->phys_proc_id == o->phys_proc_id) { 364 if (c->phys_proc_id == o->phys_proc_id) {
365 if (cpu_has(c, X86_FEATURE_AMD_DCM)) 365 if (cpu_has(c, X86_FEATURE_AMD_DCM))
@@ -370,7 +370,7 @@ static bool __cpuinit match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
370 return false; 370 return false;
371} 371}
372 372
373void __cpuinit set_cpu_sibling_map(int cpu) 373void set_cpu_sibling_map(int cpu)
374{ 374{
375 bool has_smt = smp_num_siblings > 1; 375 bool has_smt = smp_num_siblings > 1;
376 bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1; 376 bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1;
@@ -499,7 +499,7 @@ void __inquire_remote_apic(int apicid)
499 * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this 499 * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
500 * won't ... remember to clear down the APIC, etc later. 500 * won't ... remember to clear down the APIC, etc later.
501 */ 501 */
502int __cpuinit 502int
503wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip) 503wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip)
504{ 504{
505 unsigned long send_status, accept_status = 0; 505 unsigned long send_status, accept_status = 0;
@@ -533,7 +533,7 @@ wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip)
533 return (send_status | accept_status); 533 return (send_status | accept_status);
534} 534}
535 535
536static int __cpuinit 536static int
537wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip) 537wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
538{ 538{
539 unsigned long send_status, accept_status = 0; 539 unsigned long send_status, accept_status = 0;
@@ -649,7 +649,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
649} 649}
650 650
651/* reduce the number of lines printed when booting a large cpu count system */ 651/* reduce the number of lines printed when booting a large cpu count system */
652static void __cpuinit announce_cpu(int cpu, int apicid) 652static void announce_cpu(int cpu, int apicid)
653{ 653{
654 static int current_node = -1; 654 static int current_node = -1;
655 int node = early_cpu_to_node(cpu); 655 int node = early_cpu_to_node(cpu);
@@ -691,7 +691,7 @@ static int wakeup_cpu0_nmi(unsigned int cmd, struct pt_regs *regs)
691 * We'll change this code in the future to wake up hard offlined CPU0 if 691 * We'll change this code in the future to wake up hard offlined CPU0 if
692 * real platform and request are available. 692 * real platform and request are available.
693 */ 693 */
694static int __cpuinit 694static int
695wakeup_cpu_via_init_nmi(int cpu, unsigned long start_ip, int apicid, 695wakeup_cpu_via_init_nmi(int cpu, unsigned long start_ip, int apicid,
696 int *cpu0_nmi_registered) 696 int *cpu0_nmi_registered)
697{ 697{
@@ -731,7 +731,7 @@ wakeup_cpu_via_init_nmi(int cpu, unsigned long start_ip, int apicid,
731 * Returns zero if CPU booted OK, else error code from 731 * Returns zero if CPU booted OK, else error code from
732 * ->wakeup_secondary_cpu. 732 * ->wakeup_secondary_cpu.
733 */ 733 */
734static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle) 734static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
735{ 735{
736 volatile u32 *trampoline_status = 736 volatile u32 *trampoline_status =
737 (volatile u32 *) __va(real_mode_header->trampoline_status); 737 (volatile u32 *) __va(real_mode_header->trampoline_status);
@@ -872,7 +872,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
872 return boot_error; 872 return boot_error;
873} 873}
874 874
875int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle) 875int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
876{ 876{
877 int apicid = apic->cpu_present_to_apicid(cpu); 877 int apicid = apic->cpu_present_to_apicid(cpu);
878 unsigned long flags; 878 unsigned long flags;
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index 3ff42d2f046d..addf7b58f4e8 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -320,8 +320,8 @@ static int tboot_wait_for_aps(int num_aps)
 	return !(atomic_read((atomic_t *)&tboot->num_in_wfs) == num_aps);
 }
 
-static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
-					unsigned long action, void *hcpu)
+static int tboot_cpu_callback(struct notifier_block *nfb, unsigned long action,
+			      void *hcpu)
 {
 	switch (action) {
 	case CPU_DYING:
@@ -334,7 +334,7 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
334 return NOTIFY_OK; 334 return NOTIFY_OK;
335} 335}
336 336
337static struct notifier_block tboot_cpu_notifier __cpuinitdata = 337static struct notifier_block tboot_cpu_notifier =
338{ 338{
339 .notifier_call = tboot_cpu_callback, 339 .notifier_call = tboot_cpu_callback,
340}; 340};
diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c
index 4e584a8d6edd..1c113db9ed57 100644
--- a/arch/x86/kernel/tracepoint.c
+++ b/arch/x86/kernel/tracepoint.c
@@ -12,10 +12,8 @@ atomic_t trace_idt_ctr = ATOMIC_INIT(0);
 struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
 				(unsigned long) trace_idt_table };
 
-#ifndef CONFIG_X86_64
-gate_desc trace_idt_table[NR_VECTORS] __page_aligned_data
-	= { { { { 0, 0 } } }, };
-#endif
+/* No need to be aligned, but done to keep all IDTs defined the same way. */
+gate_desc trace_idt_table[NR_VECTORS] __page_aligned_bss;
 
 static int trace_irq_vector_refcount;
 static DEFINE_MUTEX(irq_vector_mutex);
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index b0865e88d3cc..1b23a1c92746 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -63,19 +63,19 @@
 #include <asm/x86_init.h>
 #include <asm/pgalloc.h>
 #include <asm/proto.h>
+
+/* No need to be aligned, but done to keep all IDTs defined the same way. */
+gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
 #else
 #include <asm/processor-flags.h>
 #include <asm/setup.h>
 
 asmlinkage int system_call(void);
-
-/*
- * The IDT has to be page-aligned to simplify the Pentium
- * F0 0F bug workaround.
- */
-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
 #endif
 
+/* Must be page-aligned because the real IDT is used in a fixmap. */
+gate_desc idt_table[NR_VECTORS] __page_aligned_bss;
+
 DECLARE_BITMAP(used_vectors, NR_VECTORS);
 EXPORT_SYMBOL_GPL(used_vectors);
 
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 098b3cfda72e..6ff49247edf8 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -824,7 +824,7 @@ static void __init check_system_tsc_reliable(void)
824 * Make an educated guess if the TSC is trustworthy and synchronized 824 * Make an educated guess if the TSC is trustworthy and synchronized
825 * over all CPUs. 825 * over all CPUs.
826 */ 826 */
827__cpuinit int unsynchronized_tsc(void) 827int unsynchronized_tsc(void)
828{ 828{
829 if (!cpu_has_tsc || tsc_unstable) 829 if (!cpu_has_tsc || tsc_unstable)
830 return 1; 830 return 1;
@@ -1020,7 +1020,7 @@ void __init tsc_init(void)
1020 * been calibrated. This assumes that CONSTANT_TSC applies to all 1020 * been calibrated. This assumes that CONSTANT_TSC applies to all
1021 * cpus in the socket - this should be a safe assumption. 1021 * cpus in the socket - this should be a safe assumption.
1022 */ 1022 */
1023unsigned long __cpuinit calibrate_delay_is_known(void) 1023unsigned long calibrate_delay_is_known(void)
1024{ 1024{
1025 int i, cpu = smp_processor_id(); 1025 int i, cpu = smp_processor_id();
1026 1026
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index fc25e60a5884..adfdf56a3714 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -25,24 +25,24 @@
25 * Entry/exit counters that make sure that both CPUs 25 * Entry/exit counters that make sure that both CPUs
26 * run the measurement code at once: 26 * run the measurement code at once:
27 */ 27 */
28static __cpuinitdata atomic_t start_count; 28static atomic_t start_count;
29static __cpuinitdata atomic_t stop_count; 29static atomic_t stop_count;
30 30
31/* 31/*
32 * We use a raw spinlock in this exceptional case, because 32 * We use a raw spinlock in this exceptional case, because
33 * we want to have the fastest, inlined, non-debug version 33 * we want to have the fastest, inlined, non-debug version
34 * of a critical section, to be able to prove TSC time-warps: 34 * of a critical section, to be able to prove TSC time-warps:
35 */ 35 */
36static __cpuinitdata arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED; 36static arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED;
37 37
38static __cpuinitdata cycles_t last_tsc; 38static cycles_t last_tsc;
39static __cpuinitdata cycles_t max_warp; 39static cycles_t max_warp;
40static __cpuinitdata int nr_warps; 40static int nr_warps;
41 41
42/* 42/*
43 * TSC-warp measurement loop running on both CPUs: 43 * TSC-warp measurement loop running on both CPUs:
44 */ 44 */
45static __cpuinit void check_tsc_warp(unsigned int timeout) 45static void check_tsc_warp(unsigned int timeout)
46{ 46{
47 cycles_t start, now, prev, end; 47 cycles_t start, now, prev, end;
48 int i; 48 int i;
@@ -121,7 +121,7 @@ static inline unsigned int loop_timeout(int cpu)
121 * Source CPU calls into this - it waits for the freshly booted 121 * Source CPU calls into this - it waits for the freshly booted
122 * target CPU to arrive and then starts the measurement: 122 * target CPU to arrive and then starts the measurement:
123 */ 123 */
124void __cpuinit check_tsc_sync_source(int cpu) 124void check_tsc_sync_source(int cpu)
125{ 125{
126 int cpus = 2; 126 int cpus = 2;
127 127
@@ -187,7 +187,7 @@ void __cpuinit check_tsc_sync_source(int cpu)
187/* 187/*
188 * Freshly booted CPUs call into this: 188 * Freshly booted CPUs call into this:
189 */ 189 */
190void __cpuinit check_tsc_sync_target(void) 190void check_tsc_sync_target(void)
191{ 191{
192 int cpus = 2; 192 int cpus = 2;
193 193
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 9a907a67be8f..1f96f9347ed9 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -331,7 +331,7 @@ sigsegv:
331 * Assume __initcall executes before all user space. Hopefully kmod 331 * Assume __initcall executes before all user space. Hopefully kmod
332 * doesn't violate that. We'll find out if it does. 332 * doesn't violate that. We'll find out if it does.
333 */ 333 */
334static void __cpuinit vsyscall_set_cpu(int cpu) 334static void vsyscall_set_cpu(int cpu)
335{ 335{
336 unsigned long d; 336 unsigned long d;
337 unsigned long node = 0; 337 unsigned long node = 0;
@@ -353,13 +353,13 @@ static void __cpuinit vsyscall_set_cpu(int cpu)
353 write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S); 353 write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
354} 354}
355 355
356static void __cpuinit cpu_vsyscall_init(void *arg) 356static void cpu_vsyscall_init(void *arg)
357{ 357{
358 /* preemption should be already off */ 358 /* preemption should be already off */
359 vsyscall_set_cpu(raw_smp_processor_id()); 359 vsyscall_set_cpu(raw_smp_processor_id());
360} 360}
361 361
362static int __cpuinit 362static int
363cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg) 363cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
364{ 364{
365 long cpu = (long)arg; 365 long cpu = (long)arg;
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index 45a14dbbddaf..5f24c71accaa 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -25,7 +25,7 @@
25#include <asm/iommu.h> 25#include <asm/iommu.h>
26#include <asm/mach_traps.h> 26#include <asm/mach_traps.h>
27 27
28void __cpuinit x86_init_noop(void) { } 28void x86_init_noop(void) { }
29void __init x86_init_uint_noop(unsigned int unused) { } 29void __init x86_init_uint_noop(unsigned int unused) { }
30int __init iommu_init_noop(void) { return 0; } 30int __init iommu_init_noop(void) { return 0; }
31void iommu_shutdown_noop(void) { } 31void iommu_shutdown_noop(void) { }
@@ -85,7 +85,7 @@ struct x86_init_ops x86_init __initdata = {
85 }, 85 },
86}; 86};
87 87
88struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = { 88struct x86_cpuinit_ops x86_cpuinit = {
89 .early_percpu_clock_init = x86_init_noop, 89 .early_percpu_clock_init = x86_init_noop,
90 .setup_percpu_clockev = setup_secondary_APIC_clock, 90 .setup_percpu_clockev = setup_secondary_APIC_clock,
91}; 91};
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index d6c28acdf99c..422fd8223470 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -573,7 +573,7 @@ static void __init xstate_enable_boot_cpu(void)
573 * This is somewhat obfuscated due to the lack of powerful enough 573 * This is somewhat obfuscated due to the lack of powerful enough
574 * overrides for the section checks. 574 * overrides for the section checks.
575 */ 575 */
576void __cpuinit xsave_init(void) 576void xsave_init(void)
577{ 577{
578 static __refdata void (*next_func)(void) = xstate_enable_boot_cpu; 578 static __refdata void (*next_func)(void) = xstate_enable_boot_cpu;
579 void (*this_func)(void); 579 void (*this_func)(void);
@@ -594,7 +594,7 @@ static inline void __init eager_fpu_init_bp(void)
594 setup_init_fpu_buf(); 594 setup_init_fpu_buf();
595} 595}
596 596
597void __cpuinit eager_fpu_init(void) 597void eager_fpu_init(void)
598{ 598{
599 static __refdata void (*boot_func)(void) = eager_fpu_init_bp; 599 static __refdata void (*boot_func)(void) = eager_fpu_init_bp;
600 600
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 0d094da49541..9e9285ae9b94 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2811,6 +2811,13 @@ exit:
2811static bool page_fault_can_be_fast(struct kvm_vcpu *vcpu, u32 error_code) 2811static bool page_fault_can_be_fast(struct kvm_vcpu *vcpu, u32 error_code)
2812{ 2812{
2813 /* 2813 /*
2814 * Do not fix the mmio spte with invalid generation number which
2815 * need to be updated by slow page fault path.
2816 */
2817 if (unlikely(error_code & PFERR_RSVD_MASK))
2818 return false;
2819
2820 /*
2814 * #PF can be fast only if the shadow page table is present and it 2821 * #PF can be fast only if the shadow page table is present and it
2815 * is caused by write-protect, that means we just need change the 2822 * is caused by write-protect, that means we just need change the
2816 * W bit of the spte which can be done out of mmu-lock. 2823 * W bit of the spte which can be done out of mmu-lock.
diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
index dc0b727742f4..0057a7accfb1 100644
--- a/arch/x86/mm/mmio-mod.c
+++ b/arch/x86/mm/mmio-mod.c
@@ -410,9 +410,7 @@ out:
 		pr_warning("multiple CPUs still online, may miss events.\n");
 }
 
-/* __ref because leave_uniprocessor calls cpu_up which is __cpuinit,
-   but this whole function is ifdefed CONFIG_HOTPLUG_CPU */
-static void __ref leave_uniprocessor(void)
+static void leave_uniprocessor(void)
 {
 	int cpu;
 	int err;
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index a71c4e207679..8bf93bae1f13 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -60,7 +60,7 @@ s16 __apicid_to_node[MAX_LOCAL_APIC] = {
 	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
 };
 
-int __cpuinit numa_cpu_node(int cpu)
+int numa_cpu_node(int cpu)
 {
 	int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);
 
@@ -691,12 +691,12 @@ void __init init_cpu_to_node(void)
 #ifndef CONFIG_DEBUG_PER_CPU_MAPS
 
 # ifndef CONFIG_NUMA_EMU
-void __cpuinit numa_add_cpu(int cpu)
+void numa_add_cpu(int cpu)
 {
 	cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
 }
 
-void __cpuinit numa_remove_cpu(int cpu)
+void numa_remove_cpu(int cpu)
 {
 	cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
 }
@@ -763,17 +763,17 @@ void debug_cpumask_set_cpu(int cpu, int node, bool enable)
 }
 
 # ifndef CONFIG_NUMA_EMU
-static void __cpuinit numa_set_cpumask(int cpu, bool enable)
+static void numa_set_cpumask(int cpu, bool enable)
 {
 	debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
 }
 
-void __cpuinit numa_add_cpu(int cpu)
+void numa_add_cpu(int cpu)
 {
 	numa_set_cpumask(cpu, true);
 }
 
-void __cpuinit numa_remove_cpu(int cpu)
+void numa_remove_cpu(int cpu)
 {
 	numa_set_cpumask(cpu, false);
 }
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c
index dbbbb47260cc..a8f90ce3dedf 100644
--- a/arch/x86/mm/numa_emulation.c
+++ b/arch/x86/mm/numa_emulation.c
@@ -10,7 +10,7 @@
 
 #include "numa_internal.h"
 
-static int emu_nid_to_phys[MAX_NUMNODES] __cpuinitdata;
+static int emu_nid_to_phys[MAX_NUMNODES];
 static char *emu_cmdline __initdata;
 
 void __init numa_emu_cmdline(char *str)
@@ -444,7 +444,7 @@ no_emu:
 }
 
 #ifndef CONFIG_DEBUG_PER_CPU_MAPS
-void __cpuinit numa_add_cpu(int cpu)
+void numa_add_cpu(int cpu)
 {
 	int physnid, nid;
 
@@ -462,7 +462,7 @@ void __cpuinit numa_add_cpu(int cpu)
 	cpumask_set_cpu(cpu, node_to_cpumask_map[nid]);
 }
 
-void __cpuinit numa_remove_cpu(int cpu)
+void numa_remove_cpu(int cpu)
 {
 	int i;
 
@@ -470,7 +470,7 @@ void __cpuinit numa_remove_cpu(int cpu)
 		cpumask_clear_cpu(cpu, node_to_cpumask_map[i]);
 }
 #else	/* !CONFIG_DEBUG_PER_CPU_MAPS */
-static void __cpuinit numa_set_cpumask(int cpu, bool enable)
+static void numa_set_cpumask(int cpu, bool enable)
 {
 	int nid, physnid;
 
@@ -490,12 +490,12 @@ static void __cpuinit numa_set_cpumask(int cpu, bool enable)
 	}
 }
 
-void __cpuinit numa_add_cpu(int cpu)
+void numa_add_cpu(int cpu)
 {
 	numa_set_cpumask(cpu, true);
 }
 
-void __cpuinit numa_remove_cpu(int cpu)
+void numa_remove_cpu(int cpu)
 {
 	numa_set_cpumask(cpu, false);
 }
diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
index 410531d3c292..90555bf60aa4 100644
--- a/arch/x86/mm/setup_nx.c
+++ b/arch/x86/mm/setup_nx.c
@@ -5,7 +5,7 @@
 #include <asm/pgtable.h>
 #include <asm/proto.h>
 
-static int disable_nx __cpuinitdata;
+static int disable_nx;
 
 /*
  * noexec = on|off
@@ -29,7 +29,7 @@ static int __init noexec_setup(char *str)
 }
 early_param("noexec", noexec_setup);
 
-void __cpuinit x86_configure_nx(void)
+void x86_configure_nx(void)
 {
 	if (cpu_has_nx && !disable_nx)
 		__supported_pte_mask |= _PAGE_NX;
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
index e9e6ed5cdf94..a48be98e9ded 100644
--- a/arch/x86/pci/amd_bus.c
+++ b/arch/x86/pci/amd_bus.c
@@ -312,7 +312,7 @@ static int __init early_fill_mp_bus_info(void)
 
 #define ENABLE_CF8_EXT_CFG	(1ULL << 46)
 
-static void __cpuinit enable_pci_io_ecs(void *unused)
+static void enable_pci_io_ecs(void *unused)
 {
 	u64 reg;
 	rdmsrl(MSR_AMD64_NB_CFG, reg);
@@ -322,8 +322,8 @@ static void __cpuinit enable_pci_io_ecs(void *unused)
 	}
 }
 
-static int __cpuinit amd_cpu_notify(struct notifier_block *self,
-				    unsigned long action, void *hcpu)
+static int amd_cpu_notify(struct notifier_block *self, unsigned long action,
+			  void *hcpu)
 {
 	int cpu = (long)hcpu;
 	switch (action) {
329 switch (action) { 329 switch (action) {
@@ -337,7 +337,7 @@ static int __cpuinit amd_cpu_notify(struct notifier_block *self,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata amd_cpu_notifier = {
+static struct notifier_block amd_cpu_notifier = {
 	.notifier_call	= amd_cpu_notify,
 };
 
diff --git a/arch/x86/platform/ce4100/ce4100.c b/arch/x86/platform/ce4100/ce4100.c
index f8ab4945892e..643b8b5eee86 100644
--- a/arch/x86/platform/ce4100/ce4100.c
+++ b/arch/x86/platform/ce4100/ce4100.c
@@ -14,6 +14,7 @@
 #include <linux/module.h>
 #include <linux/serial_reg.h>
 #include <linux/serial_8250.h>
+#include <linux/reboot.h>
 
 #include <asm/ce4100.h>
 #include <asm/prom.h>
@@ -134,7 +135,7 @@ static void __init sdv_arch_setup(void)
 }
 
 #ifdef CONFIG_X86_IO_APIC
-static void __cpuinit sdv_pci_init(void)
+static void sdv_pci_init(void)
 {
 	x86_of_pci_init();
 	/* We can't set this earlier, because we need to calibrate the timer */
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index c8d5577044bb..90f6ed127096 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -931,13 +931,6 @@ void __init efi_enter_virtual_mode(void)
 			va = efi_ioremap(md->phys_addr, size,
 					 md->type, md->attribute);
 
-		if (!(md->attribute & EFI_MEMORY_RUNTIME)) {
-			if (!va)
-				pr_err("ioremap of 0x%llX failed!\n",
-				       (unsigned long long)md->phys_addr);
-			continue;
-		}
-
 		md->virt_addr = (u64) (unsigned long) va;
 
 		if (!va) {
diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
index a0a0a4389bbd..47fe66fe61f1 100644
--- a/arch/x86/platform/mrst/mrst.c
+++ b/arch/x86/platform/mrst/mrst.c
@@ -65,7 +65,7 @@
  * lapic (always-on,ARAT) ------ 150
  */
 
-__cpuinitdata enum mrst_timer_options mrst_timer_options;
+enum mrst_timer_options mrst_timer_options;
 
 static u32 sfi_mtimer_usage[SFI_MTMR_MAX_NUM];
 static struct sfi_timer_table_entry sfi_mtimer_array[SFI_MTMR_MAX_NUM];
@@ -248,7 +248,7 @@ static void __init mrst_time_init(void)
 		apbt_time_init();
 }
 
-static void __cpuinit mrst_arch_setup(void)
+static void mrst_arch_setup(void)
 {
 	if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 0x27)
 		__mrst_cpu_chip = MRST_CPU_CHIP_PENWELL;
diff --git a/arch/x86/um/signal.c b/arch/x86/um/signal.c
index ae7319db18ee..5e04a1c899fa 100644
--- a/arch/x86/um/signal.c
+++ b/arch/x86/um/signal.c
@@ -508,7 +508,6 @@ int setup_signal_stack_si(unsigned long stack_top, int sig,
 {
 	struct rt_sigframe __user *frame;
 	int err = 0;
-	struct task_struct *me = current;
 
 	frame = (struct rt_sigframe __user *)
 		round_down(stack_top - sizeof(struct rt_sigframe), 16);
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 2fa02bc50034..193097ef3d7d 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1681,8 +1681,8 @@ static void __init init_hvm_pv_info(void)
 	xen_domain_type = XEN_HVM_DOMAIN;
 }
 
-static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
-					unsigned long action, void *hcpu)
+static int xen_hvm_cpu_notify(struct notifier_block *self, unsigned long action,
+			      void *hcpu)
 {
 	int cpu = (long)hcpu;
 	switch (action) {
@@ -1700,7 +1700,7 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block xen_hvm_cpu_notifier __cpuinitdata = {
+static struct notifier_block xen_hvm_cpu_notifier = {
 	.notifier_call	= xen_hvm_cpu_notify,
 };
 
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 94eac5c85cdc..056d11faef21 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -475,7 +475,7 @@ static void __init fiddle_vdso(void)
 #endif
 }
 
-static int __cpuinit register_callback(unsigned type, const void *func)
+static int register_callback(unsigned type, const void *func)
 {
 	struct callback_register callback = {
 		.type = type,
@@ -486,7 +486,7 @@ static int __cpuinit register_callback(unsigned type, const void *func)
 	return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
 }
 
-void __cpuinit xen_enable_sysenter(void)
+void xen_enable_sysenter(void)
 {
 	int ret;
 	unsigned sysenter_feature;
@@ -505,7 +505,7 @@ void __cpuinit xen_enable_sysenter(void)
 		setup_clear_cpu_cap(sysenter_feature);
 }
 
-void __cpuinit xen_enable_syscall(void)
+void xen_enable_syscall(void)
 {
 #ifdef CONFIG_X86_64
 	int ret;
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index c1367b29c3b1..ca92754eb846 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -65,7 +65,7 @@ static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static void __cpuinit cpu_bringup(void)
+static void cpu_bringup(void)
 {
 	int cpu;
 
@@ -97,7 +97,7 @@ static void __cpuinit cpu_bringup(void)
 	wmb();			/* make sure everything is out */
 }
 
-static void __cpuinit cpu_bringup_and_idle(void)
+static void cpu_bringup_and_idle(void)
 {
 	cpu_bringup();
 	cpu_startup_entry(CPUHP_ONLINE);
@@ -326,7 +326,7 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
 		set_cpu_present(cpu, true);
 }
 
-static int __cpuinit
+static int
 cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 {
 	struct vcpu_guest_context *ctxt;
@@ -397,7 +397,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 	return 0;
 }
 
-static int __cpuinit xen_cpu_up(unsigned int cpu, struct task_struct *idle)
+static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
 {
 	int rc;
 
@@ -470,7 +470,7 @@ static void xen_cpu_die(unsigned int cpu)
 	xen_teardown_timer(cpu);
 }
 
-static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */
+static void xen_play_dead(void) /* used only with HOTPLUG_CPU */
 {
 	play_dead_common();
 	HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
@@ -691,7 +691,7 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
 	xen_init_lock_cpu(0);
 }
 
-static int __cpuinit xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
+static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
 	int rc;
 	rc = native_cpu_up(cpu, tidle);
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index a40f8508e760..cf3caee356b3 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -361,7 +361,7 @@ static irqreturn_t dummy_handler(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-void __cpuinit xen_init_lock_cpu(int cpu)
+void xen_init_lock_cpu(int cpu)
 {
 	int irq;
 	char *name;
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index a95b41744ad0..86782c5d7e2a 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -73,7 +73,7 @@ static inline void xen_hvm_smp_init(void) {}
 
 #ifdef CONFIG_PARAVIRT_SPINLOCKS
 void __init xen_init_spinlocks(void);
-void __cpuinit xen_init_lock_cpu(int cpu);
+void xen_init_lock_cpu(int cpu);
 void xen_uninit_lock_cpu(int cpu);
 #else
 static inline void xen_init_spinlocks(void)
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index bdbb17312526..24bb0c1776ba 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -162,7 +162,7 @@ irqreturn_t timer_interrupt (int irq, void *dev_id)
 }
 
 #ifndef CONFIG_GENERIC_CALIBRATE_DELAY
-void __cpuinit calibrate_delay(void)
+void calibrate_delay(void)
 {
 	loops_per_jiffy = CCOUNT_PER_JIFFY;
 	printk("Calibrating delay loop (skipped)... "