-rw-r--r--CREDITS4
-rw-r--r--Documentation/accounting/getdelays.c1
-rw-r--r--Documentation/devicetree/bindings/arm/armada-38x.txt14
-rw-r--r--Documentation/devicetree/bindings/arm/samsung/exynos-adc.txt2
-rw-r--r--Documentation/devicetree/bindings/hwmon/ntc_thermistor.txt20
-rw-r--r--Documentation/devicetree/bindings/spi/qcom,spi-qup.txt6
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt1
-rw-r--r--Documentation/email-clients.txt11
-rw-r--r--Documentation/hwmon/ntc_thermistor8
-rw-r--r--Documentation/kernel-parameters.txt5
-rw-r--r--Documentation/memory-hotplug.txt15
-rw-r--r--Documentation/ptp/testptp.c5
-rw-r--r--Documentation/sound/alsa/HD-Audio-Models.txt5
-rw-r--r--Documentation/sysctl/kernel.txt17
-rw-r--r--Documentation/sysctl/vm.txt3
-rw-r--r--MAINTAINERS22
-rw-r--r--Makefile2
-rw-r--r--arch/arc/include/asm/cache.h4
-rw-r--r--arch/arc/include/uapi/asm/ptrace.h1
-rw-r--r--arch/arc/kernel/ctx_sw_asm.S2
-rw-r--r--arch/arc/kernel/devtree.c2
-rw-r--r--arch/arc/kernel/head.S7
-rw-r--r--arch/arc/kernel/ptrace.c4
-rw-r--r--arch/arc/kernel/smp.c15
-rw-r--r--arch/arc/kernel/vmlinux.lds.S2
-rw-r--r--arch/arc/mm/cache_arc700.c25
-rw-r--r--arch/arm/Kconfig17
-rw-r--r--arch/arm/Makefile3
-rw-r--r--arch/arm/boot/compressed/Makefile5
-rw-r--r--arch/arm/boot/compressed/head.S8
-rw-r--r--arch/arm/boot/compressed/vmlinux.lds.S (renamed from arch/arm/boot/compressed/vmlinux.lds.in)17
-rw-r--r--arch/arm/boot/dts/Makefile2
-rw-r--r--arch/arm/boot/dts/armada-380.dtsi2
-rw-r--r--arch/arm/boot/dts/armada-385-db.dts2
-rw-r--r--arch/arm/boot/dts/armada-385-rd.dts2
-rw-r--r--arch/arm/boot/dts/armada-385.dtsi2
-rw-r--r--arch/arm/boot/dts/armada-38x.dtsi2
-rw-r--r--arch/arm/boot/dts/at91sam9261.dtsi21
-rw-r--r--arch/arm/boot/dts/at91sam9261ek.dts4
-rw-r--r--arch/arm/boot/dts/at91sam9n12.dtsi4
-rw-r--r--arch/arm/boot/dts/at91sam9x5.dtsi4
-rw-r--r--arch/arm/boot/dts/exynos4.dtsi2
-rw-r--r--arch/arm/boot/dts/imx51-babbage.dts10
-rw-r--r--arch/arm/boot/dts/imx51-eukrea-mbimxsd51-baseboard.dts4
-rw-r--r--arch/arm/boot/dts/imx53-m53evk.dts40
-rw-r--r--arch/arm/boot/dts/imx6dl-hummingboard.dts10
-rw-r--r--arch/arm/boot/dts/imx6q-gw51xx.dts2
-rw-r--r--arch/arm/boot/dts/imx6qdl-cubox-i.dtsi27
-rw-r--r--arch/arm/boot/dts/imx6qdl-gw51xx.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6qdl-gw52xx.dtsi4
-rw-r--r--arch/arm/boot/dts/imx6qdl-gw53xx.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6qdl-microsom.dtsi13
-rw-r--r--arch/arm/boot/dts/imx6sl.dtsi2
-rw-r--r--arch/arm/boot/dts/kirkwood-guruplug-server-plus.dts4
-rw-r--r--arch/arm/boot/dts/stih415.dtsi8
-rw-r--r--arch/arm/boot/dts/stih416-b2020e.dts (renamed from arch/arm/boot/dts/stih416-b2020-revE.dts)0
-rw-r--r--arch/arm/boot/dts/stih416.dtsi8
-rw-r--r--arch/arm/common/mcpm_entry.c52
-rw-r--r--arch/arm/configs/imx_v6_v7_defconfig1
-rw-r--r--arch/arm/configs/multi_v7_defconfig1
-rw-r--r--arch/arm/configs/mvebu_v7_defconfig2
-rw-r--r--arch/arm/crypto/Makefile4
-rw-r--r--arch/arm/crypto/aes-armv4.S3
-rw-r--r--arch/arm/crypto/sha1-armv7-neon.S634
-rw-r--r--arch/arm/crypto/sha1_glue.c58
-rw-r--r--arch/arm/crypto/sha1_neon_glue.c197
-rw-r--r--arch/arm/crypto/sha512-armv7-neon.S455
-rw-r--r--arch/arm/crypto/sha512_neon_glue.c305
-rw-r--r--arch/arm/include/asm/assembler.h29
-rw-r--r--arch/arm/include/asm/cputype.h37
-rw-r--r--arch/arm/include/asm/crypto/sha1.h10
-rw-r--r--arch/arm/include/asm/entry-macro-multi.S2
-rw-r--r--arch/arm/include/asm/glue-proc.h18
-rw-r--r--arch/arm/include/asm/mcpm.h16
-rw-r--r--arch/arm/include/asm/mcs_spinlock.h23
-rw-r--r--arch/arm/include/asm/memory.h6
-rw-r--r--arch/arm/include/asm/pgtable-3level-hwdef.h3
-rw-r--r--arch/arm/include/asm/pgtable-3level.h49
-rw-r--r--arch/arm/include/asm/pgtable.h18
-rw-r--r--arch/arm/include/asm/ptrace.h6
-rw-r--r--arch/arm/include/asm/smp_scu.h2
-rw-r--r--arch/arm/include/asm/stacktrace.h15
-rw-r--r--arch/arm/include/asm/thread_info.h3
-rw-r--r--arch/arm/include/asm/uaccess.h20
-rw-r--r--arch/arm/include/asm/unistd.h10
-rw-r--r--arch/arm/include/uapi/asm/unistd.h11
-rw-r--r--arch/arm/kernel/debug.S10
-rw-r--r--arch/arm/kernel/entry-armv.S42
-rw-r--r--arch/arm/kernel/entry-common.S13
-rw-r--r--arch/arm/kernel/entry-header.S14
-rw-r--r--arch/arm/kernel/fiqasm.S4
-rw-r--r--arch/arm/kernel/head-common.S7
-rw-r--r--arch/arm/kernel/head-nommu.S8
-rw-r--r--arch/arm/kernel/head.S18
-rw-r--r--arch/arm/kernel/hyp-stub.S6
-rw-r--r--arch/arm/kernel/iwmmxt.S16
-rw-r--r--arch/arm/kernel/perf_event.c5
-rw-r--r--arch/arm/kernel/perf_event_cpu.c55
-rw-r--r--arch/arm/kernel/relocate_kernel.S3
-rw-r--r--arch/arm/kernel/sleep.S2
-rw-r--r--arch/arm/kernel/smp_scu.c12
-rw-r--r--arch/arm/kernel/smp_tlb.c20
-rw-r--r--arch/arm/kernel/time.c5
-rw-r--r--arch/arm/kernel/traps.c6
-rw-r--r--arch/arm/kernel/unwind.c8
-rw-r--r--arch/arm/kernel/vmlinux.lds.S1
-rw-r--r--arch/arm/kvm/guest.c8
-rw-r--r--arch/arm/kvm/init.S3
-rw-r--r--arch/arm/lib/ashldi3.S3
-rw-r--r--arch/arm/lib/ashrdi3.S3
-rw-r--r--arch/arm/lib/backtrace.S2
-rw-r--r--arch/arm/lib/bitops.h5
-rw-r--r--arch/arm/lib/bswapsdi2.S5
-rw-r--r--arch/arm/lib/call_with_stack.S4
-rw-r--r--arch/arm/lib/csumpartial.S2
-rw-r--r--arch/arm/lib/csumpartialcopygeneric.S5
-rw-r--r--arch/arm/lib/delay-loop.S18
-rw-r--r--arch/arm/lib/div64.S13
-rw-r--r--arch/arm/lib/findbit.S10
-rw-r--r--arch/arm/lib/getuser.S45
-rw-r--r--arch/arm/lib/io-readsb.S2
-rw-r--r--arch/arm/lib/io-readsl.S6
-rw-r--r--arch/arm/lib/io-readsw-armv3.S4
-rw-r--r--arch/arm/lib/io-readsw-armv4.S2
-rw-r--r--arch/arm/lib/io-writesb.S2
-rw-r--r--arch/arm/lib/io-writesl.S10
-rw-r--r--arch/arm/lib/io-writesw-armv3.S4
-rw-r--r--arch/arm/lib/io-writesw-armv4.S4
-rw-r--r--arch/arm/lib/lib1funcs.S26
-rw-r--r--arch/arm/lib/lshrdi3.S3
-rw-r--r--arch/arm/lib/memchr.S2
-rw-r--r--arch/arm/lib/memset.S2
-rw-r--r--arch/arm/lib/memzero.S2
-rw-r--r--arch/arm/lib/muldi3.S3
-rw-r--r--arch/arm/lib/putuser.S10
-rw-r--r--arch/arm/lib/strchr.S2
-rw-r--r--arch/arm/lib/strrchr.S2
-rw-r--r--arch/arm/lib/ucmpdi2.S5
-rw-r--r--arch/arm/mach-davinci/sleep.S2
-rw-r--r--arch/arm/mach-ebsa110/include/mach/memory.h5
-rw-r--r--arch/arm/mach-ep93xx/crunch-bits.S6
-rw-r--r--arch/arm/mach-ep93xx/include/mach/memory.h22
-rw-r--r--arch/arm/mach-exynos/Kconfig1
-rw-r--r--arch/arm/mach-exynos/hotplug.c8
-rw-r--r--arch/arm/mach-exynos/mcpm-exynos.c28
-rw-r--r--arch/arm/mach-exynos/platsmp.c4
-rw-r--r--arch/arm/mach-exynos/pm.c14
-rw-r--r--arch/arm/mach-footbridge/include/mach/memory.h5
-rw-r--r--arch/arm/mach-imx/clk-imx6sl.c1
-rw-r--r--arch/arm/mach-imx/suspend-imx6.S5
-rw-r--r--arch/arm/mach-integrator/include/mach/memory.h5
-rw-r--r--arch/arm/mach-integrator/integrator_ap.c26
-rw-r--r--arch/arm/mach-integrator/integrator_cp.c23
-rw-r--r--arch/arm/mach-iop13xx/include/mach/iop13xx.h2
-rw-r--r--arch/arm/mach-iop13xx/include/mach/memory.h5
-rw-r--r--arch/arm/mach-iop13xx/setup.c1
-rw-r--r--arch/arm/mach-ks8695/include/mach/memory.h5
-rw-r--r--arch/arm/mach-mvebu/Kconfig2
-rw-r--r--arch/arm/mach-mvebu/coherency_ll.S10
-rw-r--r--arch/arm/mach-mvebu/headsmp-a9.S3
-rw-r--r--arch/arm/mach-omap1/include/mach/memory.h5
-rw-r--r--arch/arm/mach-omap2/sleep44xx.S3
-rw-r--r--arch/arm/mach-omap2/sram242x.S6
-rw-r--r--arch/arm/mach-omap2/sram243x.S6
-rw-r--r--arch/arm/mach-pxa/mioa701_bootresume.S2
-rw-r--r--arch/arm/mach-pxa/standby.S4
-rw-r--r--arch/arm/mach-realview/include/mach/memory.h9
-rw-r--r--arch/arm/mach-rpc/include/mach/memory.h5
-rw-r--r--arch/arm/mach-s3c24xx/sleep-s3c2410.S2
-rw-r--r--arch/arm/mach-s3c24xx/sleep-s3c2412.S2
-rw-r--r--arch/arm/mach-s5pv210/include/mach/memory.h2
-rw-r--r--arch/arm/mach-sa1100/include/mach/memory.h5
-rw-r--r--arch/arm/mach-shmobile/headsmp.S3
-rw-r--r--arch/arm/mach-tegra/sleep-tegra20.S24
-rw-r--r--arch/arm/mach-tegra/sleep-tegra30.S14
-rw-r--r--arch/arm/mach-tegra/sleep.S8
-rw-r--r--arch/arm/mach-vexpress/tc2_pm.c19
-rw-r--r--arch/arm/mm/Kconfig7
-rw-r--r--arch/arm/mm/alignment.c4
-rw-r--r--arch/arm/mm/cache-fa.S19
-rw-r--r--arch/arm/mm/cache-l2x0.c2
-rw-r--r--arch/arm/mm/cache-nop.S5
-rw-r--r--arch/arm/mm/cache-v4.S13
-rw-r--r--arch/arm/mm/cache-v4wb.S15
-rw-r--r--arch/arm/mm/cache-v4wt.S13
-rw-r--r--arch/arm/mm/cache-v6.S20
-rw-r--r--arch/arm/mm/cache-v7.S30
-rw-r--r--arch/arm/mm/dump.c4
-rw-r--r--arch/arm/mm/l2c-l2x0-resume.S7
-rw-r--r--arch/arm/mm/mmu.c45
-rw-r--r--arch/arm/mm/proc-arm1020.S34
-rw-r--r--arch/arm/mm/proc-arm1020e.S34
-rw-r--r--arch/arm/mm/proc-arm1022.S34
-rw-r--r--arch/arm/mm/proc-arm1026.S34
-rw-r--r--arch/arm/mm/proc-arm720.S16
-rw-r--r--arch/arm/mm/proc-arm740.S8
-rw-r--r--arch/arm/mm/proc-arm7tdmi.S8
-rw-r--r--arch/arm/mm/proc-arm920.S34
-rw-r--r--arch/arm/mm/proc-arm922.S34
-rw-r--r--arch/arm/mm/proc-arm925.S34
-rw-r--r--arch/arm/mm/proc-arm926.S34
-rw-r--r--arch/arm/mm/proc-arm940.S24
-rw-r--r--arch/arm/mm/proc-arm946.S30
-rw-r--r--arch/arm/mm/proc-arm9tdmi.S8
-rw-r--r--arch/arm/mm/proc-fa526.S16
-rw-r--r--arch/arm/mm/proc-feroceon.S44
-rw-r--r--arch/arm/mm/proc-mohawk.S34
-rw-r--r--arch/arm/mm/proc-sa110.S16
-rw-r--r--arch/arm/mm/proc-sa1100.S16
-rw-r--r--arch/arm/mm/proc-v6.S16
-rw-r--r--arch/arm/mm/proc-v7-2level.S4
-rw-r--r--arch/arm/mm/proc-v7-3level.S14
-rw-r--r--arch/arm/mm/proc-v7.S74
-rw-r--r--arch/arm/mm/proc-v7m.S18
-rw-r--r--arch/arm/mm/proc-xsc3.S32
-rw-r--r--arch/arm/mm/proc-xscale.S34
-rw-r--r--arch/arm/mm/tlb-fa.S7
-rw-r--r--arch/arm/mm/tlb-v4.S5
-rw-r--r--arch/arm/mm/tlb-v4wb.S7
-rw-r--r--arch/arm/mm/tlb-v4wbi.S7
-rw-r--r--arch/arm/mm/tlb-v6.S5
-rw-r--r--arch/arm/mm/tlb-v7.S4
-rw-r--r--arch/arm/nwfpe/entry.S8
-rw-r--r--arch/arm/oprofile/common.c5
-rw-r--r--arch/arm/plat-omap/dma.c2
-rw-r--r--arch/arm/vfp/entry.S4
-rw-r--r--arch/arm/vfp/vfphw.S26
-rw-r--r--arch/arm/xen/hypercall.S6
-rw-r--r--arch/ia64/include/uapi/asm/fcntl.h1
-rw-r--r--arch/mips/Kconfig1
-rw-r--r--arch/mips/include/asm/sigcontext.h2
-rw-r--r--arch/mips/include/asm/uasm.h4
-rw-r--r--arch/mips/include/uapi/asm/inst.h1
-rw-r--r--arch/mips/include/uapi/asm/sigcontext.h8
-rw-r--r--arch/mips/kernel/asm-offsets.c3
-rw-r--r--arch/mips/kernel/irq-msc01.c2
-rw-r--r--arch/mips/kernel/pm-cps.c4
-rw-r--r--arch/mips/kernel/r4k_fpu.S213
-rw-r--r--arch/mips/kernel/signal.c79
-rw-r--r--arch/mips/kernel/signal32.c74
-rw-r--r--arch/mips/kernel/smp-cps.c2
-rw-r--r--arch/mips/math-emu/ieee754.c23
-rw-r--r--arch/mips/mm/uasm-micromips.c1
-rw-r--r--arch/mips/mm/uasm-mips.c3
-rw-r--r--arch/mips/mm/uasm.c10
-rw-r--r--arch/mips/net/bpf_jit.c266
-rw-r--r--arch/powerpc/Kconfig.debug1
-rw-r--r--arch/powerpc/include/asm/code-patching.h11
-rw-r--r--arch/powerpc/include/asm/opal.h29
-rw-r--r--arch/powerpc/include/asm/swab.h43
-rw-r--r--arch/powerpc/kernel/ftrace.c52
-rw-r--r--arch/powerpc/kernel/iomap.c20
-rw-r--r--arch/powerpc/kernel/kprobes.c9
-rw-r--r--arch/powerpc/kernel/module_64.c11
-rw-r--r--arch/powerpc/kernel/prom.c7
-rw-r--r--arch/powerpc/kernel/prom_init.c211
-rw-r--r--arch/powerpc/kernel/prom_init_check.sh4
-rw-r--r--arch/powerpc/kernel/setup-common.c10
-rw-r--r--arch/powerpc/kernel/signal_32.c9
-rw-r--r--arch/powerpc/kernel/signal_64.c9
-rw-r--r--arch/powerpc/platforms/cell/cbe_thermal.c2
-rw-r--r--arch/powerpc/platforms/powernv/Makefile2
-rw-r--r--arch/powerpc/platforms/powernv/opal-takeover.S140
-rw-r--r--arch/powerpc/sysdev/dart_iommu.c5
-rw-r--r--arch/sparc/include/asm/irq_64.h2
-rw-r--r--arch/sparc/kernel/process_64.c18
-rw-r--r--arch/x86/include/asm/irq.h2
-rw-r--r--arch/x86/kernel/apic/hw_nmi.c18
-rw-r--r--arch/x86/kernel/entry_32.S10
-rw-r--r--arch/x86/kernel/signal.c2
-rw-r--r--arch/x86/vdso/Makefile24
-rw-r--r--arch/x86/vdso/vclock_gettime.c3
-rw-r--r--arch/x86/vdso/vdso-fakesections.c41
-rw-r--r--arch/x86/vdso/vdso-layout.lds.S64
-rw-r--r--arch/x86/vdso/vdso.lds.S2
-rw-r--r--arch/x86/vdso/vdso2c.c73
-rw-r--r--arch/x86/vdso/vdso2c.h199
-rw-r--r--arch/x86/vdso/vdso32/vdso-fakesections.c1
-rw-r--r--arch/x86/vdso/vdsox32.lds.S2
-rw-r--r--block/bio.c8
-rw-r--r--block/blk-cgroup.c9
-rw-r--r--block/blk-cgroup.h21
-rw-r--r--block/blk-merge.c10
-rw-r--r--block/blk-mq.c2
-rw-r--r--block/elevator.c2
-rw-r--r--crypto/Kconfig26
-rw-r--r--drivers/base/dma-contiguous.c12
-rw-r--r--drivers/block/drbd/drbd_receiver.c5
-rw-r--r--drivers/block/floppy.c2
-rw-r--r--drivers/block/rbd.c10
-rw-r--r--drivers/clocksource/arm_global_timer.c2
-rw-r--r--drivers/clocksource/exynos_mct.c9
-rw-r--r--drivers/firmware/efi/efi-pstore.c2
-rw-r--r--drivers/firmware/efi/efi.c6
-rw-r--r--drivers/firmware/efi/fdt.c2
-rwxr-xr-x[-rw-r--r--]drivers/gpu/drm/drm_drv.c3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dpi.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c8
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c19
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c50
-rw-r--r--drivers/gpu/drm/exynos/regs-mixer.h1
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c2
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c8
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c6
-rw-r--r--drivers/gpu/drm/i915/intel_display.c13
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h4
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c37
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c2
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.h1
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_connector.c8
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c22
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h1
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c2
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c6
-rw-r--r--drivers/gpu/drm/msm/msm_iommu.c23
-rw-r--r--drivers/gpu/drm/msm/msm_mmu.h1
-rw-r--r--drivers/hwmon/Kconfig6
-rw-r--r--drivers/hwmon/gpio-fan.c2
-rw-r--r--drivers/hwmon/ntc_thermistor.c14
-rw-r--r--drivers/hwmon/w83l786ng.c2
-rw-r--r--drivers/iommu/amd_iommu_v2.c18
-rw-r--r--drivers/iommu/intel-iommu.c9
-rw-r--r--drivers/isdn/hisax/Kconfig11
-rw-r--r--drivers/macintosh/smu.c3
-rw-r--r--drivers/memstick/host/rtsx_pci_ms.c1
-rw-r--r--drivers/mfd/Kconfig5
-rw-r--r--drivers/mfd/ab8500-core.c2
-rw-r--r--drivers/misc/Kconfig2
-rw-r--r--drivers/net/bonding/bond_main.c9
-rw-r--r--drivers/net/can/slcan.c37
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c1
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c16
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c1
-rw-r--r--drivers/net/ethernet/dec/tulip/timer.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h4
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c4
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c8
-rw-r--r--drivers/net/ethernet/marvell/skge.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c3
-rw-r--r--drivers/net/ethernet/ti/cpsw.c7
-rw-r--r--drivers/net/ethernet/tile/tilegx.c1
-rw-r--r--drivers/net/hyperv/netvsc.c2
-rw-r--r--drivers/net/ieee802154/at86rf230.c5
-rw-r--r--drivers/net/phy/at803x.c195
-rw-r--r--drivers/net/phy/phy.c3
-rw-r--r--drivers/net/slip/slip.c36
-rw-r--r--drivers/net/slip/slip.h1
-rw-r--r--drivers/net/usb/huawei_cdc_ncm.c7
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c7
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c12
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h5
-rw-r--r--drivers/net/wireless/b43/Kconfig2
-rw-r--r--drivers/net/wireless/b43/main.c1
-rw-r--r--drivers/net/wireless/b43/xmit.c10
-rw-r--r--drivers/net/wireless/mwifiex/pcie.c4
-rw-r--r--drivers/net/wireless/mwifiex/util.h43
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c7
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c39
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h1
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c24
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.h1
-rw-r--r--drivers/net/xen-netback/common.h1
-rw-r--r--drivers/net/xen-netback/interface.c49
-rw-r--r--drivers/net/xen-netback/xenbus.c28
-rw-r--r--drivers/net/xen-netfront.c109
-rw-r--r--drivers/of/of_mdio.c8
-rw-r--r--drivers/ptp/Kconfig2
-rw-r--r--drivers/regulator/bcm590xx-regulator.c5
-rw-r--r--drivers/regulator/palmas-regulator.c12
-rw-r--r--drivers/regulator/tps65218-regulator.c3
-rw-r--r--drivers/spi/spi-pxa2xx.c8
-rw-r--r--drivers/spi/spi-qup.c44
-rw-r--r--drivers/spi/spi-sh-sci.c4
-rw-r--r--drivers/target/iscsi/iscsi_target.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.c14
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c13
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c2
-rw-r--r--drivers/target/loopback/tcm_loop.c1
-rw-r--r--drivers/target/target_core_device.c1
-rw-r--r--drivers/tc/tc.c10
-rw-r--r--drivers/tty/serial/msm_serial.c2
-rw-r--r--drivers/vhost/net.c12
-rw-r--r--drivers/vhost/scsi.c12
-rw-r--r--fs/aio.c6
-rw-r--r--fs/cifs/cifs_unicode.c7
-rw-r--r--fs/cifs/cifsfs.c17
-rw-r--r--fs/cifs/link.c2
-rw-r--r--fs/nfs/inode.c76
-rw-r--r--fs/nfs/nfs4_fs.h2
-rw-r--r--fs/nfs/nfs4namespace.c102
-rw-r--r--fs/nfs/nfs4proc.c2
-rw-r--r--fs/nfs/write.c4
-rw-r--r--fs/ocfs2/dlm/dlmcommon.h4
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c57
-rw-r--r--fs/ocfs2/dlm/dlmrecovery.c3
-rw-r--r--fs/ocfs2/dlm/dlmthread.c13
-rw-r--r--fs/ocfs2/dlm/dlmunlock.c18
-rw-r--r--fs/ocfs2/namei.c145
-rw-r--r--fs/ocfs2/ocfs2_trace.h2
-rw-r--r--fs/ocfs2/refcounttree.c8
-rw-r--r--fs/ocfs2/super.c8
-rw-r--r--include/drm/i915_pciids.h12
-rw-r--r--include/dt-bindings/clock/imx6sl-clock.h3
-rw-r--r--include/dt-bindings/clock/stih415-clks.h1
-rw-r--r--include/dt-bindings/clock/stih416-clks.h1
-rw-r--r--include/linux/bio.h13
-rw-r--r--include/linux/blkdev.h1
-rw-r--r--include/linux/elevator.h2
-rw-r--r--include/linux/nmi.h12
-rw-r--r--include/linux/page-flags.h3
-rw-r--r--include/linux/phy.h9
-rw-r--r--include/linux/socket.h4
-rw-r--r--include/linux/uio.h19
-rw-r--r--include/net/netfilter/nf_tables.h6
-rw-r--r--include/net/sock.h4
-rw-r--r--include/trace/ftrace.h33
-rw-r--r--include/trace/syscall.h15
-rw-r--r--include/uapi/sound/compress_offload.h14
-rw-r--r--include/uapi/sound/compress_params.h14
-rw-r--r--kernel/fork.c2
-rw-r--r--kernel/kexec.c1
-rw-r--r--kernel/smp.c57
-rw-r--r--kernel/sysctl.c14
-rw-r--r--kernel/tracepoint.c26
-rw-r--r--kernel/watchdog.c41
-rw-r--r--lib/Kconfig.debug4
-rw-r--r--lib/iovec.c55
-rw-r--r--lib/lz4/lz4_decompress.c6
-rw-r--r--lib/lzo/lzo1x_decompress_safe.c62
-rw-r--r--lib/swiotlb.c28
-rw-r--r--mm/huge_memory.c57
-rw-r--r--mm/hugetlb.c71
-rw-r--r--mm/ksm.c1
-rw-r--r--mm/mempolicy.c46
-rw-r--r--mm/migrate.c2
-rw-r--r--mm/nommu.c2
-rw-r--r--mm/page_alloc.c40
-rw-r--r--mm/rmap.c12
-rw-r--r--mm/shmem.c59
-rw-r--r--mm/slab.c90
-rw-r--r--net/8021q/vlan_core.c5
-rw-r--r--net/bluetooth/hci_conn.c7
-rw-r--r--net/bluetooth/hci_event.c17
-rw-r--r--net/bluetooth/l2cap_core.c8
-rw-r--r--net/bluetooth/l2cap_sock.c5
-rw-r--r--net/bluetooth/mgmt.c104
-rw-r--r--net/bluetooth/smp.c9
-rw-r--r--net/core/dst.c16
-rw-r--r--net/core/filter.c10
-rw-r--r--net/core/iovec.c55
-rw-r--r--net/core/skbuff.c2
-rw-r--r--net/ipv4/ip_tunnel.c14
-rw-r--r--net/ipv4/tcp_fastopen.c2
-rw-r--r--net/ipv4/tcp_input.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c2
-rw-r--r--net/netfilter/nf_conntrack_netlink.c20
-rw-r--r--net/netfilter/nf_nat_core.c35
-rw-r--r--net/netfilter/nf_tables_api.c11
-rw-r--r--net/netfilter/nft_compat.c18
-rw-r--r--net/netfilter/nft_nat.c14
-rw-r--r--net/sctp/sysctl.c46
-rw-r--r--net/sunrpc/auth.c1
-rw-r--r--samples/trace_events/trace-events-sample.h3
-rwxr-xr-xscripts/checkpatch.pl15
-rw-r--r--scripts/recordmcount.h4
-rw-r--r--sound/pci/hda/hda_auto_parser.c1
-rw-r--r--sound/pci/hda/hda_intel.c65
-rw-r--r--sound/pci/hda/hda_local.h21
-rw-r--r--sound/pci/hda/patch_hdmi.c2
-rw-r--r--sound/pci/hda/patch_realtek.c489
-rw-r--r--sound/pci/hda/patch_sigmatel.c58
-rw-r--r--sound/usb/card.c13
-rw-r--r--sound/usb/endpoint.c17
-rw-r--r--sound/usb/endpoint.h1
-rw-r--r--tools/testing/selftests/powerpc/tm/Makefile2
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-resched-dscr.c14
483 files changed, 6139 insertions, 3463 deletions
diff --git a/CREDITS b/CREDITS
index c322dcfb926d..28ee1514b9de 100644
--- a/CREDITS
+++ b/CREDITS
@@ -9,6 +9,10 @@
 Linus
 ----------
 
+M: Matt Mackal
+E: mpm@selenic.com
+D: SLOB slab allocator
+
 N: Matti Aarnio
 E: mea@nic.funet.fi
 D: Alpha systems hacking, IPv6 and other network related stuff
diff --git a/Documentation/accounting/getdelays.c b/Documentation/accounting/getdelays.c
index c6a06b71594d..f40578026a04 100644
--- a/Documentation/accounting/getdelays.c
+++ b/Documentation/accounting/getdelays.c
@@ -314,6 +314,7 @@ int main(int argc, char *argv[])
 			break;
 		case 'm':
 			strncpy(cpumask, optarg, sizeof(cpumask));
+			cpumask[sizeof(cpumask) - 1] = '\0';
 			maskset = 1;
 			printf("cpumask %s maskset %d\n", cpumask, maskset);
 			break;
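
Aside, not part of the patch: the hunk above matters because strncpy() does not
NUL-terminate the destination when the source fills it. A minimal userspace
sketch of the same pattern (buffer size and input are hypothetical):

    #include <stdio.h>
    #include <string.h>

    int main(int argc, char *argv[])
    {
            char cpumask[32];               /* fixed-size buffer, as in getdelays.c */
            const char *arg = argc > 1 ? argv[1] : "0-3";

            /* strncpy() stops at sizeof(cpumask) but adds no '\0' on truncation... */
            strncpy(cpumask, arg, sizeof(cpumask));
            /* ...so terminate explicitly before printing or parsing the copy. */
            cpumask[sizeof(cpumask) - 1] = '\0';

            printf("cpumask %s\n", cpumask);
            return 0;
    }
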
diff --git a/Documentation/devicetree/bindings/arm/armada-38x.txt b/Documentation/devicetree/bindings/arm/armada-38x.txt
index 11f2330a6554..ad9f8ed4d9bd 100644
--- a/Documentation/devicetree/bindings/arm/armada-38x.txt
+++ b/Documentation/devicetree/bindings/arm/armada-38x.txt
@@ -6,5 +6,15 @@ following property:
 
 Required root node property:
 
- - compatible: must contain either "marvell,armada380" or
-   "marvell,armada385" depending on the variant of the SoC being used.
+ - compatible: must contain "marvell,armada380"
+
+In addition, boards using the Marvell Armada 385 SoC shall have the
+following property before the previous one:
+
+Required root node property:
+
+compatible: must contain "marvell,armada385"
+
+Example:
+
+compatible = "marvell,a385-rd", "marvell,armada385", "marvell,armada380";
diff --git a/Documentation/devicetree/bindings/arm/samsung/exynos-adc.txt b/Documentation/devicetree/bindings/arm/samsung/exynos-adc.txt
index 5d49f2b37f68..832fe8cc24d7 100644
--- a/Documentation/devicetree/bindings/arm/samsung/exynos-adc.txt
+++ b/Documentation/devicetree/bindings/arm/samsung/exynos-adc.txt
@@ -48,7 +48,7 @@ adc@12D10000 {
 
 	/* NTC thermistor is a hwmon device */
 	ncp15wb473@0 {
-		compatible = "ntc,ncp15wb473";
+		compatible = "murata,ncp15wb473";
 		pullup-uv = <1800000>;
 		pullup-ohm = <47000>;
 		pulldown-ohm = <0>;
diff --git a/Documentation/devicetree/bindings/hwmon/ntc_thermistor.txt b/Documentation/devicetree/bindings/hwmon/ntc_thermistor.txt
index c6f66674f19c..b117b2e9e1a7 100644
--- a/Documentation/devicetree/bindings/hwmon/ntc_thermistor.txt
+++ b/Documentation/devicetree/bindings/hwmon/ntc_thermistor.txt
@@ -3,11 +3,19 @@ NTC Thermistor hwmon sensors
 
 Requires node properties:
 - "compatible" value : one of
-	"ntc,ncp15wb473"
-	"ntc,ncp18wb473"
-	"ntc,ncp21wb473"
-	"ntc,ncp03wb473"
-	"ntc,ncp15wl333"
+	"murata,ncp15wb473"
+	"murata,ncp18wb473"
+	"murata,ncp21wb473"
+	"murata,ncp03wb473"
+	"murata,ncp15wl333"
+
+/* Usage of vendor name "ntc" is deprecated */
+<DEPRECATED>	"ntc,ncp15wb473"
+<DEPRECATED>	"ntc,ncp18wb473"
+<DEPRECATED>	"ntc,ncp21wb473"
+<DEPRECATED>	"ntc,ncp03wb473"
+<DEPRECATED>	"ntc,ncp15wl333"
+
 - "pullup-uv"	Pull up voltage in micro volts
 - "pullup-ohm"	Pull up resistor value in ohms
 - "pulldown-ohm" Pull down resistor value in ohms
@@ -21,7 +29,7 @@ Read more about iio bindings at
 
 Example:
 	ncp15wb473@0 {
-		compatible = "ntc,ncp15wb473";
+		compatible = "murata,ncp15wb473";
 		pullup-uv = <1800000>;
 		pullup-ohm = <47000>;
 		pulldown-ohm = <0>;
diff --git a/Documentation/devicetree/bindings/spi/qcom,spi-qup.txt b/Documentation/devicetree/bindings/spi/qcom,spi-qup.txt
index b82a268f1bd4..bee6ff204baf 100644
--- a/Documentation/devicetree/bindings/spi/qcom,spi-qup.txt
+++ b/Documentation/devicetree/bindings/spi/qcom,spi-qup.txt
@@ -23,6 +23,12 @@ Optional properties:
 - spi-max-frequency: Specifies maximum SPI clock frequency,
                      Units - Hz. Definition as per
                      Documentation/devicetree/bindings/spi/spi-bus.txt
+- num-cs:	total number of chipselects
+- cs-gpios:	should specify GPIOs used for chipselects.
+		The gpios will be referred to as reg = <index> in the SPI child
+		nodes. If unspecified, a single SPI device without a chip
+		select can be used.
+
 
 SPI slave nodes must be children of the SPI master node and can contain
 properties described in Documentation/devicetree/bindings/spi/spi-bus.txt
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 4d7f3758d1b4..46a311e728a8 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -83,6 +83,7 @@ mosaixtech Mosaix Technologies, Inc.
 moxa	Moxa
 mpl	MPL AG
 mundoreader	Mundo Reader S.L.
+murata	Murata Manufacturing Co., Ltd.
 mxicy	Macronix International Co., Ltd.
 national	National Semiconductor
 neonode	Neonode Inc.
diff --git a/Documentation/email-clients.txt b/Documentation/email-clients.txt
index 4e30ebaa9e5b..9af538be3751 100644
--- a/Documentation/email-clients.txt
+++ b/Documentation/email-clients.txt
@@ -1,6 +1,17 @@
 Email clients info for Linux
 ======================================================================
 
+Git
+----------------------------------------------------------------------
+These days most developers use `git send-email` instead of regular
+email clients.  The man page for this is quite good.  On the receiving
+end, maintainers use `git am` to apply the patches.
+
+If you are new to git then send your first patch to yourself.  Save it
+as raw text including all the headers.  Run `git am raw_email.txt` and
+then review the changelog with `git log`.  When that works then send
+the patch to the appropriate mailing list(s).
+
 General Preferences
 ----------------------------------------------------------------------
 Patches for the Linux kernel are submitted via email, preferably as
diff --git a/Documentation/hwmon/ntc_thermistor b/Documentation/hwmon/ntc_thermistor
index 3bfda94096fd..057b77029f26 100644
--- a/Documentation/hwmon/ntc_thermistor
+++ b/Documentation/hwmon/ntc_thermistor
@@ -1,7 +1,7 @@
 Kernel driver ntc_thermistor
 =================
 
-Supported thermistors:
+Supported thermistors from Murata:
 * Murata NTC Thermistors NCP15WB473, NCP18WB473, NCP21WB473, NCP03WB473, NCP15WL333
   Prefixes: 'ncp15wb473', 'ncp18wb473', 'ncp21wb473', 'ncp03wb473', 'ncp15wl333'
   Datasheet: Publicly available at Murata
@@ -15,9 +15,9 @@ Authors:
 Description
 -----------
 
-The NTC thermistor is a simple thermistor that requires users to provide the
-resistance and lookup the corresponding compensation table to get the
-temperature input.
+The NTC (Negative Temperature Coefficient) thermistor is a simple thermistor
+that requires users to provide the resistance and lookup the corresponding
+compensation table to get the temperature input.
 
 The NTC driver provides lookup tables with a linear approximation function
 and four circuit models with an option not to use any of the four models.
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 884904975d0b..c1b9aa8c5a52 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -3130,6 +3130,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			[KNL] Should the soft-lockup detector generate panics.
 			Format: <integer>
 
+	softlockup_all_cpu_backtrace=
+			[KNL] Should the soft-lockup detector generate
+			backtraces on all cpus.
+			Format: <integer>
+
 	sonypi.*=	[HW] Sony Programmable I/O Control Device driver
 			See Documentation/laptops/sonypi.txt
 
diff --git a/Documentation/memory-hotplug.txt b/Documentation/memory-hotplug.txt
index f304edb8fbe7..45134dc23854 100644
--- a/Documentation/memory-hotplug.txt
+++ b/Documentation/memory-hotplug.txt
@@ -209,15 +209,12 @@ If memory device is found, memory hotplug code will be called.
 
 4.2 Notify memory hot-add event by hand
 ------------
-On powerpc, the firmware does not notify a memory hotplug event to the kernel.
-Therefore, "probe" interface is supported to notify the event to the kernel.
-This interface depends on CONFIG_ARCH_MEMORY_PROBE.
-
-CONFIG_ARCH_MEMORY_PROBE is supported on powerpc only. On x86, this config
-option is disabled by default since ACPI notifies a memory hotplug event to
-the kernel, which performs its hotplug operation as the result. Please
-enable this option if you need the "probe" interface for testing purposes
-on x86.
+On some architectures, the firmware may not notify the kernel of a memory
+hotplug event.  Therefore, the memory "probe" interface is supported to
+explicitly notify the kernel.  This interface depends on
+CONFIG_ARCH_MEMORY_PROBE and can be configured on powerpc, sh, and x86
+if hotplug is supported, although for x86 this should be handled by ACPI
+notification.
 
 Probe interface is located at
 /sys/devices/system/memory/probe
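
Aside, a sketch of using the probe interface described above (not part of the
patch): the physical address of the new memory block is simply written to the
sysfs file. The address below is a placeholder and must be replaced by a
section-aligned physical address valid on the target machine, with
CONFIG_ARCH_MEMORY_PROBE enabled and root privileges.

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/devices/system/memory/probe", "w");

            if (!f) {
                    perror("probe");
                    return 1;
            }
            /* placeholder physical address of the memory block to add */
            fprintf(f, "0x100000000\n");
            return fclose(f) ? 1 : 0;
    }
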
diff --git a/Documentation/ptp/testptp.c b/Documentation/ptp/testptp.c
index f1ac2dae999e..ba1d50200c46 100644
--- a/Documentation/ptp/testptp.c
+++ b/Documentation/ptp/testptp.c
@@ -17,6 +17,7 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
+#define _GNU_SOURCE
 #include <errno.h>
 #include <fcntl.h>
 #include <inttypes.h>
@@ -46,12 +47,14 @@
 #define CLOCK_INVALID -1
 #endif
 
-/* When glibc offers the syscall, this will go away. */
+/* clock_adjtime is not available in GLIBC < 2.14 */
+#if !__GLIBC_PREREQ(2, 14)
 #include <sys/syscall.h>
 static int clock_adjtime(clockid_t id, struct timex *tx)
 {
 	return syscall(__NR_clock_adjtime, id, tx);
 }
+#endif
 
 static clockid_t get_clockid(int fd)
 {
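
Aside, the guard added above is the usual glibc feature-test pattern; a
standalone sketch (assumes glibc, and the fallback path only compiles where
__NR_clock_adjtime exists):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>
    #include <sys/timex.h>

    /* glibc < 2.14 has no clock_adjtime() wrapper, so fall back to the raw
     * syscall; newer glibc already declares it in <sys/timex.h>. */
    #if !__GLIBC_PREREQ(2, 14)
    #include <sys/syscall.h>
    static int clock_adjtime(clockid_t id, struct timex *tx)
    {
            return syscall(__NR_clock_adjtime, id, tx);
    }
    #endif

    int main(void)
    {
            struct timex tx = { .modes = 0 };       /* modes = 0: query only */
            int state = clock_adjtime(CLOCK_REALTIME, &tx);

            printf("clock state %d, freq %ld\n", state, tx.freq);
            return state < 0;
    }
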
diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
index 85c362d8ea34..d1ab5e17eb13 100644
--- a/Documentation/sound/alsa/HD-Audio-Models.txt
+++ b/Documentation/sound/alsa/HD-Audio-Models.txt
@@ -286,6 +286,11 @@ STAC92HD83*
   hp-inv-led	HP with broken BIOS for inverted mute LED
   auto		BIOS setup (default)
 
+STAC92HD95
+==========
+  hp-led	LED support for HP laptops
+  hp-bass	Bass HPF setup for HP Spectre 13
+
 STAC9872
 ========
   vaio		VAIO laptop without SPDIF
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index 708bb7f1b7e0..c14374e71775 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -75,6 +75,7 @@ show up in /proc/sys/kernel:
 - shmall
 - shmmax			[ sysv ipc ]
 - shmmni
+- softlockup_all_cpu_backtrace
 - stop-a			[ SPARC only ]
 - sysrq			==> Documentation/sysrq.txt
 - sysctl_writes_strict
@@ -783,6 +784,22 @@ via the /proc/sys interface:
 
 ==============================================================
 
+softlockup_all_cpu_backtrace:
+
+This value controls the soft lockup detector thread's behavior
+when a soft lockup condition is detected as to whether or not
+to gather further debug information. If enabled, each cpu will
+be issued an NMI and instructed to capture stack trace.
+
+This feature is only applicable for architectures which support
+NMI.
+
+0: do nothing. This is the default behavior.
+
+1: on detection capture more debug information.
+
+==============================================================
+
 tainted:
 
 Non-zero if the kernel has been tainted.  Numeric values, which
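
Aside (not part of the patch), the new knob can be flipped at runtime by
writing to the corresponding /proc/sys path; a small sketch, assuming a kernel
with this patch and an NMI-capable watchdog:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* "1" asks the soft-lockup detector to NMI-backtrace all CPUs. */
            int fd = open("/proc/sys/kernel/softlockup_all_cpu_backtrace", O_WRONLY);

            if (fd < 0) {
                    perror("softlockup_all_cpu_backtrace");
                    return 1;
            }
            if (write(fd, "1\n", 2) != 2)
                    perror("write");
            close(fd);
            return 0;
    }
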
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index bd4b34c03738..4415aa915681 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -702,7 +702,8 @@ The batch value of each per cpu pagelist is also updated as a result. It is
 set to pcp->high/4.  The upper limit of batch is (PAGE_SHIFT * 8)
 
 The initial value is zero.  Kernel does not use this value at boot time to set
-the high water marks for each per cpu page list.
+the high water marks for each per cpu page list.  If the user writes '0' to this
+sysctl, it will revert to this default behavior.
 
 ==============================================================
 
diff --git a/MAINTAINERS b/MAINTAINERS
index 3f2e171047b9..702ca10a5a6c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2917,6 +2917,9 @@ L: linux-doc@vger.kernel.org
 T:	quilt http://www.infradead.org/~rdunlap/Doc/patches/
 S:	Maintained
 F:	Documentation/
+X:	Documentation/ABI/
+X:	Documentation/devicetree/
+X:	Documentation/[a-z][a-z]_[A-Z][A-Z]/
 
 DOUBLETALK DRIVER
 M:	"James R. Van Zandt" <jrv@vanzandt.mv.com>
@@ -3189,14 +3192,6 @@ L: linux-scsi@vger.kernel.org
 S:	Maintained
 F:	drivers/scsi/eata_pio.*
 
-EBTABLES
-L:	netfilter-devel@vger.kernel.org
-W:	http://ebtables.sourceforge.net/
-S:	Orphan
-F:	include/linux/netfilter_bridge/ebt_*.h
-F:	include/uapi/linux/netfilter_bridge/ebt_*.h
-F:	net/bridge/netfilter/ebt*.c
-
 EC100 MEDIA DRIVER
 M:	Antti Palosaari <crope@iki.fi>
 L:	linux-media@vger.kernel.org
@@ -6105,12 +6100,11 @@ F: Documentation/networking/s2io.txt
 F:	Documentation/networking/vxge.txt
 F:	drivers/net/ethernet/neterion/
 
-NETFILTER/IPTABLES
+NETFILTER ({IP,IP6,ARP,EB,NF}TABLES)
 M:	Pablo Neira Ayuso <pablo@netfilter.org>
 M:	Patrick McHardy <kaber@trash.net>
 M:	Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
 L:	netfilter-devel@vger.kernel.org
-L:	netfilter@vger.kernel.org
 L:	coreteam@netfilter.org
 W:	http://www.netfilter.org/
 W:	http://www.iptables.org/
@@ -8196,13 +8190,15 @@ S: Maintained
 F:	drivers/usb/misc/sisusbvga/
 
 SLAB ALLOCATOR
-M:	Christoph Lameter <cl@linux-foundation.org>
+M:	Christoph Lameter <cl@linux.com>
 M:	Pekka Enberg <penberg@kernel.org>
-M:	Matt Mackall <mpm@selenic.com>
+M:	David Rientjes <rientjes@google.com>
+M:	Joonsoo Kim <iamjoonsoo.kim@lge.com>
+M:	Andrew Morton <akpm@linux-foundation.org>
 L:	linux-mm@kvack.org
 S:	Maintained
 F:	include/linux/sl?b*.h
-F:	mm/sl?b.c
+F:	mm/sl?b*
 
 SLEEPABLE READ-COPY UPDATE (SRCU)
 M:	Lai Jiangshan <laijs@cn.fujitsu.com>
diff --git a/Makefile b/Makefile
index b11e2d504a00..13175632137f 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 16
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
 NAME = Shuffling Zombie Juror
 
 # *DOCUMENTATION*
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index c1d3d2da1191..b3c750979aa1 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -60,7 +60,7 @@ extern void read_decode_cache_bcr(void);
 #define ARC_REG_IC_IVIC		0x10
 #define ARC_REG_IC_CTRL		0x11
 #define ARC_REG_IC_IVIL		0x19
-#if defined(CONFIG_ARC_MMU_V3) || defined (CONFIG_ARC_MMU_V4)
+#if defined(CONFIG_ARC_MMU_V3)
 #define ARC_REG_IC_PTAG		0x1E
 #endif
 
@@ -74,7 +74,7 @@ extern void read_decode_cache_bcr(void);
 #define ARC_REG_DC_IVDL		0x4A
 #define ARC_REG_DC_FLSH		0x4B
 #define ARC_REG_DC_FLDL		0x4C
-#if defined(CONFIG_ARC_MMU_V3) || defined (CONFIG_ARC_MMU_V4)
+#if defined(CONFIG_ARC_MMU_V3)
 #define ARC_REG_DC_PTAG		0x5C
 #endif
 
diff --git a/arch/arc/include/uapi/asm/ptrace.h b/arch/arc/include/uapi/asm/ptrace.h
index 2618cc13ba75..76a7739aab1c 100644
--- a/arch/arc/include/uapi/asm/ptrace.h
+++ b/arch/arc/include/uapi/asm/ptrace.h
@@ -11,6 +11,7 @@
 #ifndef _UAPI__ASM_ARC_PTRACE_H
 #define _UAPI__ASM_ARC_PTRACE_H
 
+#define PTRACE_GET_THREAD_AREA	25
 
 #ifndef __ASSEMBLY__
 /*
diff --git a/arch/arc/kernel/ctx_sw_asm.S b/arch/arc/kernel/ctx_sw_asm.S
index 2ff0347a2fd7..e248594097e7 100644
--- a/arch/arc/kernel/ctx_sw_asm.S
+++ b/arch/arc/kernel/ctx_sw_asm.S
@@ -10,9 +10,9 @@
  * -This is the more "natural" hand written assembler
  */
 
+#include <linux/linkage.h>
 #include <asm/entry.h>       /* For the SAVE_* macros */
 #include <asm/asm-offsets.h>
-#include <asm/linkage.h>
 
 #define KSP_WORD_OFF	((TASK_THREAD + THREAD_KSP) / 4)
 
diff --git a/arch/arc/kernel/devtree.c b/arch/arc/kernel/devtree.c
index 0b3ef4025d89..fffdb5e41b20 100644
--- a/arch/arc/kernel/devtree.c
+++ b/arch/arc/kernel/devtree.c
@@ -41,7 +41,7 @@ const struct machine_desc * __init setup_machine_fdt(void *dt)
 {
 	const struct machine_desc *mdesc;
 	unsigned long dt_root;
-	void *clk;
+	const void *clk;
 	int len;
 
 	if (!early_init_dt_scan(dt))
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index 07a58f2d3077..4d2481bd8b98 100644
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -77,10 +77,11 @@ stext:
 	; Clear BSS before updating any globals
 	; XXX: use ZOL here
 	mov	r5, __bss_start
-	mov	r6, __bss_stop
+	sub	r6, __bss_stop, r5
+	lsr.f	lp_count, r6, 2
+	lpnz	1f
+	st.ab	0, [r5, 4]
 1:
-	st.ab	0, [r5,4]
-	brlt	r5, r6, 1b
 
 	; Uboot - kernel ABI
 	;    r0 = [0] No uboot interaction, [1] cmdline in r2, [2] DTB in r2
diff --git a/arch/arc/kernel/ptrace.c b/arch/arc/kernel/ptrace.c
index 5d76706139dd..13b3ffb27a38 100644
--- a/arch/arc/kernel/ptrace.c
+++ b/arch/arc/kernel/ptrace.c
@@ -146,6 +146,10 @@ long arch_ptrace(struct task_struct *child, long request,
 	pr_debug("REQ=%ld: ADDR =0x%lx, DATA=0x%lx)\n", request, addr, data);
 
 	switch (request) {
+	case PTRACE_GET_THREAD_AREA:
+		ret = put_user(task_thread_info(child)->thr_ptr,
+			       (unsigned long __user *)data);
+		break;
 	default:
 		ret = ptrace_request(child, request, addr, data);
 		break;
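
Aside, an ARC-specific sketch of how a debugger would use the new request (not
part of the patch; the request number 25 and the out-parameter convention come
from the two hunks above, and other architectures define PTRACE_GET_THREAD_AREA
with different semantics):

    #include <signal.h>
    #include <stdio.h>
    #include <sys/ptrace.h>
    #include <sys/wait.h>
    #include <unistd.h>

    #ifndef PTRACE_GET_THREAD_AREA
    #define PTRACE_GET_THREAD_AREA 25      /* arch/arc/include/uapi/asm/ptrace.h */
    #endif

    int main(void)
    {
            pid_t pid = fork();

            if (pid == 0) {                 /* tracee: stop and wait for parent */
                    ptrace(PTRACE_TRACEME, 0, NULL, NULL);
                    raise(SIGSTOP);
                    _exit(0);
            }

            waitpid(pid, NULL, 0);          /* wait for the SIGSTOP */

            unsigned long tls = 0;
            /* The kernel put_user()s thr_ptr through the data argument. */
            if (ptrace(PTRACE_GET_THREAD_AREA, pid, NULL, &tls) == 0)
                    printf("TLS base: %#lx\n", tls);

            ptrace(PTRACE_CONT, pid, NULL, NULL);
            waitpid(pid, NULL, 0);
            return 0;
    }
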
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index cf90b6f4d3e0..c802bb500602 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -337,8 +337,19 @@ irqreturn_t do_IPI(int irq, void *dev_id)
  * API called by platform code to hookup arch-common ISR to their IPI IRQ
  */
 static DEFINE_PER_CPU(int, ipi_dev);
+
+static struct irqaction arc_ipi_irq = {
+        .name    = "IPI Interrupt",
+        .flags   = IRQF_PERCPU,
+        .handler = do_IPI,
+};
+
 int smp_ipi_irq_setup(int cpu, int irq)
 {
-	int *dev_id = &per_cpu(ipi_dev, smp_processor_id());
-	return request_percpu_irq(irq, do_IPI, "IPI Interrupt", dev_id);
+	if (!cpu)
+		return setup_irq(irq, &arc_ipi_irq);
+	else
+		arch_unmask_irq(irq);
+
+	return 0;
 }
diff --git a/arch/arc/kernel/vmlinux.lds.S b/arch/arc/kernel/vmlinux.lds.S
index 2555f5886af6..dd35bde39f69 100644
--- a/arch/arc/kernel/vmlinux.lds.S
+++ b/arch/arc/kernel/vmlinux.lds.S
@@ -116,7 +116,7 @@ SECTIONS
 
 	_edata = .;
 
-	BSS_SECTION(0, 0, 0)
+	BSS_SECTION(4, 4, 4)
 
 #ifdef CONFIG_ARC_DW2_UNWIND
 	. = ALIGN(PAGE_SIZE);
diff --git a/arch/arc/mm/cache_arc700.c b/arch/arc/mm/cache_arc700.c
index 1f676c4794e0..353b202c37c9 100644
--- a/arch/arc/mm/cache_arc700.c
+++ b/arch/arc/mm/cache_arc700.c
@@ -389,7 +389,7 @@ static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
 /***********************************************************
  * Machine specific helper for per line I-Cache invalidate.
  */
-static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
+static void __ic_line_inv_vaddr_local(unsigned long paddr, unsigned long vaddr,
 				unsigned long sz)
 {
 	unsigned long flags;
@@ -405,6 +405,23 @@ static inline void __ic_entire_inv(void)
 	read_aux_reg(ARC_REG_IC_CTRL);	/* blocks */
 }
 
+struct ic_line_inv_vaddr_ipi {
+	unsigned long paddr, vaddr;
+	int sz;
+};
+
+static void __ic_line_inv_vaddr_helper(void *info)
+{
+        struct ic_line_inv_vaddr_ipi *ic_inv = (struct ic_line_inv_vaddr_ipi*) info;
+        __ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
+}
+
+static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
+				unsigned long sz)
+{
+	struct ic_line_inv_vaddr_ipi ic_inv = { paddr, vaddr , sz};
+	on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1);
+}
 #else
 
 #define __ic_entire_inv()
@@ -553,12 +570,8 @@ void flush_icache_range(unsigned long kstart, unsigned long kend)
  */
 void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len)
 {
-	unsigned long flags;
-
-	local_irq_save(flags);
-	__ic_line_inv_vaddr(paddr, vaddr, len);
 	__dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
-	local_irq_restore(flags);
+	__ic_line_inv_vaddr(paddr, vaddr, len);
 }
 
 /* wrapper to compile time eliminate alignment checks in flush loop */
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 7a14af6e3d2c..75b3b721347c 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -262,8 +262,22 @@ config NEED_MACH_MEMORY_H
 
 config PHYS_OFFSET
 	hex "Physical address of main memory" if MMU
-	depends on !ARM_PATCH_PHYS_VIRT && !NEED_MACH_MEMORY_H
+	depends on !ARM_PATCH_PHYS_VIRT
 	default DRAM_BASE if !MMU
+	default 0x00000000 if ARCH_EBSA110 || \
+			EP93XX_SDCE3_SYNC_PHYS_OFFSET || \
+			ARCH_FOOTBRIDGE || \
+			ARCH_INTEGRATOR || \
+			ARCH_IOP13XX || \
+			ARCH_KS8695 || \
+			(ARCH_REALVIEW && !REALVIEW_HIGH_PHYS_OFFSET)
+	default 0x10000000 if ARCH_OMAP1 || ARCH_RPC
+	default 0x20000000 if ARCH_S5PV210
+	default 0x70000000 if REALVIEW_HIGH_PHYS_OFFSET
+	default 0xc0000000 if EP93XX_SDCE0_PHYS_OFFSET || ARCH_SA1100
+	default 0xd0000000 if EP93XX_SDCE1_PHYS_OFFSET
+	default 0xe0000000 if EP93XX_SDCE2_PHYS_OFFSET
+	default 0xf0000000 if EP93XX_SDCE3_ASYNC_PHYS_OFFSET
 	help
 	  Please provide the physical address corresponding to the
 	  location of main memory in your system.
@@ -435,7 +449,6 @@ config ARCH_EP93XX
 	select ARM_VIC
 	select CLKDEV_LOOKUP
 	select CPU_ARM920T
-	select NEED_MACH_MEMORY_H
 	help
 	  This enables support for the Cirrus EP93xx series of CPUs.
 
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 6721fab13734..718913dfe815 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -127,6 +127,9 @@ CHECKFLAGS += -D__arm__
 
 #Default value
 head-y		:= arch/arm/kernel/head$(MMUEXT).o
+
+# Text offset. This list is sorted numerically by address in order to
+# provide a means to avoid/resolve conflicts in multi-arch kernels.
 textofs-y	:= 0x00008000
 textofs-$(CONFIG_ARCH_CLPS711X) := 0x00028000
 # We don't want the htc bootloader to corrupt kernel during resume
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 68c918362b79..76a50ecae1c3 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -81,7 +81,7 @@ ZTEXTADDR := 0
 ZBSSADDR	:= ALIGN(8)
 endif
 
-SEDFLAGS	= s/TEXT_START/$(ZTEXTADDR)/;s/BSS_START/$(ZBSSADDR)/
+CPPFLAGS_vmlinux.lds := -DTEXT_START="$(ZTEXTADDR)" -DBSS_START="$(ZBSSADDR)"
 
 suffix_$(CONFIG_KERNEL_GZIP) = gzip
 suffix_$(CONFIG_KERNEL_LZO)  = lzo
@@ -199,8 +199,5 @@ CFLAGS_font.o := -Dstatic=
 $(obj)/font.c: $(FONTC)
 	$(call cmd,shipped)
 
-$(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile $(KCONFIG_CONFIG)
-	@sed "$(SEDFLAGS)" < $< > $@
-
 $(obj)/hyp-stub.S: $(srctree)/arch/$(SRCARCH)/kernel/hyp-stub.S
 	$(call cmd,shipped)
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 3a8b32df6b31..413fd94b5301 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -125,9 +125,11 @@ start:
  THUMB(		adr	r12, BSYM(1f)	)
  THUMB(		bx	r12		)
 
-		.word	0x016f2818		@ Magic numbers to help the loader
-		.word	start			@ absolute load/run zImage address
-		.word	_edata			@ zImage end address
+		.word	_magic_sig		@ Magic numbers to help the loader
+		.word	_magic_start		@ absolute load/run zImage address
+		.word	_magic_end		@ zImage end address
+		.word	0x04030201		@ endianness flag
+
  THUMB(		.thumb			)
 1:
  ARM_BE8(	setend	be		)	@ go BE8 if compiled for BE8
diff --git a/arch/arm/boot/compressed/vmlinux.lds.in b/arch/arm/boot/compressed/vmlinux.lds.S
index 4919f2ac8b89..2b60b843ac5e 100644
--- a/arch/arm/boot/compressed/vmlinux.lds.in
+++ b/arch/arm/boot/compressed/vmlinux.lds.S
@@ -1,12 +1,20 @@
 /*
- * linux/arch/arm/boot/compressed/vmlinux.lds.in
- *
  * Copyright (C) 2000 Russell King
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+
+#ifdef CONFIG_CPU_ENDIAN_BE8
+#define ZIMAGE_MAGIC(x) ( (((x) >> 24) & 0x000000ff) | \
+			  (((x) >>  8) & 0x0000ff00) | \
+			  (((x) <<  8) & 0x00ff0000) | \
+			  (((x) << 24) & 0xff000000) )
+#else
+#define ZIMAGE_MAGIC(x) (x)
+#endif
+
 OUTPUT_ARCH(arm)
 ENTRY(_start)
 SECTIONS
@@ -57,6 +65,10 @@ SECTIONS
  .pad			: { BYTE(0); . = ALIGN(8); }
  _edata = .;
 
+ _magic_sig = ZIMAGE_MAGIC(0x016f2818);
+ _magic_start = ZIMAGE_MAGIC(_start);
+ _magic_end = ZIMAGE_MAGIC(_edata);
+
  . = BSS_START;
  __bss_start = .;
  .bss			: { *(.bss) }
@@ -73,4 +85,3 @@ SECTIONS
  .stab.indexstr 0	: { *(.stab.indexstr) }
  .comment 0		: { *(.comment) }
 }
-
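
Aside, ZIMAGE_MAGIC() above is a plain 32-bit byte swap: when the kernel is
built big-endian (BE8) it pre-swaps the header words so a boot loader reads the
same values regardless of kernel endianness. A quick host-side check of the
expression (a sketch, not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    /* Same byte-swap expression as in the linker script above. */
    #define ZIMAGE_MAGIC(x) ( (((x) >> 24) & 0x000000ff) | \
                              (((x) >>  8) & 0x0000ff00) | \
                              (((x) <<  8) & 0x00ff0000) | \
                              (((x) << 24) & 0xff000000) )

    int main(void)
    {
            uint32_t magic = 0x016f2818;    /* zImage magic used in head.S */

            printf("%#010x byte-swapped is %#010x\n",
                   magic, (uint32_t)ZIMAGE_MAGIC(magic));
            return 0;
    }
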
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index 5986ff63b901..adb5ed9e269e 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -357,7 +357,7 @@ dtb-$(CONFIG_ARCH_STI)+= stih407-b2120.dtb \
 	stih415-b2020.dtb \
 	stih416-b2000.dtb \
 	stih416-b2020.dtb \
-	stih416-b2020-revE.dtb
+	stih416-b2020e.dtb
 dtb-$(CONFIG_MACH_SUN4I) += \
 	sun4i-a10-a1000.dtb \
 	sun4i-a10-cubieboard.dtb \
diff --git a/arch/arm/boot/dts/armada-380.dtsi b/arch/arm/boot/dts/armada-380.dtsi
index e69bc6759c39..4173a8ab34e7 100644
--- a/arch/arm/boot/dts/armada-380.dtsi
+++ b/arch/arm/boot/dts/armada-380.dtsi
@@ -16,7 +16,7 @@
 
 / {
 	model = "Marvell Armada 380 family SoC";
-	compatible = "marvell,armada380", "marvell,armada38x";
+	compatible = "marvell,armada380";
 
 	cpus {
 		#address-cells = <1>;
diff --git a/arch/arm/boot/dts/armada-385-db.dts b/arch/arm/boot/dts/armada-385-db.dts
index 5bae4731828b..1af886f1e486 100644
--- a/arch/arm/boot/dts/armada-385-db.dts
+++ b/arch/arm/boot/dts/armada-385-db.dts
@@ -16,7 +16,7 @@
 
 / {
 	model = "Marvell Armada 385 Development Board";
-	compatible = "marvell,a385-db", "marvell,armada385", "marvell,armada38x";
+	compatible = "marvell,a385-db", "marvell,armada385", "marvell,armada380";
 
 	chosen {
 		bootargs = "console=ttyS0,115200 earlyprintk";
diff --git a/arch/arm/boot/dts/armada-385-rd.dts b/arch/arm/boot/dts/armada-385-rd.dts
index 40893255a3f0..aaca2861dc87 100644
--- a/arch/arm/boot/dts/armada-385-rd.dts
+++ b/arch/arm/boot/dts/armada-385-rd.dts
@@ -17,7 +17,7 @@
 
 / {
 	model = "Marvell Armada 385 Reference Design";
-	compatible = "marvell,a385-rd", "marvell,armada385", "marvell,armada38x";
+	compatible = "marvell,a385-rd", "marvell,armada385", "marvell,armada380";
 
 	chosen {
 		bootargs = "console=ttyS0,115200 earlyprintk";
diff --git a/arch/arm/boot/dts/armada-385.dtsi b/arch/arm/boot/dts/armada-385.dtsi
index f011009bf4cf..6283d7912f71 100644
--- a/arch/arm/boot/dts/armada-385.dtsi
+++ b/arch/arm/boot/dts/armada-385.dtsi
@@ -16,7 +16,7 @@
 
 / {
 	model = "Marvell Armada 385 family SoC";
-	compatible = "marvell,armada385", "marvell,armada38x";
+	compatible = "marvell,armada385", "marvell,armada380";
 
 	cpus {
 		#address-cells = <1>;
diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi
index 3de364e81b52..689fa1a46728 100644
--- a/arch/arm/boot/dts/armada-38x.dtsi
+++ b/arch/arm/boot/dts/armada-38x.dtsi
@@ -20,7 +20,7 @@
 
 / {
 	model = "Marvell Armada 38x family SoC";
-	compatible = "marvell,armada38x";
+	compatible = "marvell,armada380";
 
 	aliases {
 		gpio0 = &gpio0;
diff --git a/arch/arm/boot/dts/at91sam9261.dtsi b/arch/arm/boot/dts/at91sam9261.dtsi
index b309c1c6e848..04927db1d6bf 100644
--- a/arch/arm/boot/dts/at91sam9261.dtsi
+++ b/arch/arm/boot/dts/at91sam9261.dtsi
@@ -568,24 +568,17 @@
 			#size-cells = <0>;
 			#interrupt-cells = <1>;
 
-			slow_rc_osc: slow_rc_osc {
-				compatible = "fixed-clock";
+			main_osc: main_osc {
+				compatible = "atmel,at91rm9200-clk-main-osc";
 				#clock-cells = <0>;
-				clock-frequency = <32768>;
-				clock-accuracy = <50000000>;
-			};
-
-			clk32k: slck {
-				compatible = "atmel,at91sam9260-clk-slow";
-				#clock-cells = <0>;
-				clocks = <&slow_rc_osc &slow_xtal>;
+				interrupts-extended = <&pmc AT91_PMC_MOSCS>;
+				clocks = <&main_xtal>;
 			};
 
 			main: mainck {
 				compatible = "atmel,at91rm9200-clk-main";
 				#clock-cells = <0>;
-				interrupts-extended = <&pmc AT91_PMC_MOSCS>;
-				clocks = <&main_xtal>;
+				clocks = <&main_osc>;
 			};
 
 			plla: pllack {
@@ -615,7 +608,7 @@
615 compatible = "atmel,at91rm9200-clk-master"; 608 compatible = "atmel,at91rm9200-clk-master";
616 #clock-cells = <0>; 609 #clock-cells = <0>;
617 interrupts-extended = <&pmc AT91_PMC_MCKRDY>; 610 interrupts-extended = <&pmc AT91_PMC_MCKRDY>;
618 clocks = <&clk32k>, <&main>, <&plla>, <&pllb>; 611 clocks = <&slow_xtal>, <&main>, <&plla>, <&pllb>;
619 atmel,clk-output-range = <0 94000000>; 612 atmel,clk-output-range = <0 94000000>;
620 atmel,clk-divisors = <1 2 4 0>; 613 atmel,clk-divisors = <1 2 4 0>;
621 }; 614 };
@@ -632,7 +625,7 @@
632 #address-cells = <1>; 625 #address-cells = <1>;
633 #size-cells = <0>; 626 #size-cells = <0>;
634 interrupt-parent = <&pmc>; 627 interrupt-parent = <&pmc>;
635 clocks = <&clk32k>, <&main>, <&plla>, <&pllb>; 628 clocks = <&slow_xtal>, <&main>, <&plla>, <&pllb>;
636 629
637 prog0: prog0 { 630 prog0: prog0 {
638 #clock-cells = <0>; 631 #clock-cells = <0>;
diff --git a/arch/arm/boot/dts/at91sam9261ek.dts b/arch/arm/boot/dts/at91sam9261ek.dts
index c6683ea8b743..aa35a7aec9a8 100644
--- a/arch/arm/boot/dts/at91sam9261ek.dts
+++ b/arch/arm/boot/dts/at91sam9261ek.dts
@@ -20,6 +20,10 @@
20 reg = <0x20000000 0x4000000>; 20 reg = <0x20000000 0x4000000>;
21 }; 21 };
22 22
23 slow_xtal {
24 clock-frequency = <32768>;
25 };
26
23 main_xtal { 27 main_xtal {
24 clock-frequency = <18432000>; 28 clock-frequency = <18432000>;
25 }; 29 };
diff --git a/arch/arm/boot/dts/at91sam9n12.dtsi b/arch/arm/boot/dts/at91sam9n12.dtsi
index d1b82e6635d5..287795985e32 100644
--- a/arch/arm/boot/dts/at91sam9n12.dtsi
+++ b/arch/arm/boot/dts/at91sam9n12.dtsi
@@ -132,8 +132,8 @@
132 <595000000 650000000 3 0>, 132 <595000000 650000000 3 0>,
133 <545000000 600000000 0 1>, 133 <545000000 600000000 0 1>,
134 <495000000 555000000 1 1>, 134 <495000000 555000000 1 1>,
135 <445000000 500000000 1 2>, 135 <445000000 500000000 2 1>,
136 <400000000 450000000 1 3>; 136 <400000000 450000000 3 1>;
137 }; 137 };
138 138
139 plladiv: plladivck { 139 plladiv: plladivck {
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
index 1a57298636a5..d6133f497207 100644
--- a/arch/arm/boot/dts/at91sam9x5.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5.dtsi
@@ -140,8 +140,8 @@
140 595000000 650000000 3 0 140 595000000 650000000 3 0
141 545000000 600000000 0 1 141 545000000 600000000 0 1
142 495000000 555000000 1 1 142 495000000 555000000 1 1
143 445000000 500000000 1 2 143 445000000 500000000 2 1
144 400000000 450000000 1 3>; 144 400000000 450000000 3 1>;
145 }; 145 };
146 146
147 plladiv: plladivck { 147 plladiv: plladivck {
diff --git a/arch/arm/boot/dts/exynos4.dtsi b/arch/arm/boot/dts/exynos4.dtsi
index b8ece4be41ca..fbaf426d2daa 100644
--- a/arch/arm/boot/dts/exynos4.dtsi
+++ b/arch/arm/boot/dts/exynos4.dtsi
@@ -113,7 +113,7 @@
113 compatible = "arm,cortex-a9-gic"; 113 compatible = "arm,cortex-a9-gic";
114 #interrupt-cells = <3>; 114 #interrupt-cells = <3>;
115 interrupt-controller; 115 interrupt-controller;
116 reg = <0x10490000 0x1000>, <0x10480000 0x100>; 116 reg = <0x10490000 0x10000>, <0x10480000 0x10000>;
117 }; 117 };
118 118
119 combiner: interrupt-controller@10440000 { 119 combiner: interrupt-controller@10440000 {
diff --git a/arch/arm/boot/dts/imx51-babbage.dts b/arch/arm/boot/dts/imx51-babbage.dts
index 6bc3243a80d3..181d77fa2fa6 100644
--- a/arch/arm/boot/dts/imx51-babbage.dts
+++ b/arch/arm/boot/dts/imx51-babbage.dts
@@ -315,15 +315,15 @@
315&esdhc1 { 315&esdhc1 {
316 pinctrl-names = "default"; 316 pinctrl-names = "default";
317 pinctrl-0 = <&pinctrl_esdhc1>; 317 pinctrl-0 = <&pinctrl_esdhc1>;
318 fsl,cd-controller; 318 cd-gpios = <&gpio1 0 GPIO_ACTIVE_LOW>;
319 fsl,wp-controller; 319 wp-gpios = <&gpio1 1 GPIO_ACTIVE_HIGH>;
320 status = "okay"; 320 status = "okay";
321}; 321};
322 322
323&esdhc2 { 323&esdhc2 {
324 pinctrl-names = "default"; 324 pinctrl-names = "default";
325 pinctrl-0 = <&pinctrl_esdhc2>; 325 pinctrl-0 = <&pinctrl_esdhc2>;
326 cd-gpios = <&gpio1 6 GPIO_ACTIVE_HIGH>; 326 cd-gpios = <&gpio1 6 GPIO_ACTIVE_LOW>;
327 wp-gpios = <&gpio1 5 GPIO_ACTIVE_HIGH>; 327 wp-gpios = <&gpio1 5 GPIO_ACTIVE_HIGH>;
328 status = "okay"; 328 status = "okay";
329}; 329};
@@ -468,8 +468,8 @@
468 MX51_PAD_SD1_DATA1__SD1_DATA1 0x20d5 468 MX51_PAD_SD1_DATA1__SD1_DATA1 0x20d5
469 MX51_PAD_SD1_DATA2__SD1_DATA2 0x20d5 469 MX51_PAD_SD1_DATA2__SD1_DATA2 0x20d5
470 MX51_PAD_SD1_DATA3__SD1_DATA3 0x20d5 470 MX51_PAD_SD1_DATA3__SD1_DATA3 0x20d5
471 MX51_PAD_GPIO1_0__SD1_CD 0x20d5 471 MX51_PAD_GPIO1_0__GPIO1_0 0x100
472 MX51_PAD_GPIO1_1__SD1_WP 0x20d5 472 MX51_PAD_GPIO1_1__GPIO1_1 0x100
473 >; 473 >;
474 }; 474 };
475 475
diff --git a/arch/arm/boot/dts/imx51-eukrea-mbimxsd51-baseboard.dts b/arch/arm/boot/dts/imx51-eukrea-mbimxsd51-baseboard.dts
index 75e66c9c6144..31cfb7f2b02e 100644
--- a/arch/arm/boot/dts/imx51-eukrea-mbimxsd51-baseboard.dts
+++ b/arch/arm/boot/dts/imx51-eukrea-mbimxsd51-baseboard.dts
@@ -107,7 +107,7 @@
107&esdhc1 { 107&esdhc1 {
108 pinctrl-names = "default"; 108 pinctrl-names = "default";
109 pinctrl-0 = <&pinctrl_esdhc1 &pinctrl_esdhc1_cd>; 109 pinctrl-0 = <&pinctrl_esdhc1 &pinctrl_esdhc1_cd>;
110 fsl,cd-controller; 110 cd-gpios = <&gpio1 0 GPIO_ACTIVE_LOW>;
111 status = "okay"; 111 status = "okay";
112}; 112};
113 113
@@ -206,7 +206,7 @@
206 206
207 pinctrl_esdhc1_cd: esdhc1_cd { 207 pinctrl_esdhc1_cd: esdhc1_cd {
208 fsl,pins = < 208 fsl,pins = <
209 MX51_PAD_GPIO1_0__SD1_CD 0x20d5 209 MX51_PAD_GPIO1_0__GPIO1_0 0xd5
210 >; 210 >;
211 }; 211 };
212 212
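
The two i.MX51 board changes above drop the Freescale-specific fsl,cd-controller and fsl,wp-controller properties in favour of the generic cd-gpios/wp-gpios bindings, and re-mux the pads as plain GPIOs to match. On the driver side these generic properties are consumed by the common MMC OF parser; a minimal sketch of that consumer side (hypothetical probe fragment, not part of this series) is:

#include <linux/mmc/host.h>

/*
 * Sketch only: with cd-gpios/wp-gpios present in the DT, the generic
 * parser wires up card-detect and write-protect for the host driver.
 */
static int example_esdhc_parse_dt(struct mmc_host *mmc)
{
	return mmc_of_parse(mmc);
}
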
diff --git a/arch/arm/boot/dts/imx53-m53evk.dts b/arch/arm/boot/dts/imx53-m53evk.dts
index d5d146a8b149..c4956b0ffb35 100644
--- a/arch/arm/boot/dts/imx53-m53evk.dts
+++ b/arch/arm/boot/dts/imx53-m53evk.dts
@@ -21,27 +21,25 @@
21 <0xb0000000 0x20000000>; 21 <0xb0000000 0x20000000>;
22 }; 22 };
23 23
24 soc { 24 display1: display@di1 {
25 display1: display@di1 { 25 compatible = "fsl,imx-parallel-display";
26 compatible = "fsl,imx-parallel-display"; 26 interface-pix-fmt = "bgr666";
27 interface-pix-fmt = "bgr666"; 27 pinctrl-names = "default";
28 pinctrl-names = "default"; 28 pinctrl-0 = <&pinctrl_ipu_disp1>;
29 pinctrl-0 = <&pinctrl_ipu_disp1>; 29
30 30 display-timings {
31 display-timings { 31 800x480p60 {
32 800x480p60 { 32 native-mode;
33 native-mode; 33 clock-frequency = <31500000>;
34 clock-frequency = <31500000>; 34 hactive = <800>;
35 hactive = <800>; 35 vactive = <480>;
36 vactive = <480>; 36 hfront-porch = <40>;
37 hfront-porch = <40>; 37 hback-porch = <88>;
38 hback-porch = <88>; 38 hsync-len = <128>;
39 hsync-len = <128>; 39 vback-porch = <33>;
40 vback-porch = <33>; 40 vfront-porch = <9>;
41 vfront-porch = <9>; 41 vsync-len = <3>;
42 vsync-len = <3>; 42 vsync-active = <1>;
43 vsync-active = <1>;
44 };
45 }; 43 };
46 }; 44 };
47 45
diff --git a/arch/arm/boot/dts/imx6dl-hummingboard.dts b/arch/arm/boot/dts/imx6dl-hummingboard.dts
index 5373a5f2782b..c8e51dd41b8f 100644
--- a/arch/arm/boot/dts/imx6dl-hummingboard.dts
+++ b/arch/arm/boot/dts/imx6dl-hummingboard.dts
@@ -143,6 +143,14 @@
143 fsl,pins = <MX6QDL_PAD_GPIO_0__GPIO1_IO00 0x1b0b0>; 143 fsl,pins = <MX6QDL_PAD_GPIO_0__GPIO1_IO00 0x1b0b0>;
144 }; 144 };
145 145
146 pinctrl_hummingboard_usbotg_id: hummingboard-usbotg-id {
147 /*
148 * Similar to pinctrl_usbotg_2, but we want it
149 * pulled down for a fixed host connection.
150 */
151 fsl,pins = <MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x13059>;
152 };
153
146 pinctrl_hummingboard_usbotg_vbus: hummingboard-usbotg-vbus { 154 pinctrl_hummingboard_usbotg_vbus: hummingboard-usbotg-vbus {
147 fsl,pins = <MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x1b0b0>; 155 fsl,pins = <MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x1b0b0>;
148 }; 156 };
@@ -178,6 +186,8 @@
178}; 186};
179 187
180&usbotg { 188&usbotg {
189 pinctrl-names = "default";
190 pinctrl-0 = <&pinctrl_hummingboard_usbotg_id>;
181 vbus-supply = <&reg_usbotg_vbus>; 191 vbus-supply = <&reg_usbotg_vbus>;
182 status = "okay"; 192 status = "okay";
183}; 193};
diff --git a/arch/arm/boot/dts/imx6q-gw51xx.dts b/arch/arm/boot/dts/imx6q-gw51xx.dts
index af4929aee075..0e1406e58eff 100644
--- a/arch/arm/boot/dts/imx6q-gw51xx.dts
+++ b/arch/arm/boot/dts/imx6q-gw51xx.dts
@@ -11,7 +11,7 @@
11 11
12/dts-v1/; 12/dts-v1/;
13#include "imx6q.dtsi" 13#include "imx6q.dtsi"
14#include "imx6qdl-gw54xx.dtsi" 14#include "imx6qdl-gw51xx.dtsi"
15 15
16/ { 16/ {
17 model = "Gateworks Ventana i.MX6 Quad GW51XX"; 17 model = "Gateworks Ventana i.MX6 Quad GW51XX";
diff --git a/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi b/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
index 25da82a03110..e8e781656b3f 100644
--- a/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
@@ -12,6 +12,19 @@
12 pinctrl-0 = <&pinctrl_cubox_i_ir>; 12 pinctrl-0 = <&pinctrl_cubox_i_ir>;
13 }; 13 };
14 14
15 pwmleds {
16 compatible = "pwm-leds";
17 pinctrl-names = "default";
18 pinctrl-0 = <&pinctrl_cubox_i_pwm1>;
19
20 front {
21 active-low;
22 label = "imx6:red:front";
23 max-brightness = <248>;
24 pwms = <&pwm1 0 50000>;
25 };
26 };
27
15 regulators { 28 regulators {
16 compatible = "simple-bus"; 29 compatible = "simple-bus";
17 30
@@ -109,6 +122,10 @@
109 >; 122 >;
110 }; 123 };
111 124
125 pinctrl_cubox_i_pwm1: cubox-i-pwm1-front-led {
126 fsl,pins = <MX6QDL_PAD_DISP0_DAT8__PWM1_OUT 0x1b0b0>;
127 };
128
112 pinctrl_cubox_i_spdif: cubox-i-spdif { 129 pinctrl_cubox_i_spdif: cubox-i-spdif {
113 fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x13091>; 130 fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x13091>;
114 }; 131 };
@@ -117,6 +134,14 @@
117 fsl,pins = <MX6QDL_PAD_GPIO_0__GPIO1_IO00 0x4001b0b0>; 134 fsl,pins = <MX6QDL_PAD_GPIO_0__GPIO1_IO00 0x4001b0b0>;
118 }; 135 };
119 136
137 pinctrl_cubox_i_usbotg_id: cubox-i-usbotg-id {
138 /*
139 * The Cubox-i pulls this low, but as it's pointless
140 * leaving it as a pull-up, even if it is just 10uA.
141 */
142 fsl,pins = <MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x13059>;
143 };
144
120 pinctrl_cubox_i_usbotg_vbus: cubox-i-usbotg-vbus { 145 pinctrl_cubox_i_usbotg_vbus: cubox-i-usbotg-vbus {
121 fsl,pins = <MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x4001b0b0>; 146 fsl,pins = <MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x4001b0b0>;
122 }; 147 };
@@ -153,6 +178,8 @@
153}; 178};
154 179
155&usbotg { 180&usbotg {
181 pinctrl-names = "default";
182 pinctrl-0 = <&pinctrl_cubox_i_usbotg_id>;
156 vbus-supply = <&reg_usbotg_vbus>; 183 vbus-supply = <&reg_usbotg_vbus>;
157 status = "okay"; 184 status = "okay";
158}; 185};
diff --git a/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi
index 31665adcbf39..0db15af41cb1 100644
--- a/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi
@@ -161,7 +161,7 @@
161 status = "okay"; 161 status = "okay";
162 162
163 pmic: ltc3676@3c { 163 pmic: ltc3676@3c {
164 compatible = "ltc,ltc3676"; 164 compatible = "lltc,ltc3676";
165 reg = <0x3c>; 165 reg = <0x3c>;
166 166
167 regulators { 167 regulators {
diff --git a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
index 367af3ec9435..744c8a2d81f6 100644
--- a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
@@ -220,7 +220,7 @@
220 }; 220 };
221 221
222 pmic: ltc3676@3c { 222 pmic: ltc3676@3c {
223 compatible = "ltc,ltc3676"; 223 compatible = "lltc,ltc3676";
224 reg = <0x3c>; 224 reg = <0x3c>;
225 225
226 regulators { 226 regulators {
@@ -288,7 +288,7 @@
288 codec: sgtl5000@0a { 288 codec: sgtl5000@0a {
289 compatible = "fsl,sgtl5000"; 289 compatible = "fsl,sgtl5000";
290 reg = <0x0a>; 290 reg = <0x0a>;
291 clocks = <&clks 169>; 291 clocks = <&clks 201>;
292 VDDA-supply = <&reg_1p8v>; 292 VDDA-supply = <&reg_1p8v>;
293 VDDIO-supply = <&reg_3p3v>; 293 VDDIO-supply = <&reg_3p3v>;
294 }; 294 };
diff --git a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
index c91b5a6c769b..adf150c1be90 100644
--- a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
@@ -234,7 +234,7 @@
234 }; 234 };
235 235
236 pmic: ltc3676@3c { 236 pmic: ltc3676@3c {
237 compatible = "ltc,ltc3676"; 237 compatible = "lltc,ltc3676";
238 reg = <0x3c>; 238 reg = <0x3c>;
239 239
240 regulators { 240 regulators {
diff --git a/arch/arm/boot/dts/imx6qdl-microsom.dtsi b/arch/arm/boot/dts/imx6qdl-microsom.dtsi
index d729d0b15f25..79eac6849d4c 100644
--- a/arch/arm/boot/dts/imx6qdl-microsom.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-microsom.dtsi
@@ -10,14 +10,6 @@
10 MX6QDL_PAD_CSI0_DAT11__UART1_RX_DATA 0x1b0b1 10 MX6QDL_PAD_CSI0_DAT11__UART1_RX_DATA 0x1b0b1
11 >; 11 >;
12 }; 12 };
13
14 pinctrl_microsom_usbotg: microsom-usbotg {
15 /*
16 * Similar to pinctrl_usbotg_2, but we want it
17 * pulled down for a fixed host connection.
18 */
19 fsl,pins = <MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x13059>;
20 };
21 }; 13 };
22}; 14};
23 15
@@ -26,8 +18,3 @@
26 pinctrl-0 = <&pinctrl_microsom_uart1>; 18 pinctrl-0 = <&pinctrl_microsom_uart1>;
27 status = "okay"; 19 status = "okay";
28}; 20};
29
30&usbotg {
31 pinctrl-names = "default";
32 pinctrl-0 = <&pinctrl_microsom_usbotg>;
33};
diff --git a/arch/arm/boot/dts/imx6sl.dtsi b/arch/arm/boot/dts/imx6sl.dtsi
index 2d4e5285f3f3..57d4abe03a94 100644
--- a/arch/arm/boot/dts/imx6sl.dtsi
+++ b/arch/arm/boot/dts/imx6sl.dtsi
@@ -686,7 +686,7 @@
686 compatible = "fsl,imx6sl-fec", "fsl,imx25-fec"; 686 compatible = "fsl,imx6sl-fec", "fsl,imx25-fec";
687 reg = <0x02188000 0x4000>; 687 reg = <0x02188000 0x4000>;
688 interrupts = <0 114 IRQ_TYPE_LEVEL_HIGH>; 688 interrupts = <0 114 IRQ_TYPE_LEVEL_HIGH>;
689 clocks = <&clks IMX6SL_CLK_ENET_REF>, 689 clocks = <&clks IMX6SL_CLK_ENET>,
690 <&clks IMX6SL_CLK_ENET_REF>; 690 <&clks IMX6SL_CLK_ENET_REF>;
691 clock-names = "ipg", "ahb"; 691 clock-names = "ipg", "ahb";
692 status = "disabled"; 692 status = "disabled";
diff --git a/arch/arm/boot/dts/kirkwood-guruplug-server-plus.dts b/arch/arm/boot/dts/kirkwood-guruplug-server-plus.dts
index c5a1fc75c7a3..b2d9834bf458 100644
--- a/arch/arm/boot/dts/kirkwood-guruplug-server-plus.dts
+++ b/arch/arm/boot/dts/kirkwood-guruplug-server-plus.dts
@@ -105,7 +105,6 @@
105 compatible = "ethernet-phy-id0141.0cb0", 105 compatible = "ethernet-phy-id0141.0cb0",
106 "ethernet-phy-ieee802.3-c22"; 106 "ethernet-phy-ieee802.3-c22";
107 reg = <0>; 107 reg = <0>;
108 phy-connection-type = "rgmii-id";
109 }; 108 };
110 109
111 ethphy1: ethernet-phy@1 { 110 ethphy1: ethernet-phy@1 {
@@ -113,7 +112,6 @@
113 compatible = "ethernet-phy-id0141.0cb0", 112 compatible = "ethernet-phy-id0141.0cb0",
114 "ethernet-phy-ieee802.3-c22"; 113 "ethernet-phy-ieee802.3-c22";
115 reg = <1>; 114 reg = <1>;
116 phy-connection-type = "rgmii-id";
117 }; 115 };
118}; 116};
119 117
@@ -121,6 +119,7 @@
121 status = "okay"; 119 status = "okay";
122 ethernet0-port@0 { 120 ethernet0-port@0 {
123 phy-handle = <&ethphy0>; 121 phy-handle = <&ethphy0>;
122 phy-connection-type = "rgmii-id";
124 }; 123 };
125}; 124};
126 125
@@ -128,5 +127,6 @@
128 status = "okay"; 127 status = "okay";
129 ethernet1-port@0 { 128 ethernet1-port@0 {
130 phy-handle = <&ethphy1>; 129 phy-handle = <&ethphy1>;
130 phy-connection-type = "rgmii-id";
131 }; 131 };
132}; 132};
diff --git a/arch/arm/boot/dts/stih415.dtsi b/arch/arm/boot/dts/stih415.dtsi
index d6f254f302fe..a0f6f75fe3b5 100644
--- a/arch/arm/boot/dts/stih415.dtsi
+++ b/arch/arm/boot/dts/stih415.dtsi
@@ -169,8 +169,8 @@
169 169
170 pinctrl-names = "default"; 170 pinctrl-names = "default";
171 pinctrl-0 = <&pinctrl_mii0>; 171 pinctrl-0 = <&pinctrl_mii0>;
172 clock-names = "stmmaceth"; 172 clock-names = "stmmaceth", "sti-ethclk";
173 clocks = <&clk_s_a1_ls CLK_GMAC0_PHY>; 173 clocks = <&clk_s_a1_ls CLK_ICN_IF_2>, <&clk_s_a1_ls CLK_GMAC0_PHY>;
174 }; 174 };
175 175
176 ethernet1: dwmac@fef08000 { 176 ethernet1: dwmac@fef08000 {
@@ -192,8 +192,8 @@
192 reset-names = "stmmaceth"; 192 reset-names = "stmmaceth";
193 pinctrl-names = "default"; 193 pinctrl-names = "default";
194 pinctrl-0 = <&pinctrl_mii1>; 194 pinctrl-0 = <&pinctrl_mii1>;
195 clock-names = "stmmaceth"; 195 clock-names = "stmmaceth", "sti-ethclk";
196 clocks = <&clk_s_a0_ls CLK_ETH1_PHY>; 196 clocks = <&clk_s_a0_ls CLK_ICN_REG>, <&clk_s_a0_ls CLK_ETH1_PHY>;
197 }; 197 };
198 198
199 rc: rc@fe518000 { 199 rc: rc@fe518000 {
diff --git a/arch/arm/boot/dts/stih416-b2020-revE.dts b/arch/arm/boot/dts/stih416-b2020e.dts
index ba0fa2caaf18..ba0fa2caaf18 100644
--- a/arch/arm/boot/dts/stih416-b2020-revE.dts
+++ b/arch/arm/boot/dts/stih416-b2020e.dts
diff --git a/arch/arm/boot/dts/stih416.dtsi b/arch/arm/boot/dts/stih416.dtsi
index 06473c5d9ea9..84758d76d064 100644
--- a/arch/arm/boot/dts/stih416.dtsi
+++ b/arch/arm/boot/dts/stih416.dtsi
@@ -175,8 +175,8 @@
175 reset-names = "stmmaceth"; 175 reset-names = "stmmaceth";
176 pinctrl-names = "default"; 176 pinctrl-names = "default";
177 pinctrl-0 = <&pinctrl_mii0>; 177 pinctrl-0 = <&pinctrl_mii0>;
178 clock-names = "stmmaceth"; 178 clock-names = "stmmaceth", "sti-ethclk";
179 clocks = <&clk_s_a1_ls CLK_GMAC0_PHY>; 179 clocks = <&clk_s_a1_ls CLK_ICN_IF_2>, <&clk_s_a1_ls CLK_GMAC0_PHY>;
180 }; 180 };
181 181
182 ethernet1: dwmac@fef08000 { 182 ethernet1: dwmac@fef08000 {
@@ -197,8 +197,8 @@
197 reset-names = "stmmaceth"; 197 reset-names = "stmmaceth";
198 pinctrl-names = "default"; 198 pinctrl-names = "default";
199 pinctrl-0 = <&pinctrl_mii1>; 199 pinctrl-0 = <&pinctrl_mii1>;
200 clock-names = "stmmaceth"; 200 clock-names = "stmmaceth", "sti-ethclk";
201 clocks = <&clk_s_a0_ls CLK_ETH1_PHY>; 201 clocks = <&clk_s_a0_ls CLK_ICN_REG>, <&clk_s_a0_ls CLK_ETH1_PHY>;
202 }; 202 };
203 203
204 rc: rc@fe518000 { 204 rc: rc@fe518000 {
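
The stih415/stih416 hunks above add a second clock to each dwmac node and name both entries in clock-names, so consumers can look the clocks up by name ("stmmaceth", "sti-ethclk") rather than by position. A consumer-side sketch (hypothetical driver code, not taken from this series):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Sketch: "sti-ethclk" matches the second clock-names entry above. */
static int example_get_sti_ethclk(struct platform_device *pdev)
{
	struct clk *ethclk = devm_clk_get(&pdev->dev, "sti-ethclk");

	if (IS_ERR(ethclk))
		return PTR_ERR(ethclk);

	return clk_prepare_enable(ethclk);
}
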
diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c
index f91136ab447e..3c165fc2dce2 100644
--- a/arch/arm/common/mcpm_entry.c
+++ b/arch/arm/common/mcpm_entry.c
@@ -12,11 +12,13 @@
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/irqflags.h> 14#include <linux/irqflags.h>
15#include <linux/cpu_pm.h>
15 16
16#include <asm/mcpm.h> 17#include <asm/mcpm.h>
17#include <asm/cacheflush.h> 18#include <asm/cacheflush.h>
18#include <asm/idmap.h> 19#include <asm/idmap.h>
19#include <asm/cputype.h> 20#include <asm/cputype.h>
21#include <asm/suspend.h>
20 22
21extern unsigned long mcpm_entry_vectors[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER]; 23extern unsigned long mcpm_entry_vectors[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];
22 24
@@ -146,6 +148,56 @@ int mcpm_cpu_powered_up(void)
146 return 0; 148 return 0;
147} 149}
148 150
151#ifdef CONFIG_ARM_CPU_SUSPEND
152
153static int __init nocache_trampoline(unsigned long _arg)
154{
155 void (*cache_disable)(void) = (void *)_arg;
156 unsigned int mpidr = read_cpuid_mpidr();
157 unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
158 unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
159 phys_reset_t phys_reset;
160
161 mcpm_set_entry_vector(cpu, cluster, cpu_resume);
162 setup_mm_for_reboot();
163
164 __mcpm_cpu_going_down(cpu, cluster);
165 BUG_ON(!__mcpm_outbound_enter_critical(cpu, cluster));
166 cache_disable();
167 __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
168 __mcpm_cpu_down(cpu, cluster);
169
170 phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
171 phys_reset(virt_to_phys(mcpm_entry_point));
172 BUG();
173}
174
175int __init mcpm_loopback(void (*cache_disable)(void))
176{
177 int ret;
178
179 /*
180 * We're going to soft-restart the current CPU through the
181 * low-level MCPM code by leveraging the suspend/resume
182 * infrastructure. Let's play it safe by using cpu_pm_enter()
183 * in case the CPU init code path resets the VFP or similar.
184 */
185 local_irq_disable();
186 local_fiq_disable();
187 ret = cpu_pm_enter();
188 if (!ret) {
189 ret = cpu_suspend((unsigned long)cache_disable, nocache_trampoline);
190 cpu_pm_exit();
191 }
192 local_fiq_enable();
193 local_irq_enable();
194 if (ret)
195 pr_err("%s returned %d\n", __func__, ret);
196 return ret;
197}
198
199#endif
200
149struct sync_struct mcpm_sync; 201struct sync_struct mcpm_sync;
150 202
151/* 203/*
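
The new mcpm_loopback() helper above soft-restarts the calling CPU through the MCPM low-level entry path, using the suspend/resume machinery, with a caller-supplied cache-disable hook. It is meant to be called once at init time by a platform's MCPM backend; a hypothetical caller (all names invented for illustration, initcall level chosen arbitrarily) might look like:

#include <linux/init.h>
#include <asm/mcpm.h>

/* Hypothetical SoC hook: flush and turn off this CPU's data cache. */
static void my_soc_cache_off(void)
{
	/* SoC-specific flush + cache-disable sequence would go here. */
}

static int __init my_soc_mcpm_loopback_init(void)
{
	/* Cycle the boot CPU once through the MCPM down/up path. */
	return mcpm_loopback(my_soc_cache_off);
}
early_initcall(my_soc_mcpm_loopback_init);
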
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index ef8815327e5b..59b7e45142d8 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -186,6 +186,7 @@ CONFIG_VIDEO_MX3=y
186CONFIG_V4L_MEM2MEM_DRIVERS=y 186CONFIG_V4L_MEM2MEM_DRIVERS=y
187CONFIG_VIDEO_CODA=y 187CONFIG_VIDEO_CODA=y
188CONFIG_SOC_CAMERA_OV2640=y 188CONFIG_SOC_CAMERA_OV2640=y
189CONFIG_IMX_IPUV3_CORE=y
189CONFIG_DRM=y 190CONFIG_DRM=y
190CONFIG_DRM_PANEL_SIMPLE=y 191CONFIG_DRM_PANEL_SIMPLE=y
191CONFIG_BACKLIGHT_LCD_SUPPORT=y 192CONFIG_BACKLIGHT_LCD_SUPPORT=y
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 17d9462b9fb9..be1a3455a9fe 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -353,6 +353,7 @@ CONFIG_MFD_NVEC=y
353CONFIG_KEYBOARD_NVEC=y 353CONFIG_KEYBOARD_NVEC=y
354CONFIG_SERIO_NVEC_PS2=y 354CONFIG_SERIO_NVEC_PS2=y
355CONFIG_NVEC_POWER=y 355CONFIG_NVEC_POWER=y
356CONFIG_QCOM_GSBI=y
356CONFIG_COMMON_CLK_QCOM=y 357CONFIG_COMMON_CLK_QCOM=y
357CONFIG_MSM_GCC_8660=y 358CONFIG_MSM_GCC_8660=y
358CONFIG_MSM_MMCC_8960=y 359CONFIG_MSM_MMCC_8960=y
diff --git a/arch/arm/configs/mvebu_v7_defconfig b/arch/arm/configs/mvebu_v7_defconfig
index e11170e37442..b0bfefa23902 100644
--- a/arch/arm/configs/mvebu_v7_defconfig
+++ b/arch/arm/configs/mvebu_v7_defconfig
@@ -14,6 +14,7 @@ CONFIG_MACH_ARMADA_370=y
14CONFIG_MACH_ARMADA_375=y 14CONFIG_MACH_ARMADA_375=y
15CONFIG_MACH_ARMADA_38X=y 15CONFIG_MACH_ARMADA_38X=y
16CONFIG_MACH_ARMADA_XP=y 16CONFIG_MACH_ARMADA_XP=y
17CONFIG_MACH_DOVE=y
17CONFIG_NEON=y 18CONFIG_NEON=y
18# CONFIG_CACHE_L2X0 is not set 19# CONFIG_CACHE_L2X0 is not set
19# CONFIG_SWP_EMULATE is not set 20# CONFIG_SWP_EMULATE is not set
@@ -52,6 +53,7 @@ CONFIG_INPUT_EVDEV=y
52CONFIG_KEYBOARD_GPIO=y 53CONFIG_KEYBOARD_GPIO=y
53CONFIG_SERIAL_8250=y 54CONFIG_SERIAL_8250=y
54CONFIG_SERIAL_8250_CONSOLE=y 55CONFIG_SERIAL_8250_CONSOLE=y
56CONFIG_SERIAL_OF_PLATFORM=y
55CONFIG_I2C=y 57CONFIG_I2C=y
56CONFIG_SPI=y 58CONFIG_SPI=y
57CONFIG_SPI_ORION=y 59CONFIG_SPI_ORION=y
diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile
index 81cda39860c5..b48fa341648d 100644
--- a/arch/arm/crypto/Makefile
+++ b/arch/arm/crypto/Makefile
@@ -5,10 +5,14 @@
5obj-$(CONFIG_CRYPTO_AES_ARM) += aes-arm.o 5obj-$(CONFIG_CRYPTO_AES_ARM) += aes-arm.o
6obj-$(CONFIG_CRYPTO_AES_ARM_BS) += aes-arm-bs.o 6obj-$(CONFIG_CRYPTO_AES_ARM_BS) += aes-arm-bs.o
7obj-$(CONFIG_CRYPTO_SHA1_ARM) += sha1-arm.o 7obj-$(CONFIG_CRYPTO_SHA1_ARM) += sha1-arm.o
8obj-$(CONFIG_CRYPTO_SHA1_ARM_NEON) += sha1-arm-neon.o
9obj-$(CONFIG_CRYPTO_SHA512_ARM_NEON) += sha512-arm-neon.o
8 10
9aes-arm-y := aes-armv4.o aes_glue.o 11aes-arm-y := aes-armv4.o aes_glue.o
10aes-arm-bs-y := aesbs-core.o aesbs-glue.o 12aes-arm-bs-y := aesbs-core.o aesbs-glue.o
11sha1-arm-y := sha1-armv4-large.o sha1_glue.o 13sha1-arm-y := sha1-armv4-large.o sha1_glue.o
14sha1-arm-neon-y := sha1-armv7-neon.o sha1_neon_glue.o
15sha512-arm-neon-y := sha512-armv7-neon.o sha512_neon_glue.o
12 16
13quiet_cmd_perl = PERL $@ 17quiet_cmd_perl = PERL $@
14 cmd_perl = $(PERL) $(<) > $(@) 18 cmd_perl = $(PERL) $(<) > $(@)
diff --git a/arch/arm/crypto/aes-armv4.S b/arch/arm/crypto/aes-armv4.S
index 3a14ea8fe97e..ebb9761fb572 100644
--- a/arch/arm/crypto/aes-armv4.S
+++ b/arch/arm/crypto/aes-armv4.S
@@ -35,6 +35,7 @@
35@ that is being targetted. 35@ that is being targetted.
36 36
37#include <linux/linkage.h> 37#include <linux/linkage.h>
38#include <asm/assembler.h>
38 39
39.text 40.text
40 41
@@ -648,7 +649,7 @@ _armv4_AES_set_encrypt_key:
648 649
649.Ldone: mov r0,#0 650.Ldone: mov r0,#0
650 ldmia sp!,{r4-r12,lr} 651 ldmia sp!,{r4-r12,lr}
651.Labrt: mov pc,lr 652.Labrt: ret lr
652ENDPROC(private_AES_set_encrypt_key) 653ENDPROC(private_AES_set_encrypt_key)
653 654
654.align 5 655.align 5
diff --git a/arch/arm/crypto/sha1-armv7-neon.S b/arch/arm/crypto/sha1-armv7-neon.S
new file mode 100644
index 000000000000..50013c0e2864
--- /dev/null
+++ b/arch/arm/crypto/sha1-armv7-neon.S
@@ -0,0 +1,634 @@
1/* sha1-armv7-neon.S - ARM/NEON accelerated SHA-1 transform function
2 *
3 * Copyright © 2013-2014 Jussi Kivilinna <jussi.kivilinna@iki.fi>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the Free
7 * Software Foundation; either version 2 of the License, or (at your option)
8 * any later version.
9 */
10
11#include <linux/linkage.h>
12
13
14.syntax unified
15.code 32
16.fpu neon
17
18.text
19
20
21/* Context structure */
22
23#define state_h0 0
24#define state_h1 4
25#define state_h2 8
26#define state_h3 12
27#define state_h4 16
28
29
30/* Constants */
31
32#define K1 0x5A827999
33#define K2 0x6ED9EBA1
34#define K3 0x8F1BBCDC
35#define K4 0xCA62C1D6
36.align 4
37.LK_VEC:
38.LK1: .long K1, K1, K1, K1
39.LK2: .long K2, K2, K2, K2
40.LK3: .long K3, K3, K3, K3
41.LK4: .long K4, K4, K4, K4
42
43
44/* Register macros */
45
46#define RSTATE r0
47#define RDATA r1
48#define RNBLKS r2
49#define ROLDSTACK r3
50#define RWK lr
51
52#define _a r4
53#define _b r5
54#define _c r6
55#define _d r7
56#define _e r8
57
58#define RT0 r9
59#define RT1 r10
60#define RT2 r11
61#define RT3 r12
62
63#define W0 q0
64#define W1 q1
65#define W2 q2
66#define W3 q3
67#define W4 q4
68#define W5 q5
69#define W6 q6
70#define W7 q7
71
72#define tmp0 q8
73#define tmp1 q9
74#define tmp2 q10
75#define tmp3 q11
76
77#define qK1 q12
78#define qK2 q13
79#define qK3 q14
80#define qK4 q15
81
82
83/* Round function macros. */
84
85#define WK_offs(i) (((i) & 15) * 4)
86
87#define _R_F1(a,b,c,d,e,i,pre1,pre2,pre3,i16,\
88 W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
89 ldr RT3, [sp, WK_offs(i)]; \
90 pre1(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
91 bic RT0, d, b; \
92 add e, e, a, ror #(32 - 5); \
93 and RT1, c, b; \
94 pre2(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
95 add RT0, RT0, RT3; \
96 add e, e, RT1; \
97 ror b, #(32 - 30); \
98 pre3(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
99 add e, e, RT0;
100
101#define _R_F2(a,b,c,d,e,i,pre1,pre2,pre3,i16,\
102 W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
103 ldr RT3, [sp, WK_offs(i)]; \
104 pre1(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
105 eor RT0, d, b; \
106 add e, e, a, ror #(32 - 5); \
107 eor RT0, RT0, c; \
108 pre2(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
109 add e, e, RT3; \
110 ror b, #(32 - 30); \
111 pre3(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
112 add e, e, RT0; \
113
114#define _R_F3(a,b,c,d,e,i,pre1,pre2,pre3,i16,\
115 W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
116 ldr RT3, [sp, WK_offs(i)]; \
117 pre1(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
118 eor RT0, b, c; \
119 and RT1, b, c; \
120 add e, e, a, ror #(32 - 5); \
121 pre2(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
122 and RT0, RT0, d; \
123 add RT1, RT1, RT3; \
124 add e, e, RT0; \
125 ror b, #(32 - 30); \
126 pre3(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
127 add e, e, RT1;
128
129#define _R_F4(a,b,c,d,e,i,pre1,pre2,pre3,i16,\
130 W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
131 _R_F2(a,b,c,d,e,i,pre1,pre2,pre3,i16,\
132 W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28)
133
134#define _R(a,b,c,d,e,f,i,pre1,pre2,pre3,i16,\
135 W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
136 _R_##f(a,b,c,d,e,i,pre1,pre2,pre3,i16,\
137 W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28)
138
139#define R(a,b,c,d,e,f,i) \
140 _R_##f(a,b,c,d,e,i,dummy,dummy,dummy,i16,\
141 W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28)
142
143#define dummy(...)
144
145
146/* Input expansion macros. */
147
148/********* Precalc macros for rounds 0-15 *************************************/
149
150#define W_PRECALC_00_15() \
151 add RWK, sp, #(WK_offs(0)); \
152 \
153 vld1.32 {tmp0, tmp1}, [RDATA]!; \
154 vrev32.8 W0, tmp0; /* big => little */ \
155 vld1.32 {tmp2, tmp3}, [RDATA]!; \
156 vadd.u32 tmp0, W0, curK; \
157 vrev32.8 W7, tmp1; /* big => little */ \
158 vrev32.8 W6, tmp2; /* big => little */ \
159 vadd.u32 tmp1, W7, curK; \
160 vrev32.8 W5, tmp3; /* big => little */ \
161 vadd.u32 tmp2, W6, curK; \
162 vst1.32 {tmp0, tmp1}, [RWK]!; \
163 vadd.u32 tmp3, W5, curK; \
164 vst1.32 {tmp2, tmp3}, [RWK]; \
165
166#define WPRECALC_00_15_0(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
167 vld1.32 {tmp0, tmp1}, [RDATA]!; \
168
169#define WPRECALC_00_15_1(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
170 add RWK, sp, #(WK_offs(0)); \
171
172#define WPRECALC_00_15_2(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
173 vrev32.8 W0, tmp0; /* big => little */ \
174
175#define WPRECALC_00_15_3(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
176 vld1.32 {tmp2, tmp3}, [RDATA]!; \
177
178#define WPRECALC_00_15_4(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
179 vadd.u32 tmp0, W0, curK; \
180
181#define WPRECALC_00_15_5(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
182 vrev32.8 W7, tmp1; /* big => little */ \
183
184#define WPRECALC_00_15_6(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
185 vrev32.8 W6, tmp2; /* big => little */ \
186
187#define WPRECALC_00_15_7(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
188 vadd.u32 tmp1, W7, curK; \
189
190#define WPRECALC_00_15_8(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
191 vrev32.8 W5, tmp3; /* big => little */ \
192
193#define WPRECALC_00_15_9(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
194 vadd.u32 tmp2, W6, curK; \
195
196#define WPRECALC_00_15_10(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
197 vst1.32 {tmp0, tmp1}, [RWK]!; \
198
199#define WPRECALC_00_15_11(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
200 vadd.u32 tmp3, W5, curK; \
201
202#define WPRECALC_00_15_12(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
203 vst1.32 {tmp2, tmp3}, [RWK]; \
204
205
206/********* Precalc macros for rounds 16-31 ************************************/
207
208#define WPRECALC_16_31_0(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
209 veor tmp0, tmp0; \
210 vext.8 W, W_m16, W_m12, #8; \
211
212#define WPRECALC_16_31_1(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
213 add RWK, sp, #(WK_offs(i)); \
214 vext.8 tmp0, W_m04, tmp0, #4; \
215
216#define WPRECALC_16_31_2(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
217 veor tmp0, tmp0, W_m16; \
218 veor.32 W, W, W_m08; \
219
220#define WPRECALC_16_31_3(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
221 veor tmp1, tmp1; \
222 veor W, W, tmp0; \
223
224#define WPRECALC_16_31_4(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
225 vshl.u32 tmp0, W, #1; \
226
227#define WPRECALC_16_31_5(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
228 vext.8 tmp1, tmp1, W, #(16-12); \
229 vshr.u32 W, W, #31; \
230
231#define WPRECALC_16_31_6(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
232 vorr tmp0, tmp0, W; \
233 vshr.u32 W, tmp1, #30; \
234
235#define WPRECALC_16_31_7(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
236 vshl.u32 tmp1, tmp1, #2; \
237
238#define WPRECALC_16_31_8(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
239 veor tmp0, tmp0, W; \
240
241#define WPRECALC_16_31_9(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
242 veor W, tmp0, tmp1; \
243
244#define WPRECALC_16_31_10(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
245 vadd.u32 tmp0, W, curK; \
246
247#define WPRECALC_16_31_11(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
248 vst1.32 {tmp0}, [RWK];
249
250
251/********* Precalc macros for rounds 32-79 ************************************/
252
253#define WPRECALC_32_79_0(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
254 veor W, W_m28; \
255
256#define WPRECALC_32_79_1(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
257 vext.8 tmp0, W_m08, W_m04, #8; \
258
259#define WPRECALC_32_79_2(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
260 veor W, W_m16; \
261
262#define WPRECALC_32_79_3(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
263 veor W, tmp0; \
264
265#define WPRECALC_32_79_4(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
266 add RWK, sp, #(WK_offs(i&~3)); \
267
268#define WPRECALC_32_79_5(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
269 vshl.u32 tmp1, W, #2; \
270
271#define WPRECALC_32_79_6(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
272 vshr.u32 tmp0, W, #30; \
273
274#define WPRECALC_32_79_7(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
275 vorr W, tmp0, tmp1; \
276
277#define WPRECALC_32_79_8(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
278 vadd.u32 tmp0, W, curK; \
279
280#define WPRECALC_32_79_9(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
281 vst1.32 {tmp0}, [RWK];
282
283
284/*
285 * Transform nblks*64 bytes (nblks*16 32-bit words) at DATA.
286 *
287 * unsigned int
288 * sha1_transform_neon (void *ctx, const unsigned char *data,
289 * unsigned int nblks)
290 */
291.align 3
292ENTRY(sha1_transform_neon)
293 /* input:
294 * r0: ctx, CTX
295 * r1: data (64*nblks bytes)
296 * r2: nblks
297 */
298
299 cmp RNBLKS, #0;
300 beq .Ldo_nothing;
301
302 push {r4-r12, lr};
303 /*vpush {q4-q7};*/
304
305 adr RT3, .LK_VEC;
306
307 mov ROLDSTACK, sp;
308
309 /* Align stack. */
310 sub RT0, sp, #(16*4);
311 and RT0, #(~(16-1));
312 mov sp, RT0;
313
314 vld1.32 {qK1-qK2}, [RT3]!; /* Load K1,K2 */
315
316 /* Get the values of the chaining variables. */
317 ldm RSTATE, {_a-_e};
318
319 vld1.32 {qK3-qK4}, [RT3]; /* Load K3,K4 */
320
321#undef curK
322#define curK qK1
323 /* Precalc 0-15. */
324 W_PRECALC_00_15();
325
326.Loop:
327 /* Transform 0-15 + Precalc 16-31. */
328 _R( _a, _b, _c, _d, _e, F1, 0,
329 WPRECALC_16_31_0, WPRECALC_16_31_1, WPRECALC_16_31_2, 16,
330 W4, W5, W6, W7, W0, _, _, _ );
331 _R( _e, _a, _b, _c, _d, F1, 1,
332 WPRECALC_16_31_3, WPRECALC_16_31_4, WPRECALC_16_31_5, 16,
333 W4, W5, W6, W7, W0, _, _, _ );
334 _R( _d, _e, _a, _b, _c, F1, 2,
335 WPRECALC_16_31_6, WPRECALC_16_31_7, WPRECALC_16_31_8, 16,
336 W4, W5, W6, W7, W0, _, _, _ );
337 _R( _c, _d, _e, _a, _b, F1, 3,
338 WPRECALC_16_31_9, WPRECALC_16_31_10,WPRECALC_16_31_11,16,
339 W4, W5, W6, W7, W0, _, _, _ );
340
341#undef curK
342#define curK qK2
343 _R( _b, _c, _d, _e, _a, F1, 4,
344 WPRECALC_16_31_0, WPRECALC_16_31_1, WPRECALC_16_31_2, 20,
345 W3, W4, W5, W6, W7, _, _, _ );
346 _R( _a, _b, _c, _d, _e, F1, 5,
347 WPRECALC_16_31_3, WPRECALC_16_31_4, WPRECALC_16_31_5, 20,
348 W3, W4, W5, W6, W7, _, _, _ );
349 _R( _e, _a, _b, _c, _d, F1, 6,
350 WPRECALC_16_31_6, WPRECALC_16_31_7, WPRECALC_16_31_8, 20,
351 W3, W4, W5, W6, W7, _, _, _ );
352 _R( _d, _e, _a, _b, _c, F1, 7,
353 WPRECALC_16_31_9, WPRECALC_16_31_10,WPRECALC_16_31_11,20,
354 W3, W4, W5, W6, W7, _, _, _ );
355
356 _R( _c, _d, _e, _a, _b, F1, 8,
357 WPRECALC_16_31_0, WPRECALC_16_31_1, WPRECALC_16_31_2, 24,
358 W2, W3, W4, W5, W6, _, _, _ );
359 _R( _b, _c, _d, _e, _a, F1, 9,
360 WPRECALC_16_31_3, WPRECALC_16_31_4, WPRECALC_16_31_5, 24,
361 W2, W3, W4, W5, W6, _, _, _ );
362 _R( _a, _b, _c, _d, _e, F1, 10,
363 WPRECALC_16_31_6, WPRECALC_16_31_7, WPRECALC_16_31_8, 24,
364 W2, W3, W4, W5, W6, _, _, _ );
365 _R( _e, _a, _b, _c, _d, F1, 11,
366 WPRECALC_16_31_9, WPRECALC_16_31_10,WPRECALC_16_31_11,24,
367 W2, W3, W4, W5, W6, _, _, _ );
368
369 _R( _d, _e, _a, _b, _c, F1, 12,
370 WPRECALC_16_31_0, WPRECALC_16_31_1, WPRECALC_16_31_2, 28,
371 W1, W2, W3, W4, W5, _, _, _ );
372 _R( _c, _d, _e, _a, _b, F1, 13,
373 WPRECALC_16_31_3, WPRECALC_16_31_4, WPRECALC_16_31_5, 28,
374 W1, W2, W3, W4, W5, _, _, _ );
375 _R( _b, _c, _d, _e, _a, F1, 14,
376 WPRECALC_16_31_6, WPRECALC_16_31_7, WPRECALC_16_31_8, 28,
377 W1, W2, W3, W4, W5, _, _, _ );
378 _R( _a, _b, _c, _d, _e, F1, 15,
379 WPRECALC_16_31_9, WPRECALC_16_31_10,WPRECALC_16_31_11,28,
380 W1, W2, W3, W4, W5, _, _, _ );
381
382 /* Transform 16-63 + Precalc 32-79. */
383 _R( _e, _a, _b, _c, _d, F1, 16,
384 WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 32,
385 W0, W1, W2, W3, W4, W5, W6, W7);
386 _R( _d, _e, _a, _b, _c, F1, 17,
387 WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 32,
388 W0, W1, W2, W3, W4, W5, W6, W7);
389 _R( _c, _d, _e, _a, _b, F1, 18,
390 WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 32,
391 W0, W1, W2, W3, W4, W5, W6, W7);
392 _R( _b, _c, _d, _e, _a, F1, 19,
393 WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 32,
394 W0, W1, W2, W3, W4, W5, W6, W7);
395
396 _R( _a, _b, _c, _d, _e, F2, 20,
397 WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 36,
398 W7, W0, W1, W2, W3, W4, W5, W6);
399 _R( _e, _a, _b, _c, _d, F2, 21,
400 WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 36,
401 W7, W0, W1, W2, W3, W4, W5, W6);
402 _R( _d, _e, _a, _b, _c, F2, 22,
403 WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 36,
404 W7, W0, W1, W2, W3, W4, W5, W6);
405 _R( _c, _d, _e, _a, _b, F2, 23,
406 WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 36,
407 W7, W0, W1, W2, W3, W4, W5, W6);
408
409#undef curK
410#define curK qK3
411 _R( _b, _c, _d, _e, _a, F2, 24,
412 WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 40,
413 W6, W7, W0, W1, W2, W3, W4, W5);
414 _R( _a, _b, _c, _d, _e, F2, 25,
415 WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 40,
416 W6, W7, W0, W1, W2, W3, W4, W5);
417 _R( _e, _a, _b, _c, _d, F2, 26,
418 WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 40,
419 W6, W7, W0, W1, W2, W3, W4, W5);
420 _R( _d, _e, _a, _b, _c, F2, 27,
421 WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 40,
422 W6, W7, W0, W1, W2, W3, W4, W5);
423
424 _R( _c, _d, _e, _a, _b, F2, 28,
425 WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 44,
426 W5, W6, W7, W0, W1, W2, W3, W4);
427 _R( _b, _c, _d, _e, _a, F2, 29,
428 WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 44,
429 W5, W6, W7, W0, W1, W2, W3, W4);
430 _R( _a, _b, _c, _d, _e, F2, 30,
431 WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 44,
432 W5, W6, W7, W0, W1, W2, W3, W4);
433 _R( _e, _a, _b, _c, _d, F2, 31,
434 WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 44,
435 W5, W6, W7, W0, W1, W2, W3, W4);
436
437 _R( _d, _e, _a, _b, _c, F2, 32,
438 WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 48,
439 W4, W5, W6, W7, W0, W1, W2, W3);
440 _R( _c, _d, _e, _a, _b, F2, 33,
441 WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 48,
442 W4, W5, W6, W7, W0, W1, W2, W3);
443 _R( _b, _c, _d, _e, _a, F2, 34,
444 WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 48,
445 W4, W5, W6, W7, W0, W1, W2, W3);
446 _R( _a, _b, _c, _d, _e, F2, 35,
447 WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 48,
448 W4, W5, W6, W7, W0, W1, W2, W3);
449
450 _R( _e, _a, _b, _c, _d, F2, 36,
451 WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 52,
452 W3, W4, W5, W6, W7, W0, W1, W2);
453 _R( _d, _e, _a, _b, _c, F2, 37,
454 WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 52,
455 W3, W4, W5, W6, W7, W0, W1, W2);
456 _R( _c, _d, _e, _a, _b, F2, 38,
457 WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 52,
458 W3, W4, W5, W6, W7, W0, W1, W2);
459 _R( _b, _c, _d, _e, _a, F2, 39,
460 WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 52,
461 W3, W4, W5, W6, W7, W0, W1, W2);
462
463 _R( _a, _b, _c, _d, _e, F3, 40,
464 WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 56,
465 W2, W3, W4, W5, W6, W7, W0, W1);
466 _R( _e, _a, _b, _c, _d, F3, 41,
467 WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 56,
468 W2, W3, W4, W5, W6, W7, W0, W1);
469 _R( _d, _e, _a, _b, _c, F3, 42,
470 WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 56,
471 W2, W3, W4, W5, W6, W7, W0, W1);
472 _R( _c, _d, _e, _a, _b, F3, 43,
473 WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 56,
474 W2, W3, W4, W5, W6, W7, W0, W1);
475
476#undef curK
477#define curK qK4
478 _R( _b, _c, _d, _e, _a, F3, 44,
479 WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 60,
480 W1, W2, W3, W4, W5, W6, W7, W0);
481 _R( _a, _b, _c, _d, _e, F3, 45,
482 WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 60,
483 W1, W2, W3, W4, W5, W6, W7, W0);
484 _R( _e, _a, _b, _c, _d, F3, 46,
485 WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 60,
486 W1, W2, W3, W4, W5, W6, W7, W0);
487 _R( _d, _e, _a, _b, _c, F3, 47,
488 WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 60,
489 W1, W2, W3, W4, W5, W6, W7, W0);
490
491 _R( _c, _d, _e, _a, _b, F3, 48,
492 WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 64,
493 W0, W1, W2, W3, W4, W5, W6, W7);
494 _R( _b, _c, _d, _e, _a, F3, 49,
495 WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 64,
496 W0, W1, W2, W3, W4, W5, W6, W7);
497 _R( _a, _b, _c, _d, _e, F3, 50,
498 WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 64,
499 W0, W1, W2, W3, W4, W5, W6, W7);
500 _R( _e, _a, _b, _c, _d, F3, 51,
501 WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 64,
502 W0, W1, W2, W3, W4, W5, W6, W7);
503
504 _R( _d, _e, _a, _b, _c, F3, 52,
505 WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 68,
506 W7, W0, W1, W2, W3, W4, W5, W6);
507 _R( _c, _d, _e, _a, _b, F3, 53,
508 WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 68,
509 W7, W0, W1, W2, W3, W4, W5, W6);
510 _R( _b, _c, _d, _e, _a, F3, 54,
511 WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 68,
512 W7, W0, W1, W2, W3, W4, W5, W6);
513 _R( _a, _b, _c, _d, _e, F3, 55,
514 WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 68,
515 W7, W0, W1, W2, W3, W4, W5, W6);
516
517 _R( _e, _a, _b, _c, _d, F3, 56,
518 WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 72,
519 W6, W7, W0, W1, W2, W3, W4, W5);
520 _R( _d, _e, _a, _b, _c, F3, 57,
521 WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 72,
522 W6, W7, W0, W1, W2, W3, W4, W5);
523 _R( _c, _d, _e, _a, _b, F3, 58,
524 WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 72,
525 W6, W7, W0, W1, W2, W3, W4, W5);
526 _R( _b, _c, _d, _e, _a, F3, 59,
527 WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 72,
528 W6, W7, W0, W1, W2, W3, W4, W5);
529
530 subs RNBLKS, #1;
531
532 _R( _a, _b, _c, _d, _e, F4, 60,
533 WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 76,
534 W5, W6, W7, W0, W1, W2, W3, W4);
535 _R( _e, _a, _b, _c, _d, F4, 61,
536 WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 76,
537 W5, W6, W7, W0, W1, W2, W3, W4);
538 _R( _d, _e, _a, _b, _c, F4, 62,
539 WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 76,
540 W5, W6, W7, W0, W1, W2, W3, W4);
541 _R( _c, _d, _e, _a, _b, F4, 63,
542 WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 76,
543 W5, W6, W7, W0, W1, W2, W3, W4);
544
545 beq .Lend;
546
547 /* Transform 64-79 + Precalc 0-15 of next block. */
548#undef curK
549#define curK qK1
550 _R( _b, _c, _d, _e, _a, F4, 64,
551 WPRECALC_00_15_0, dummy, dummy, _, _, _, _, _, _, _, _, _ );
552 _R( _a, _b, _c, _d, _e, F4, 65,
553 WPRECALC_00_15_1, dummy, dummy, _, _, _, _, _, _, _, _, _ );
554 _R( _e, _a, _b, _c, _d, F4, 66,
555 WPRECALC_00_15_2, dummy, dummy, _, _, _, _, _, _, _, _, _ );
556 _R( _d, _e, _a, _b, _c, F4, 67,
557 WPRECALC_00_15_3, dummy, dummy, _, _, _, _, _, _, _, _, _ );
558
559 _R( _c, _d, _e, _a, _b, F4, 68,
560 dummy, dummy, dummy, _, _, _, _, _, _, _, _, _ );
561 _R( _b, _c, _d, _e, _a, F4, 69,
562 dummy, dummy, dummy, _, _, _, _, _, _, _, _, _ );
563 _R( _a, _b, _c, _d, _e, F4, 70,
564 WPRECALC_00_15_4, dummy, dummy, _, _, _, _, _, _, _, _, _ );
565 _R( _e, _a, _b, _c, _d, F4, 71,
566 WPRECALC_00_15_5, dummy, dummy, _, _, _, _, _, _, _, _, _ );
567
568 _R( _d, _e, _a, _b, _c, F4, 72,
569 dummy, dummy, dummy, _, _, _, _, _, _, _, _, _ );
570 _R( _c, _d, _e, _a, _b, F4, 73,
571 dummy, dummy, dummy, _, _, _, _, _, _, _, _, _ );
572 _R( _b, _c, _d, _e, _a, F4, 74,
573 WPRECALC_00_15_6, dummy, dummy, _, _, _, _, _, _, _, _, _ );
574 _R( _a, _b, _c, _d, _e, F4, 75,
575 WPRECALC_00_15_7, dummy, dummy, _, _, _, _, _, _, _, _, _ );
576
577 _R( _e, _a, _b, _c, _d, F4, 76,
578 WPRECALC_00_15_8, dummy, dummy, _, _, _, _, _, _, _, _, _ );
579 _R( _d, _e, _a, _b, _c, F4, 77,
580 WPRECALC_00_15_9, dummy, dummy, _, _, _, _, _, _, _, _, _ );
581 _R( _c, _d, _e, _a, _b, F4, 78,
582 WPRECALC_00_15_10, dummy, dummy, _, _, _, _, _, _, _, _, _ );
583 _R( _b, _c, _d, _e, _a, F4, 79,
584 WPRECALC_00_15_11, dummy, WPRECALC_00_15_12, _, _, _, _, _, _, _, _, _ );
585
586 /* Update the chaining variables. */
587 ldm RSTATE, {RT0-RT3};
588 add _a, RT0;
589 ldr RT0, [RSTATE, #state_h4];
590 add _b, RT1;
591 add _c, RT2;
592 add _d, RT3;
593 add _e, RT0;
594 stm RSTATE, {_a-_e};
595
596 b .Loop;
597
598.Lend:
599 /* Transform 64-79 */
600 R( _b, _c, _d, _e, _a, F4, 64 );
601 R( _a, _b, _c, _d, _e, F4, 65 );
602 R( _e, _a, _b, _c, _d, F4, 66 );
603 R( _d, _e, _a, _b, _c, F4, 67 );
604 R( _c, _d, _e, _a, _b, F4, 68 );
605 R( _b, _c, _d, _e, _a, F4, 69 );
606 R( _a, _b, _c, _d, _e, F4, 70 );
607 R( _e, _a, _b, _c, _d, F4, 71 );
608 R( _d, _e, _a, _b, _c, F4, 72 );
609 R( _c, _d, _e, _a, _b, F4, 73 );
610 R( _b, _c, _d, _e, _a, F4, 74 );
611 R( _a, _b, _c, _d, _e, F4, 75 );
612 R( _e, _a, _b, _c, _d, F4, 76 );
613 R( _d, _e, _a, _b, _c, F4, 77 );
614 R( _c, _d, _e, _a, _b, F4, 78 );
615 R( _b, _c, _d, _e, _a, F4, 79 );
616
617 mov sp, ROLDSTACK;
618
619 /* Update the chaining variables. */
620 ldm RSTATE, {RT0-RT3};
621 add _a, RT0;
622 ldr RT0, [RSTATE, #state_h4];
623 add _b, RT1;
624 add _c, RT2;
625 add _d, RT3;
626 /*vpop {q4-q7};*/
627 add _e, RT0;
628 stm RSTATE, {_a-_e};
629
630 pop {r4-r12, pc};
631
632.Ldo_nothing:
633 bx lr
634ENDPROC(sha1_transform_neon)
diff --git a/arch/arm/crypto/sha1_glue.c b/arch/arm/crypto/sha1_glue.c
index 76cd976230bc..84f2a756588b 100644
--- a/arch/arm/crypto/sha1_glue.c
+++ b/arch/arm/crypto/sha1_glue.c
@@ -23,32 +23,27 @@
23#include <linux/types.h> 23#include <linux/types.h>
24#include <crypto/sha.h> 24#include <crypto/sha.h>
25#include <asm/byteorder.h> 25#include <asm/byteorder.h>
26#include <asm/crypto/sha1.h>
26 27
27struct SHA1_CTX {
28 uint32_t h0,h1,h2,h3,h4;
29 u64 count;
30 u8 data[SHA1_BLOCK_SIZE];
31};
32 28
33asmlinkage void sha1_block_data_order(struct SHA1_CTX *digest, 29asmlinkage void sha1_block_data_order(u32 *digest,
34 const unsigned char *data, unsigned int rounds); 30 const unsigned char *data, unsigned int rounds);
35 31
36 32
37static int sha1_init(struct shash_desc *desc) 33static int sha1_init(struct shash_desc *desc)
38{ 34{
39 struct SHA1_CTX *sctx = shash_desc_ctx(desc); 35 struct sha1_state *sctx = shash_desc_ctx(desc);
40 memset(sctx, 0, sizeof(*sctx)); 36
41 sctx->h0 = SHA1_H0; 37 *sctx = (struct sha1_state){
42 sctx->h1 = SHA1_H1; 38 .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
43 sctx->h2 = SHA1_H2; 39 };
44 sctx->h3 = SHA1_H3; 40
45 sctx->h4 = SHA1_H4;
46 return 0; 41 return 0;
47} 42}
48 43
49 44
50static int __sha1_update(struct SHA1_CTX *sctx, const u8 *data, 45static int __sha1_update(struct sha1_state *sctx, const u8 *data,
51 unsigned int len, unsigned int partial) 46 unsigned int len, unsigned int partial)
52{ 47{
53 unsigned int done = 0; 48 unsigned int done = 0;
54 49
@@ -56,43 +51,44 @@ static int __sha1_update(struct SHA1_CTX *sctx, const u8 *data,
56 51
57 if (partial) { 52 if (partial) {
58 done = SHA1_BLOCK_SIZE - partial; 53 done = SHA1_BLOCK_SIZE - partial;
59 memcpy(sctx->data + partial, data, done); 54 memcpy(sctx->buffer + partial, data, done);
60 sha1_block_data_order(sctx, sctx->data, 1); 55 sha1_block_data_order(sctx->state, sctx->buffer, 1);
61 } 56 }
62 57
63 if (len - done >= SHA1_BLOCK_SIZE) { 58 if (len - done >= SHA1_BLOCK_SIZE) {
64 const unsigned int rounds = (len - done) / SHA1_BLOCK_SIZE; 59 const unsigned int rounds = (len - done) / SHA1_BLOCK_SIZE;
65 sha1_block_data_order(sctx, data + done, rounds); 60 sha1_block_data_order(sctx->state, data + done, rounds);
66 done += rounds * SHA1_BLOCK_SIZE; 61 done += rounds * SHA1_BLOCK_SIZE;
67 } 62 }
68 63
69 memcpy(sctx->data, data + done, len - done); 64 memcpy(sctx->buffer, data + done, len - done);
70 return 0; 65 return 0;
71} 66}
72 67
73 68
74static int sha1_update(struct shash_desc *desc, const u8 *data, 69int sha1_update_arm(struct shash_desc *desc, const u8 *data,
75 unsigned int len) 70 unsigned int len)
76{ 71{
77 struct SHA1_CTX *sctx = shash_desc_ctx(desc); 72 struct sha1_state *sctx = shash_desc_ctx(desc);
78 unsigned int partial = sctx->count % SHA1_BLOCK_SIZE; 73 unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
79 int res; 74 int res;
80 75
81 /* Handle the fast case right here */ 76 /* Handle the fast case right here */
82 if (partial + len < SHA1_BLOCK_SIZE) { 77 if (partial + len < SHA1_BLOCK_SIZE) {
83 sctx->count += len; 78 sctx->count += len;
84 memcpy(sctx->data + partial, data, len); 79 memcpy(sctx->buffer + partial, data, len);
85 return 0; 80 return 0;
86 } 81 }
87 res = __sha1_update(sctx, data, len, partial); 82 res = __sha1_update(sctx, data, len, partial);
88 return res; 83 return res;
89} 84}
85EXPORT_SYMBOL_GPL(sha1_update_arm);
90 86
91 87
92/* Add padding and return the message digest. */ 88/* Add padding and return the message digest. */
93static int sha1_final(struct shash_desc *desc, u8 *out) 89static int sha1_final(struct shash_desc *desc, u8 *out)
94{ 90{
95 struct SHA1_CTX *sctx = shash_desc_ctx(desc); 91 struct sha1_state *sctx = shash_desc_ctx(desc);
96 unsigned int i, index, padlen; 92 unsigned int i, index, padlen;
97 __be32 *dst = (__be32 *)out; 93 __be32 *dst = (__be32 *)out;
98 __be64 bits; 94 __be64 bits;
@@ -106,7 +102,7 @@ static int sha1_final(struct shash_desc *desc, u8 *out)
106 /* We need to fill a whole block for __sha1_update() */ 102 /* We need to fill a whole block for __sha1_update() */
107 if (padlen <= 56) { 103 if (padlen <= 56) {
108 sctx->count += padlen; 104 sctx->count += padlen;
109 memcpy(sctx->data + index, padding, padlen); 105 memcpy(sctx->buffer + index, padding, padlen);
110 } else { 106 } else {
111 __sha1_update(sctx, padding, padlen, index); 107 __sha1_update(sctx, padding, padlen, index);
112 } 108 }
@@ -114,7 +110,7 @@ static int sha1_final(struct shash_desc *desc, u8 *out)
114 110
115 /* Store state in digest */ 111 /* Store state in digest */
116 for (i = 0; i < 5; i++) 112 for (i = 0; i < 5; i++)
117 dst[i] = cpu_to_be32(((u32 *)sctx)[i]); 113 dst[i] = cpu_to_be32(sctx->state[i]);
118 114
119 /* Wipe context */ 115 /* Wipe context */
120 memset(sctx, 0, sizeof(*sctx)); 116 memset(sctx, 0, sizeof(*sctx));
@@ -124,7 +120,7 @@ static int sha1_final(struct shash_desc *desc, u8 *out)
124 120
125static int sha1_export(struct shash_desc *desc, void *out) 121static int sha1_export(struct shash_desc *desc, void *out)
126{ 122{
127 struct SHA1_CTX *sctx = shash_desc_ctx(desc); 123 struct sha1_state *sctx = shash_desc_ctx(desc);
128 memcpy(out, sctx, sizeof(*sctx)); 124 memcpy(out, sctx, sizeof(*sctx));
129 return 0; 125 return 0;
130} 126}
@@ -132,7 +128,7 @@ static int sha1_export(struct shash_desc *desc, void *out)
132 128
133static int sha1_import(struct shash_desc *desc, const void *in) 129static int sha1_import(struct shash_desc *desc, const void *in)
134{ 130{
135 struct SHA1_CTX *sctx = shash_desc_ctx(desc); 131 struct sha1_state *sctx = shash_desc_ctx(desc);
136 memcpy(sctx, in, sizeof(*sctx)); 132 memcpy(sctx, in, sizeof(*sctx));
137 return 0; 133 return 0;
138} 134}
@@ -141,12 +137,12 @@ static int sha1_import(struct shash_desc *desc, const void *in)
141static struct shash_alg alg = { 137static struct shash_alg alg = {
142 .digestsize = SHA1_DIGEST_SIZE, 138 .digestsize = SHA1_DIGEST_SIZE,
143 .init = sha1_init, 139 .init = sha1_init,
144 .update = sha1_update, 140 .update = sha1_update_arm,
145 .final = sha1_final, 141 .final = sha1_final,
146 .export = sha1_export, 142 .export = sha1_export,
147 .import = sha1_import, 143 .import = sha1_import,
148 .descsize = sizeof(struct SHA1_CTX), 144 .descsize = sizeof(struct sha1_state),
149 .statesize = sizeof(struct SHA1_CTX), 145 .statesize = sizeof(struct sha1_state),
150 .base = { 146 .base = {
151 .cra_name = "sha1", 147 .cra_name = "sha1",
152 .cra_driver_name= "sha1-asm", 148 .cra_driver_name= "sha1-asm",
diff --git a/arch/arm/crypto/sha1_neon_glue.c b/arch/arm/crypto/sha1_neon_glue.c
new file mode 100644
index 000000000000..6f1b411b1d55
--- /dev/null
+++ b/arch/arm/crypto/sha1_neon_glue.c
@@ -0,0 +1,197 @@
1/*
2 * Glue code for the SHA1 Secure Hash Algorithm assembler implementation using
3 * ARM NEON instructions.
4 *
5 * Copyright © 2014 Jussi Kivilinna <jussi.kivilinna@iki.fi>
6 *
7 * This file is based on sha1_generic.c and sha1_ssse3_glue.c:
8 * Copyright (c) Alan Smithee.
9 * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
10 * Copyright (c) Jean-Francois Dive <jef@linuxbe.org>
11 * Copyright (c) Mathias Krause <minipli@googlemail.com>
12 * Copyright (c) Chandramouli Narayanan <mouli@linux.intel.com>
13 *
14 * This program is free software; you can redistribute it and/or modify it
15 * under the terms of the GNU General Public License as published by the Free
16 * Software Foundation; either version 2 of the License, or (at your option)
17 * any later version.
18 *
19 */
20
21#include <crypto/internal/hash.h>
22#include <linux/init.h>
23#include <linux/module.h>
24#include <linux/mm.h>
25#include <linux/cryptohash.h>
26#include <linux/types.h>
27#include <crypto/sha.h>
28#include <asm/byteorder.h>
29#include <asm/neon.h>
30#include <asm/simd.h>
31#include <asm/crypto/sha1.h>
32
33
34asmlinkage void sha1_transform_neon(void *state_h, const char *data,
35 unsigned int rounds);
36
37
38static int sha1_neon_init(struct shash_desc *desc)
39{
40 struct sha1_state *sctx = shash_desc_ctx(desc);
41
42 *sctx = (struct sha1_state){
43 .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
44 };
45
46 return 0;
47}
48
49static int __sha1_neon_update(struct shash_desc *desc, const u8 *data,
50 unsigned int len, unsigned int partial)
51{
52 struct sha1_state *sctx = shash_desc_ctx(desc);
53 unsigned int done = 0;
54
55 sctx->count += len;
56
57 if (partial) {
58 done = SHA1_BLOCK_SIZE - partial;
59 memcpy(sctx->buffer + partial, data, done);
60 sha1_transform_neon(sctx->state, sctx->buffer, 1);
61 }
62
63 if (len - done >= SHA1_BLOCK_SIZE) {
64 const unsigned int rounds = (len - done) / SHA1_BLOCK_SIZE;
65
66 sha1_transform_neon(sctx->state, data + done, rounds);
67 done += rounds * SHA1_BLOCK_SIZE;
68 }
69
70 memcpy(sctx->buffer, data + done, len - done);
71
72 return 0;
73}
74
75static int sha1_neon_update(struct shash_desc *desc, const u8 *data,
76 unsigned int len)
77{
78 struct sha1_state *sctx = shash_desc_ctx(desc);
79 unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
80 int res;
81
82 /* Handle the fast case right here */
83 if (partial + len < SHA1_BLOCK_SIZE) {
84 sctx->count += len;
85 memcpy(sctx->buffer + partial, data, len);
86
87 return 0;
88 }
89
90 if (!may_use_simd()) {
91 res = sha1_update_arm(desc, data, len);
92 } else {
93 kernel_neon_begin();
94 res = __sha1_neon_update(desc, data, len, partial);
95 kernel_neon_end();
96 }
97
98 return res;
99}
100
101
102/* Add padding and return the message digest. */
103static int sha1_neon_final(struct shash_desc *desc, u8 *out)
104{
105 struct sha1_state *sctx = shash_desc_ctx(desc);
106 unsigned int i, index, padlen;
107 __be32 *dst = (__be32 *)out;
108 __be64 bits;
109 static const u8 padding[SHA1_BLOCK_SIZE] = { 0x80, };
110
111 bits = cpu_to_be64(sctx->count << 3);
112
113 /* Pad out to 56 mod 64 and append length */
114 index = sctx->count % SHA1_BLOCK_SIZE;
115 padlen = (index < 56) ? (56 - index) : ((SHA1_BLOCK_SIZE+56) - index);
116 if (!may_use_simd()) {
117 sha1_update_arm(desc, padding, padlen);
118 sha1_update_arm(desc, (const u8 *)&bits, sizeof(bits));
119 } else {
120 kernel_neon_begin();
121 /* We need to fill a whole block for __sha1_neon_update() */
122 if (padlen <= 56) {
123 sctx->count += padlen;
124 memcpy(sctx->buffer + index, padding, padlen);
125 } else {
126 __sha1_neon_update(desc, padding, padlen, index);
127 }
128 __sha1_neon_update(desc, (const u8 *)&bits, sizeof(bits), 56);
129 kernel_neon_end();
130 }
131
132 /* Store state in digest */
133 for (i = 0; i < 5; i++)
134 dst[i] = cpu_to_be32(sctx->state[i]);
135
136 /* Wipe context */
137 memset(sctx, 0, sizeof(*sctx));
138
139 return 0;
140}
141
142static int sha1_neon_export(struct shash_desc *desc, void *out)
143{
144 struct sha1_state *sctx = shash_desc_ctx(desc);
145
146 memcpy(out, sctx, sizeof(*sctx));
147
148 return 0;
149}
150
151static int sha1_neon_import(struct shash_desc *desc, const void *in)
152{
153 struct sha1_state *sctx = shash_desc_ctx(desc);
154
155 memcpy(sctx, in, sizeof(*sctx));
156
157 return 0;
158}
159
160static struct shash_alg alg = {
161 .digestsize = SHA1_DIGEST_SIZE,
162 .init = sha1_neon_init,
163 .update = sha1_neon_update,
164 .final = sha1_neon_final,
165 .export = sha1_neon_export,
166 .import = sha1_neon_import,
167 .descsize = sizeof(struct sha1_state),
168 .statesize = sizeof(struct sha1_state),
169 .base = {
170 .cra_name = "sha1",
171 .cra_driver_name = "sha1-neon",
172 .cra_priority = 250,
173 .cra_flags = CRYPTO_ALG_TYPE_SHASH,
174 .cra_blocksize = SHA1_BLOCK_SIZE,
175 .cra_module = THIS_MODULE,
176 }
177};
178
179static int __init sha1_neon_mod_init(void)
180{
181 if (!cpu_has_neon())
182 return -ENODEV;
183
184 return crypto_register_shash(&alg);
185}
186
187static void __exit sha1_neon_mod_fini(void)
188{
189 crypto_unregister_shash(&alg);
190}
191
192module_init(sha1_neon_mod_init);
193module_exit(sha1_neon_mod_fini);
194
195MODULE_LICENSE("GPL");
196MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, NEON accelerated");
197MODULE_ALIAS("sha1");
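
For illustration (not part of the patch): the padlen computation in sha1_neon_final() above is plain modular arithmetic. The pad brings the buffered length up to 56 mod 64, the big-endian bit count fills the remaining 8 bytes, so the padded message is always a whole number of 64-byte blocks. A minimal user-space sketch of just that arithmetic:

/* Standalone sketch of the padding length used by sha1_neon_final(). */
#include <stdio.h>

#define SHA1_BLOCK_SIZE 64

static unsigned int sha1_padlen(unsigned long long count)
{
	unsigned int index = count % SHA1_BLOCK_SIZE;

	/* pad out to 56 mod 64; the 64-bit length occupies the last 8 bytes */
	return (index < 56) ? (56 - index) : ((SHA1_BLOCK_SIZE + 56) - index);
}

int main(void)
{
	/* count 0 -> 56, 55 -> 1, 56 -> 64, 63 -> 57: total always 0 mod 64 */
	unsigned long long counts[] = { 0, 55, 56, 63 };

	for (int i = 0; i < 4; i++)
		printf("count=%llu padlen=%u total=%llu\n", counts[i],
		       sha1_padlen(counts[i]),
		       counts[i] + sha1_padlen(counts[i]) + 8);
	return 0;
}
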
diff --git a/arch/arm/crypto/sha512-armv7-neon.S b/arch/arm/crypto/sha512-armv7-neon.S
new file mode 100644
index 000000000000..fe99472e507c
--- /dev/null
+++ b/arch/arm/crypto/sha512-armv7-neon.S
@@ -0,0 +1,455 @@
1/* sha512-armv7-neon.S - ARM/NEON assembly implementation of SHA-512 transform
2 *
3 * Copyright © 2013-2014 Jussi Kivilinna <jussi.kivilinna@iki.fi>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the Free
7 * Software Foundation; either version 2 of the License, or (at your option)
8 * any later version.
9 */
10
11#include <linux/linkage.h>
12
13
14.syntax unified
15.code 32
16.fpu neon
17
18.text
19
20/* structure of SHA512_CONTEXT */
21#define hd_a 0
22#define hd_b ((hd_a) + 8)
23#define hd_c ((hd_b) + 8)
24#define hd_d ((hd_c) + 8)
25#define hd_e ((hd_d) + 8)
26#define hd_f ((hd_e) + 8)
27#define hd_g ((hd_f) + 8)
28
29/* register macros */
30#define RK %r2
31
32#define RA d0
33#define RB d1
34#define RC d2
35#define RD d3
36#define RE d4
37#define RF d5
38#define RG d6
39#define RH d7
40
41#define RT0 d8
42#define RT1 d9
43#define RT2 d10
44#define RT3 d11
45#define RT4 d12
46#define RT5 d13
47#define RT6 d14
48#define RT7 d15
49
50#define RT01q q4
51#define RT23q q5
52#define RT45q q6
53#define RT67q q7
54
55#define RW0 d16
56#define RW1 d17
57#define RW2 d18
58#define RW3 d19
59#define RW4 d20
60#define RW5 d21
61#define RW6 d22
62#define RW7 d23
63#define RW8 d24
64#define RW9 d25
65#define RW10 d26
66#define RW11 d27
67#define RW12 d28
68#define RW13 d29
69#define RW14 d30
70#define RW15 d31
71
72#define RW01q q8
73#define RW23q q9
74#define RW45q q10
75#define RW67q q11
76#define RW89q q12
77#define RW1011q q13
78#define RW1213q q14
79#define RW1415q q15
80
81/***********************************************************************
82 * ARM assembly implementation of sha512 transform
83 ***********************************************************************/
84#define rounds2_0_63(ra, rb, rc, rd, re, rf, rg, rh, rw0, rw1, rw01q, rw2, \
85 rw23q, rw1415q, rw9, rw10, interleave_op, arg1) \
86 /* t1 = h + Sum1 (e) + Ch (e, f, g) + k[t] + w[t]; */ \
87 vshr.u64 RT2, re, #14; \
88 vshl.u64 RT3, re, #64 - 14; \
89 interleave_op(arg1); \
90 vshr.u64 RT4, re, #18; \
91 vshl.u64 RT5, re, #64 - 18; \
92 vld1.64 {RT0}, [RK]!; \
93 veor.64 RT23q, RT23q, RT45q; \
94 vshr.u64 RT4, re, #41; \
95 vshl.u64 RT5, re, #64 - 41; \
96 vadd.u64 RT0, RT0, rw0; \
97 veor.64 RT23q, RT23q, RT45q; \
98 vmov.64 RT7, re; \
99 veor.64 RT1, RT2, RT3; \
100 vbsl.64 RT7, rf, rg; \
101 \
102 vadd.u64 RT1, RT1, rh; \
103 vshr.u64 RT2, ra, #28; \
104 vshl.u64 RT3, ra, #64 - 28; \
105 vadd.u64 RT1, RT1, RT0; \
106 vshr.u64 RT4, ra, #34; \
107 vshl.u64 RT5, ra, #64 - 34; \
108 vadd.u64 RT1, RT1, RT7; \
109 \
110 /* h = Sum0 (a) + Maj (a, b, c); */ \
111 veor.64 RT23q, RT23q, RT45q; \
112 vshr.u64 RT4, ra, #39; \
113 vshl.u64 RT5, ra, #64 - 39; \
114 veor.64 RT0, ra, rb; \
115 veor.64 RT23q, RT23q, RT45q; \
116 vbsl.64 RT0, rc, rb; \
117 vadd.u64 rd, rd, RT1; /* d+=t1; */ \
118 veor.64 rh, RT2, RT3; \
119 \
120 /* t1 = g + Sum1 (d) + Ch (d, e, f) + k[t] + w[t]; */ \
121 vshr.u64 RT2, rd, #14; \
122 vshl.u64 RT3, rd, #64 - 14; \
123 vadd.u64 rh, rh, RT0; \
124 vshr.u64 RT4, rd, #18; \
125 vshl.u64 RT5, rd, #64 - 18; \
126 vadd.u64 rh, rh, RT1; /* h+=t1; */ \
127 vld1.64 {RT0}, [RK]!; \
128 veor.64 RT23q, RT23q, RT45q; \
129 vshr.u64 RT4, rd, #41; \
130 vshl.u64 RT5, rd, #64 - 41; \
131 vadd.u64 RT0, RT0, rw1; \
132 veor.64 RT23q, RT23q, RT45q; \
133 vmov.64 RT7, rd; \
134 veor.64 RT1, RT2, RT3; \
135 vbsl.64 RT7, re, rf; \
136 \
137 vadd.u64 RT1, RT1, rg; \
138 vshr.u64 RT2, rh, #28; \
139 vshl.u64 RT3, rh, #64 - 28; \
140 vadd.u64 RT1, RT1, RT0; \
141 vshr.u64 RT4, rh, #34; \
142 vshl.u64 RT5, rh, #64 - 34; \
143 vadd.u64 RT1, RT1, RT7; \
144 \
145 /* g = Sum0 (h) + Maj (h, a, b); */ \
146 veor.64 RT23q, RT23q, RT45q; \
147 vshr.u64 RT4, rh, #39; \
148 vshl.u64 RT5, rh, #64 - 39; \
149 veor.64 RT0, rh, ra; \
150 veor.64 RT23q, RT23q, RT45q; \
151 vbsl.64 RT0, rb, ra; \
152 vadd.u64 rc, rc, RT1; /* c+=t1; */ \
153 veor.64 rg, RT2, RT3; \
154 \
155 /* w[0] += S1 (w[14]) + w[9] + S0 (w[1]); */ \
156 /* w[1] += S1 (w[15]) + w[10] + S0 (w[2]); */ \
157 \
158 /**** S0(w[1:2]) */ \
159 \
160 /* w[0:1] += w[9:10] */ \
161 /* RT23q = rw1:rw2 */ \
162 vext.u64 RT23q, rw01q, rw23q, #1; \
163 vadd.u64 rw0, rw9; \
164 vadd.u64 rg, rg, RT0; \
165 vadd.u64 rw1, rw10;\
166 vadd.u64 rg, rg, RT1; /* g+=t1; */ \
167 \
168 vshr.u64 RT45q, RT23q, #1; \
169 vshl.u64 RT67q, RT23q, #64 - 1; \
170 vshr.u64 RT01q, RT23q, #8; \
171 veor.u64 RT45q, RT45q, RT67q; \
172 vshl.u64 RT67q, RT23q, #64 - 8; \
173 veor.u64 RT45q, RT45q, RT01q; \
174 vshr.u64 RT01q, RT23q, #7; \
175 veor.u64 RT45q, RT45q, RT67q; \
176 \
177 /**** S1(w[14:15]) */ \
178 vshr.u64 RT23q, rw1415q, #6; \
179 veor.u64 RT01q, RT01q, RT45q; \
180 vshr.u64 RT45q, rw1415q, #19; \
181 vshl.u64 RT67q, rw1415q, #64 - 19; \
182 veor.u64 RT23q, RT23q, RT45q; \
183 vshr.u64 RT45q, rw1415q, #61; \
184 veor.u64 RT23q, RT23q, RT67q; \
185 vshl.u64 RT67q, rw1415q, #64 - 61; \
186 veor.u64 RT23q, RT23q, RT45q; \
187 vadd.u64 rw01q, RT01q; /* w[0:1] += S(w[1:2]) */ \
188 veor.u64 RT01q, RT23q, RT67q;
189#define vadd_RT01q(rw01q) \
190 /* w[0:1] += S(w[14:15]) */ \
191 vadd.u64 rw01q, RT01q;
192
193#define dummy(_) /*_*/
194
195#define rounds2_64_79(ra, rb, rc, rd, re, rf, rg, rh, rw0, rw1, \
196 interleave_op1, arg1, interleave_op2, arg2) \
197 /* t1 = h + Sum1 (e) + Ch (e, f, g) + k[t] + w[t]; */ \
198 vshr.u64 RT2, re, #14; \
199 vshl.u64 RT3, re, #64 - 14; \
200 interleave_op1(arg1); \
201 vshr.u64 RT4, re, #18; \
202 vshl.u64 RT5, re, #64 - 18; \
203 interleave_op2(arg2); \
204 vld1.64 {RT0}, [RK]!; \
205 veor.64 RT23q, RT23q, RT45q; \
206 vshr.u64 RT4, re, #41; \
207 vshl.u64 RT5, re, #64 - 41; \
208 vadd.u64 RT0, RT0, rw0; \
209 veor.64 RT23q, RT23q, RT45q; \
210 vmov.64 RT7, re; \
211 veor.64 RT1, RT2, RT3; \
212 vbsl.64 RT7, rf, rg; \
213 \
214 vadd.u64 RT1, RT1, rh; \
215 vshr.u64 RT2, ra, #28; \
216 vshl.u64 RT3, ra, #64 - 28; \
217 vadd.u64 RT1, RT1, RT0; \
218 vshr.u64 RT4, ra, #34; \
219 vshl.u64 RT5, ra, #64 - 34; \
220 vadd.u64 RT1, RT1, RT7; \
221 \
222 /* h = Sum0 (a) + Maj (a, b, c); */ \
223 veor.64 RT23q, RT23q, RT45q; \
224 vshr.u64 RT4, ra, #39; \
225 vshl.u64 RT5, ra, #64 - 39; \
226 veor.64 RT0, ra, rb; \
227 veor.64 RT23q, RT23q, RT45q; \
228 vbsl.64 RT0, rc, rb; \
229 vadd.u64 rd, rd, RT1; /* d+=t1; */ \
230 veor.64 rh, RT2, RT3; \
231 \
232 /* t1 = g + Sum1 (d) + Ch (d, e, f) + k[t] + w[t]; */ \
233 vshr.u64 RT2, rd, #14; \
234 vshl.u64 RT3, rd, #64 - 14; \
235 vadd.u64 rh, rh, RT0; \
236 vshr.u64 RT4, rd, #18; \
237 vshl.u64 RT5, rd, #64 - 18; \
238 vadd.u64 rh, rh, RT1; /* h+=t1; */ \
239 vld1.64 {RT0}, [RK]!; \
240 veor.64 RT23q, RT23q, RT45q; \
241 vshr.u64 RT4, rd, #41; \
242 vshl.u64 RT5, rd, #64 - 41; \
243 vadd.u64 RT0, RT0, rw1; \
244 veor.64 RT23q, RT23q, RT45q; \
245 vmov.64 RT7, rd; \
246 veor.64 RT1, RT2, RT3; \
247 vbsl.64 RT7, re, rf; \
248 \
249 vadd.u64 RT1, RT1, rg; \
250 vshr.u64 RT2, rh, #28; \
251 vshl.u64 RT3, rh, #64 - 28; \
252 vadd.u64 RT1, RT1, RT0; \
253 vshr.u64 RT4, rh, #34; \
254 vshl.u64 RT5, rh, #64 - 34; \
255 vadd.u64 RT1, RT1, RT7; \
256 \
257 /* g = Sum0 (h) + Maj (h, a, b); */ \
258 veor.64 RT23q, RT23q, RT45q; \
259 vshr.u64 RT4, rh, #39; \
260 vshl.u64 RT5, rh, #64 - 39; \
261 veor.64 RT0, rh, ra; \
262 veor.64 RT23q, RT23q, RT45q; \
263 vbsl.64 RT0, rb, ra; \
264 vadd.u64 rc, rc, RT1; /* c+=t1; */ \
265 veor.64 rg, RT2, RT3;
266#define vadd_rg_RT0(rg) \
267 vadd.u64 rg, rg, RT0;
268#define vadd_rg_RT1(rg) \
269 vadd.u64 rg, rg, RT1; /* g+=t1; */
270
271.align 3
272ENTRY(sha512_transform_neon)
273 /* Input:
274 * %r0: SHA512_CONTEXT
275 * %r1: data
276 * %r2: u64 k[] constants
277 * %r3: nblks
278 */
279 push {%lr};
280
281 mov %lr, #0;
282
283 /* Load context to d0-d7 */
284 vld1.64 {RA-RD}, [%r0]!;
285 vld1.64 {RE-RH}, [%r0];
286 sub %r0, #(4*8);
287
288 /* Load input to w[16], d16-d31 */
289 /* NOTE: Assumes that on ARMv7 unaligned accesses are always allowed. */
290 vld1.64 {RW0-RW3}, [%r1]!;
291 vld1.64 {RW4-RW7}, [%r1]!;
292 vld1.64 {RW8-RW11}, [%r1]!;
293 vld1.64 {RW12-RW15}, [%r1]!;
294#ifdef __ARMEL__
295 /* byteswap */
296 vrev64.8 RW01q, RW01q;
297 vrev64.8 RW23q, RW23q;
298 vrev64.8 RW45q, RW45q;
299 vrev64.8 RW67q, RW67q;
300 vrev64.8 RW89q, RW89q;
301 vrev64.8 RW1011q, RW1011q;
302 vrev64.8 RW1213q, RW1213q;
303 vrev64.8 RW1415q, RW1415q;
304#endif
305
306 /* EABI says that d8-d15 must be preserved by callee. */
307 /*vpush {RT0-RT7};*/
308
309.Loop:
310 rounds2_0_63(RA, RB, RC, RD, RE, RF, RG, RH, RW0, RW1, RW01q, RW2,
311 RW23q, RW1415q, RW9, RW10, dummy, _);
312 b .Lenter_rounds;
313
314.Loop_rounds:
315 rounds2_0_63(RA, RB, RC, RD, RE, RF, RG, RH, RW0, RW1, RW01q, RW2,
316 RW23q, RW1415q, RW9, RW10, vadd_RT01q, RW1415q);
317.Lenter_rounds:
318 rounds2_0_63(RG, RH, RA, RB, RC, RD, RE, RF, RW2, RW3, RW23q, RW4,
319 RW45q, RW01q, RW11, RW12, vadd_RT01q, RW01q);
320 rounds2_0_63(RE, RF, RG, RH, RA, RB, RC, RD, RW4, RW5, RW45q, RW6,
321 RW67q, RW23q, RW13, RW14, vadd_RT01q, RW23q);
322 rounds2_0_63(RC, RD, RE, RF, RG, RH, RA, RB, RW6, RW7, RW67q, RW8,
323 RW89q, RW45q, RW15, RW0, vadd_RT01q, RW45q);
324 rounds2_0_63(RA, RB, RC, RD, RE, RF, RG, RH, RW8, RW9, RW89q, RW10,
325 RW1011q, RW67q, RW1, RW2, vadd_RT01q, RW67q);
326 rounds2_0_63(RG, RH, RA, RB, RC, RD, RE, RF, RW10, RW11, RW1011q, RW12,
327 RW1213q, RW89q, RW3, RW4, vadd_RT01q, RW89q);
328 add %lr, #16;
329 rounds2_0_63(RE, RF, RG, RH, RA, RB, RC, RD, RW12, RW13, RW1213q, RW14,
330 RW1415q, RW1011q, RW5, RW6, vadd_RT01q, RW1011q);
331 cmp %lr, #64;
332 rounds2_0_63(RC, RD, RE, RF, RG, RH, RA, RB, RW14, RW15, RW1415q, RW0,
333 RW01q, RW1213q, RW7, RW8, vadd_RT01q, RW1213q);
334 bne .Loop_rounds;
335
336 subs %r3, #1;
337
338 rounds2_64_79(RA, RB, RC, RD, RE, RF, RG, RH, RW0, RW1,
339 vadd_RT01q, RW1415q, dummy, _);
340 rounds2_64_79(RG, RH, RA, RB, RC, RD, RE, RF, RW2, RW3,
341 vadd_rg_RT0, RG, vadd_rg_RT1, RG);
342 beq .Lhandle_tail;
343 vld1.64 {RW0-RW3}, [%r1]!;
344 rounds2_64_79(RE, RF, RG, RH, RA, RB, RC, RD, RW4, RW5,
345 vadd_rg_RT0, RE, vadd_rg_RT1, RE);
346 rounds2_64_79(RC, RD, RE, RF, RG, RH, RA, RB, RW6, RW7,
347 vadd_rg_RT0, RC, vadd_rg_RT1, RC);
348#ifdef __ARMEL__
349 vrev64.8 RW01q, RW01q;
350 vrev64.8 RW23q, RW23q;
351#endif
352 vld1.64 {RW4-RW7}, [%r1]!;
353 rounds2_64_79(RA, RB, RC, RD, RE, RF, RG, RH, RW8, RW9,
354 vadd_rg_RT0, RA, vadd_rg_RT1, RA);
355 rounds2_64_79(RG, RH, RA, RB, RC, RD, RE, RF, RW10, RW11,
356 vadd_rg_RT0, RG, vadd_rg_RT1, RG);
357#ifdef __ARMEL__
358 vrev64.8 RW45q, RW45q;
359 vrev64.8 RW67q, RW67q;
360#endif
361 vld1.64 {RW8-RW11}, [%r1]!;
362 rounds2_64_79(RE, RF, RG, RH, RA, RB, RC, RD, RW12, RW13,
363 vadd_rg_RT0, RE, vadd_rg_RT1, RE);
364 rounds2_64_79(RC, RD, RE, RF, RG, RH, RA, RB, RW14, RW15,
365 vadd_rg_RT0, RC, vadd_rg_RT1, RC);
366#ifdef __ARMEL__
367 vrev64.8 RW89q, RW89q;
368 vrev64.8 RW1011q, RW1011q;
369#endif
370 vld1.64 {RW12-RW15}, [%r1]!;
371 vadd_rg_RT0(RA);
372 vadd_rg_RT1(RA);
373
374 /* Load context */
375 vld1.64 {RT0-RT3}, [%r0]!;
376 vld1.64 {RT4-RT7}, [%r0];
377 sub %r0, #(4*8);
378
379#ifdef __ARMEL__
380 vrev64.8 RW1213q, RW1213q;
381 vrev64.8 RW1415q, RW1415q;
382#endif
383
384 vadd.u64 RA, RT0;
385 vadd.u64 RB, RT1;
386 vadd.u64 RC, RT2;
387 vadd.u64 RD, RT3;
388 vadd.u64 RE, RT4;
389 vadd.u64 RF, RT5;
390 vadd.u64 RG, RT6;
391 vadd.u64 RH, RT7;
392
393 /* Store the first half of context */
394 vst1.64 {RA-RD}, [%r0]!;
395 sub RK, $(8*80);
396 vst1.64 {RE-RH}, [%r0]; /* Store the last half of context */
397 mov %lr, #0;
398 sub %r0, #(4*8);
399
400 b .Loop;
401
402.Lhandle_tail:
403 rounds2_64_79(RE, RF, RG, RH, RA, RB, RC, RD, RW4, RW5,
404 vadd_rg_RT0, RE, vadd_rg_RT1, RE);
405 rounds2_64_79(RC, RD, RE, RF, RG, RH, RA, RB, RW6, RW7,
406 vadd_rg_RT0, RC, vadd_rg_RT1, RC);
407 rounds2_64_79(RA, RB, RC, RD, RE, RF, RG, RH, RW8, RW9,
408 vadd_rg_RT0, RA, vadd_rg_RT1, RA);
409 rounds2_64_79(RG, RH, RA, RB, RC, RD, RE, RF, RW10, RW11,
410 vadd_rg_RT0, RG, vadd_rg_RT1, RG);
411 rounds2_64_79(RE, RF, RG, RH, RA, RB, RC, RD, RW12, RW13,
412 vadd_rg_RT0, RE, vadd_rg_RT1, RE);
413 rounds2_64_79(RC, RD, RE, RF, RG, RH, RA, RB, RW14, RW15,
414 vadd_rg_RT0, RC, vadd_rg_RT1, RC);
415
416 /* Load context to d16-d23 */
417 vld1.64 {RW0-RW3}, [%r0]!;
418 vadd_rg_RT0(RA);
419 vld1.64 {RW4-RW7}, [%r0];
420 vadd_rg_RT1(RA);
421 sub %r0, #(4*8);
422
423 vadd.u64 RA, RW0;
424 vadd.u64 RB, RW1;
425 vadd.u64 RC, RW2;
426 vadd.u64 RD, RW3;
427 vadd.u64 RE, RW4;
428 vadd.u64 RF, RW5;
429 vadd.u64 RG, RW6;
430 vadd.u64 RH, RW7;
431
432 /* Store the first half of context */
433 vst1.64 {RA-RD}, [%r0]!;
434
435 /* Clear used registers */
436 /* d16-d31 */
437 veor.u64 RW01q, RW01q;
438 veor.u64 RW23q, RW23q;
439 veor.u64 RW45q, RW45q;
440 veor.u64 RW67q, RW67q;
441 vst1.64 {RE-RH}, [%r0]; /* Store the last half of context */
442 veor.u64 RW89q, RW89q;
443 veor.u64 RW1011q, RW1011q;
444 veor.u64 RW1213q, RW1213q;
445 veor.u64 RW1415q, RW1415q;
446 /* d8-d15 */
447 /*vpop {RT0-RT7};*/
448 /* d0-d7 (q0-q3) */
449 veor.u64 %q0, %q0;
450 veor.u64 %q1, %q1;
451 veor.u64 %q2, %q2;
452 veor.u64 %q3, %q3;
453
454 pop {%pc};
455ENDPROC(sha512_transform_neon)
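
To help read the interleaved vector code above: rounds2_0_63/rounds2_64_79 process two SHA-512 rounds per invocation, build every 64-bit rotate from a vshr.u64/vshl.u64 pair, and use vbsl for the Ch and Maj selects, renaming the working variables between invocations instead of rotating them. Below is a plain C reference of the two per-round equations quoted in the macro comments, a sketch of standard FIPS 180-4 SHA-512 rather than code from this patch:

#include <stdint.h>

static inline uint64_t ror64(uint64_t x, unsigned int n)
{
	return (x >> n) | (x << (64 - n));
}

/* One scalar SHA-512 round; the NEON macros above do two of these at once. */
static void sha512_round(uint64_t s[8], uint64_t kt, uint64_t wt)
{
	uint64_t a = s[0], b = s[1], c = s[2], d = s[3];
	uint64_t e = s[4], f = s[5], g = s[6], h = s[7];

	/* t1 = h + Sum1(e) + Ch(e, f, g) + k[t] + w[t] */
	uint64_t sum1 = ror64(e, 14) ^ ror64(e, 18) ^ ror64(e, 41);
	uint64_t ch   = (e & f) ^ (~e & g);
	uint64_t t1   = h + sum1 + ch + kt + wt;

	/* t2 = Sum0(a) + Maj(a, b, c) */
	uint64_t sum0 = ror64(a, 28) ^ ror64(a, 34) ^ ror64(a, 39);
	uint64_t maj  = (a & b) ^ (a & c) ^ (b & c);
	uint64_t t2   = sum0 + maj;

	/* rotate the working variables: d += t1, new h = t1 + t2 */
	s[7] = g; s[6] = f; s[5] = e; s[4] = d + t1;
	s[3] = c; s[2] = b; s[1] = a; s[0] = t1 + t2;
}
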
diff --git a/arch/arm/crypto/sha512_neon_glue.c b/arch/arm/crypto/sha512_neon_glue.c
new file mode 100644
index 000000000000..0d2758ff5e12
--- /dev/null
+++ b/arch/arm/crypto/sha512_neon_glue.c
@@ -0,0 +1,305 @@
1/*
2 * Glue code for the SHA512 Secure Hash Algorithm assembly implementation
3 * using NEON instructions.
4 *
5 * Copyright © 2014 Jussi Kivilinna <jussi.kivilinna@iki.fi>
6 *
7 * This file is based on sha512_ssse3_glue.c:
8 * Copyright (C) 2013 Intel Corporation
9 * Author: Tim Chen <tim.c.chen@linux.intel.com>
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the Free
13 * Software Foundation; either version 2 of the License, or (at your option)
14 * any later version.
15 *
16 */
17
18#include <crypto/internal/hash.h>
19#include <linux/init.h>
20#include <linux/module.h>
21#include <linux/mm.h>
22#include <linux/cryptohash.h>
23#include <linux/types.h>
24#include <linux/string.h>
25#include <crypto/sha.h>
26#include <asm/byteorder.h>
27#include <asm/simd.h>
28#include <asm/neon.h>
29
30
31static const u64 sha512_k[] = {
32 0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL,
33 0xb5c0fbcfec4d3b2fULL, 0xe9b5dba58189dbbcULL,
34 0x3956c25bf348b538ULL, 0x59f111f1b605d019ULL,
35 0x923f82a4af194f9bULL, 0xab1c5ed5da6d8118ULL,
36 0xd807aa98a3030242ULL, 0x12835b0145706fbeULL,
37 0x243185be4ee4b28cULL, 0x550c7dc3d5ffb4e2ULL,
38 0x72be5d74f27b896fULL, 0x80deb1fe3b1696b1ULL,
39 0x9bdc06a725c71235ULL, 0xc19bf174cf692694ULL,
40 0xe49b69c19ef14ad2ULL, 0xefbe4786384f25e3ULL,
41 0x0fc19dc68b8cd5b5ULL, 0x240ca1cc77ac9c65ULL,
42 0x2de92c6f592b0275ULL, 0x4a7484aa6ea6e483ULL,
43 0x5cb0a9dcbd41fbd4ULL, 0x76f988da831153b5ULL,
44 0x983e5152ee66dfabULL, 0xa831c66d2db43210ULL,
45 0xb00327c898fb213fULL, 0xbf597fc7beef0ee4ULL,
46 0xc6e00bf33da88fc2ULL, 0xd5a79147930aa725ULL,
47 0x06ca6351e003826fULL, 0x142929670a0e6e70ULL,
48 0x27b70a8546d22ffcULL, 0x2e1b21385c26c926ULL,
49 0x4d2c6dfc5ac42aedULL, 0x53380d139d95b3dfULL,
50 0x650a73548baf63deULL, 0x766a0abb3c77b2a8ULL,
51 0x81c2c92e47edaee6ULL, 0x92722c851482353bULL,
52 0xa2bfe8a14cf10364ULL, 0xa81a664bbc423001ULL,
53 0xc24b8b70d0f89791ULL, 0xc76c51a30654be30ULL,
54 0xd192e819d6ef5218ULL, 0xd69906245565a910ULL,
55 0xf40e35855771202aULL, 0x106aa07032bbd1b8ULL,
56 0x19a4c116b8d2d0c8ULL, 0x1e376c085141ab53ULL,
57 0x2748774cdf8eeb99ULL, 0x34b0bcb5e19b48a8ULL,
58 0x391c0cb3c5c95a63ULL, 0x4ed8aa4ae3418acbULL,
59 0x5b9cca4f7763e373ULL, 0x682e6ff3d6b2b8a3ULL,
60 0x748f82ee5defb2fcULL, 0x78a5636f43172f60ULL,
61 0x84c87814a1f0ab72ULL, 0x8cc702081a6439ecULL,
62 0x90befffa23631e28ULL, 0xa4506cebde82bde9ULL,
63 0xbef9a3f7b2c67915ULL, 0xc67178f2e372532bULL,
64 0xca273eceea26619cULL, 0xd186b8c721c0c207ULL,
65 0xeada7dd6cde0eb1eULL, 0xf57d4f7fee6ed178ULL,
66 0x06f067aa72176fbaULL, 0x0a637dc5a2c898a6ULL,
67 0x113f9804bef90daeULL, 0x1b710b35131c471bULL,
68 0x28db77f523047d84ULL, 0x32caab7b40c72493ULL,
69 0x3c9ebe0a15c9bebcULL, 0x431d67c49c100d4cULL,
70 0x4cc5d4becb3e42b6ULL, 0x597f299cfc657e2aULL,
71 0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL
72};
73
74
75asmlinkage void sha512_transform_neon(u64 *digest, const void *data,
76 const u64 k[], unsigned int num_blks);
77
78
79static int sha512_neon_init(struct shash_desc *desc)
80{
81 struct sha512_state *sctx = shash_desc_ctx(desc);
82
83 sctx->state[0] = SHA512_H0;
84 sctx->state[1] = SHA512_H1;
85 sctx->state[2] = SHA512_H2;
86 sctx->state[3] = SHA512_H3;
87 sctx->state[4] = SHA512_H4;
88 sctx->state[5] = SHA512_H5;
89 sctx->state[6] = SHA512_H6;
90 sctx->state[7] = SHA512_H7;
91 sctx->count[0] = sctx->count[1] = 0;
92
93 return 0;
94}
95
96static int __sha512_neon_update(struct shash_desc *desc, const u8 *data,
97 unsigned int len, unsigned int partial)
98{
99 struct sha512_state *sctx = shash_desc_ctx(desc);
100 unsigned int done = 0;
101
102 sctx->count[0] += len;
103 if (sctx->count[0] < len)
104 sctx->count[1]++;
105
106 if (partial) {
107 done = SHA512_BLOCK_SIZE - partial;
108 memcpy(sctx->buf + partial, data, done);
109 sha512_transform_neon(sctx->state, sctx->buf, sha512_k, 1);
110 }
111
112 if (len - done >= SHA512_BLOCK_SIZE) {
113 const unsigned int rounds = (len - done) / SHA512_BLOCK_SIZE;
114
115 sha512_transform_neon(sctx->state, data + done, sha512_k,
116 rounds);
117
118 done += rounds * SHA512_BLOCK_SIZE;
119 }
120
121 memcpy(sctx->buf, data + done, len - done);
122
123 return 0;
124}
125
126static int sha512_neon_update(struct shash_desc *desc, const u8 *data,
127 unsigned int len)
128{
129 struct sha512_state *sctx = shash_desc_ctx(desc);
130 unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE;
131 int res;
132
133 /* Handle the fast case right here */
134 if (partial + len < SHA512_BLOCK_SIZE) {
135 sctx->count[0] += len;
136 if (sctx->count[0] < len)
137 sctx->count[1]++;
138 memcpy(sctx->buf + partial, data, len);
139
140 return 0;
141 }
142
143 if (!may_use_simd()) {
144 res = crypto_sha512_update(desc, data, len);
145 } else {
146 kernel_neon_begin();
147 res = __sha512_neon_update(desc, data, len, partial);
148 kernel_neon_end();
149 }
150
151 return res;
152}
153
154
155/* Add padding and return the message digest. */
156static int sha512_neon_final(struct shash_desc *desc, u8 *out)
157{
158 struct sha512_state *sctx = shash_desc_ctx(desc);
159 unsigned int i, index, padlen;
160 __be64 *dst = (__be64 *)out;
161 __be64 bits[2];
162 static const u8 padding[SHA512_BLOCK_SIZE] = { 0x80, };
163
164 /* save number of bits */
165 bits[1] = cpu_to_be64(sctx->count[0] << 3);
166 bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61);
167
168 /* Pad out to 112 mod 128 and append length */
169 index = sctx->count[0] & 0x7f;
170 padlen = (index < 112) ? (112 - index) : ((128+112) - index);
171
172 if (!may_use_simd()) {
173 crypto_sha512_update(desc, padding, padlen);
174 crypto_sha512_update(desc, (const u8 *)&bits, sizeof(bits));
175 } else {
176 kernel_neon_begin();
177 /* We need to fill a whole block for __sha512_neon_update() */
178 if (padlen <= 112) {
179 sctx->count[0] += padlen;
180 if (sctx->count[0] < padlen)
181 sctx->count[1]++;
182 memcpy(sctx->buf + index, padding, padlen);
183 } else {
184 __sha512_neon_update(desc, padding, padlen, index);
185 }
186 __sha512_neon_update(desc, (const u8 *)&bits,
187 sizeof(bits), 112);
188 kernel_neon_end();
189 }
190
191 /* Store state in digest */
192 for (i = 0; i < 8; i++)
193 dst[i] = cpu_to_be64(sctx->state[i]);
194
195 /* Wipe context */
196 memset(sctx, 0, sizeof(*sctx));
197
198 return 0;
199}
200
201static int sha512_neon_export(struct shash_desc *desc, void *out)
202{
203 struct sha512_state *sctx = shash_desc_ctx(desc);
204
205 memcpy(out, sctx, sizeof(*sctx));
206
207 return 0;
208}
209
210static int sha512_neon_import(struct shash_desc *desc, const void *in)
211{
212 struct sha512_state *sctx = shash_desc_ctx(desc);
213
214 memcpy(sctx, in, sizeof(*sctx));
215
216 return 0;
217}
218
219static int sha384_neon_init(struct shash_desc *desc)
220{
221 struct sha512_state *sctx = shash_desc_ctx(desc);
222
223 sctx->state[0] = SHA384_H0;
224 sctx->state[1] = SHA384_H1;
225 sctx->state[2] = SHA384_H2;
226 sctx->state[3] = SHA384_H3;
227 sctx->state[4] = SHA384_H4;
228 sctx->state[5] = SHA384_H5;
229 sctx->state[6] = SHA384_H6;
230 sctx->state[7] = SHA384_H7;
231
232 sctx->count[0] = sctx->count[1] = 0;
233
234 return 0;
235}
236
237static int sha384_neon_final(struct shash_desc *desc, u8 *hash)
238{
239 u8 D[SHA512_DIGEST_SIZE];
240
241 sha512_neon_final(desc, D);
242
243 memcpy(hash, D, SHA384_DIGEST_SIZE);
244 memset(D, 0, SHA512_DIGEST_SIZE);
245
246 return 0;
247}
248
249static struct shash_alg algs[] = { {
250 .digestsize = SHA512_DIGEST_SIZE,
251 .init = sha512_neon_init,
252 .update = sha512_neon_update,
253 .final = sha512_neon_final,
254 .export = sha512_neon_export,
255 .import = sha512_neon_import,
256 .descsize = sizeof(struct sha512_state),
257 .statesize = sizeof(struct sha512_state),
258 .base = {
259 .cra_name = "sha512",
260 .cra_driver_name = "sha512-neon",
261 .cra_priority = 250,
262 .cra_flags = CRYPTO_ALG_TYPE_SHASH,
263 .cra_blocksize = SHA512_BLOCK_SIZE,
264 .cra_module = THIS_MODULE,
265 }
266}, {
267 .digestsize = SHA384_DIGEST_SIZE,
268 .init = sha384_neon_init,
269 .update = sha512_neon_update,
270 .final = sha384_neon_final,
271 .export = sha512_neon_export,
272 .import = sha512_neon_import,
273 .descsize = sizeof(struct sha512_state),
274 .statesize = sizeof(struct sha512_state),
275 .base = {
276 .cra_name = "sha384",
277 .cra_driver_name = "sha384-neon",
278 .cra_priority = 250,
279 .cra_flags = CRYPTO_ALG_TYPE_SHASH,
280 .cra_blocksize = SHA384_BLOCK_SIZE,
281 .cra_module = THIS_MODULE,
282 }
283} };
284
285static int __init sha512_neon_mod_init(void)
286{
287 if (!cpu_has_neon())
288 return -ENODEV;
289
290 return crypto_register_shashes(algs, ARRAY_SIZE(algs));
291}
292
293static void __exit sha512_neon_mod_fini(void)
294{
295 crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
296}
297
298module_init(sha512_neon_mod_init);
299module_exit(sha512_neon_mod_fini);
300
301MODULE_LICENSE("GPL");
302MODULE_DESCRIPTION("SHA512 Secure Hash Algorithm, NEON accelerated");
303
304MODULE_ALIAS("sha512");
305MODULE_ALIAS("sha384");
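
A hedged sketch of how a kernel caller reaches these algorithms once the module is loaded (sha512_digest_example() is a made-up name; the crypto_shash calls are the standard shash API). Because .cra_priority is 250, higher than the generic C implementation, the crypto core hands out this NEON driver for "sha512" whenever it is registered:

#include <crypto/hash.h>
#include <crypto/sha.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>

static int sha512_digest_example(const u8 *data, unsigned int len,
				 u8 out[SHA512_DIGEST_SIZE])
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int err;

	tfm = crypto_alloc_shash("sha512", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		crypto_free_shash(tfm);
		return -ENOMEM;
	}
	desc->tfm = tfm;

	err = crypto_shash_digest(desc, data, len, out);	/* init+update+final */

	kfree(desc);
	crypto_free_shash(tfm);
	return err;
}
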
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 57f0584e8d97..f67fd3afebdf 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -24,6 +24,8 @@
24#include <asm/domain.h> 24#include <asm/domain.h>
25#include <asm/opcodes-virt.h> 25#include <asm/opcodes-virt.h>
26#include <asm/asm-offsets.h> 26#include <asm/asm-offsets.h>
27#include <asm/page.h>
28#include <asm/thread_info.h>
27 29
28#define IOMEM(x) (x) 30#define IOMEM(x) (x)
29 31
@@ -179,10 +181,10 @@
179 * Get current thread_info. 181 * Get current thread_info.
180 */ 182 */
181 .macro get_thread_info, rd 183 .macro get_thread_info, rd
182 ARM( mov \rd, sp, lsr #13 ) 184 ARM( mov \rd, sp, lsr #THREAD_SIZE_ORDER + PAGE_SHIFT )
183 THUMB( mov \rd, sp ) 185 THUMB( mov \rd, sp )
184 THUMB( lsr \rd, \rd, #13 ) 186 THUMB( lsr \rd, \rd, #THREAD_SIZE_ORDER + PAGE_SHIFT )
185 mov \rd, \rd, lsl #13 187 mov \rd, \rd, lsl #THREAD_SIZE_ORDER + PAGE_SHIFT
186 .endm 188 .endm
187 189
188/* 190/*
@@ -425,4 +427,25 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
425#endif 427#endif
426 .endm 428 .endm
427 429
430 .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
431 .macro ret\c, reg
432#if __LINUX_ARM_ARCH__ < 6
433 mov\c pc, \reg
434#else
435 .ifeqs "\reg", "lr"
436 bx\c \reg
437 .else
438 mov\c pc, \reg
439 .endif
440#endif
441 .endm
442 .endr
443
444 .macro ret.w, reg
445 ret \reg
446#ifdef CONFIG_THUMB2_KERNEL
447 nop
448#endif
449 .endm
450
428#endif /* __ASM_ASSEMBLER_H__ */ 451#endif /* __ASM_ASSEMBLER_H__ */
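
The get_thread_info change above and the THREAD_SIZE change in thread_info.h further down are two halves of the same cleanup: the shift count is now derived from THREAD_SIZE_ORDER + PAGE_SHIFT instead of a hard-coded 13. A sketch of the arithmetic only (the EXAMPLE_* names are stand-ins, assuming 4 KiB pages as before):

/* lsr then lsl by (THREAD_SIZE_ORDER + PAGE_SHIFT) == masking off the low bits */
#define EXAMPLE_PAGE_SHIFT		12	/* 4 KiB pages */
#define EXAMPLE_THREAD_SIZE_ORDER	1
#define EXAMPLE_THREAD_SIZE \
	(1UL << (EXAMPLE_THREAD_SIZE_ORDER + EXAMPLE_PAGE_SHIFT))	/* 8192 */

static inline unsigned long thread_info_base(unsigned long sp)
{
	/* round sp down to the 8 KiB stack base, where thread_info lives */
	return sp & ~(EXAMPLE_THREAD_SIZE - 1UL);
}
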
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 8c2b7321a478..963a2515906d 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -62,17 +62,18 @@
62#define ARM_CPU_IMP_ARM 0x41 62#define ARM_CPU_IMP_ARM 0x41
63#define ARM_CPU_IMP_INTEL 0x69 63#define ARM_CPU_IMP_INTEL 0x69
64 64
65#define ARM_CPU_PART_ARM1136 0xB360 65/* ARM implemented processors */
66#define ARM_CPU_PART_ARM1156 0xB560 66#define ARM_CPU_PART_ARM1136 0x4100b360
67#define ARM_CPU_PART_ARM1176 0xB760 67#define ARM_CPU_PART_ARM1156 0x4100b560
68#define ARM_CPU_PART_ARM11MPCORE 0xB020 68#define ARM_CPU_PART_ARM1176 0x4100b760
69#define ARM_CPU_PART_CORTEX_A8 0xC080 69#define ARM_CPU_PART_ARM11MPCORE 0x4100b020
70#define ARM_CPU_PART_CORTEX_A9 0xC090 70#define ARM_CPU_PART_CORTEX_A8 0x4100c080
71#define ARM_CPU_PART_CORTEX_A5 0xC050 71#define ARM_CPU_PART_CORTEX_A9 0x4100c090
72#define ARM_CPU_PART_CORTEX_A15 0xC0F0 72#define ARM_CPU_PART_CORTEX_A5 0x4100c050
73#define ARM_CPU_PART_CORTEX_A7 0xC070 73#define ARM_CPU_PART_CORTEX_A7 0x4100c070
74#define ARM_CPU_PART_CORTEX_A12 0xC0D0 74#define ARM_CPU_PART_CORTEX_A12 0x4100c0d0
75#define ARM_CPU_PART_CORTEX_A17 0xC0E0 75#define ARM_CPU_PART_CORTEX_A17 0x4100c0e0
76#define ARM_CPU_PART_CORTEX_A15 0x4100c0f0
76 77
77#define ARM_CPU_XSCALE_ARCH_MASK 0xe000 78#define ARM_CPU_XSCALE_ARCH_MASK 0xe000
78#define ARM_CPU_XSCALE_ARCH_V1 0x2000 79#define ARM_CPU_XSCALE_ARCH_V1 0x2000
@@ -171,14 +172,24 @@ static inline unsigned int __attribute_const__ read_cpuid_implementor(void)
171 return (read_cpuid_id() & 0xFF000000) >> 24; 172 return (read_cpuid_id() & 0xFF000000) >> 24;
172} 173}
173 174
174static inline unsigned int __attribute_const__ read_cpuid_part_number(void) 175/*
176 * The CPU part number is meaningless without referring to the CPU
177 * implementer: implementers are free to define their own part numbers
178 * which are permitted to clash with other implementers' part numbers.

179 */
180static inline unsigned int __attribute_const__ read_cpuid_part(void)
181{
182 return read_cpuid_id() & 0xff00fff0;
183}
184
185static inline unsigned int __attribute_const__ __deprecated read_cpuid_part_number(void)
175{ 186{
176 return read_cpuid_id() & 0xFFF0; 187 return read_cpuid_id() & 0xFFF0;
177} 188}
178 189
179static inline unsigned int __attribute_const__ xscale_cpu_arch_version(void) 190static inline unsigned int __attribute_const__ xscale_cpu_arch_version(void)
180{ 191{
181 return read_cpuid_part_number() & ARM_CPU_XSCALE_ARCH_MASK; 192 return read_cpuid_id() & ARM_CPU_XSCALE_ARCH_MASK;
182} 193}
183 194
184static inline unsigned int __attribute_const__ read_cpuid_cachetype(void) 195static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
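
What the new 0xff00fff0 mask keeps: the implementer field in MIDR[31:24] and the part number in MIDR[15:4], dropping variant and revision. A user-space sketch (the MIDR value is just an example, a Cortex-A9 r3p0):

#include <stdint.h>
#include <stdio.h>

#define ARM_CPU_PART_CORTEX_A9	0x4100c090

static uint32_t cpuid_part(uint32_t midr)
{
	return midr & 0xff00fff0;	/* implementer + part number only */
}

int main(void)
{
	uint32_t midr = 0x413fc090;	/* example MIDR: Cortex-A9, r3p0 */

	/* matches any revision, and cannot match another implementer's 0xc09 */
	printf("is A9: %d\n", cpuid_part(midr) == ARM_CPU_PART_CORTEX_A9);
	return 0;
}
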
diff --git a/arch/arm/include/asm/crypto/sha1.h b/arch/arm/include/asm/crypto/sha1.h
new file mode 100644
index 000000000000..75e6a417416b
--- /dev/null
+++ b/arch/arm/include/asm/crypto/sha1.h
@@ -0,0 +1,10 @@
1#ifndef ASM_ARM_CRYPTO_SHA1_H
2#define ASM_ARM_CRYPTO_SHA1_H
3
4#include <linux/crypto.h>
5#include <crypto/sha.h>
6
7extern int sha1_update_arm(struct shash_desc *desc, const u8 *data,
8 unsigned int len);
9
10#endif
diff --git a/arch/arm/include/asm/entry-macro-multi.S b/arch/arm/include/asm/entry-macro-multi.S
index 88d61815f0c0..469a2b30fa27 100644
--- a/arch/arm/include/asm/entry-macro-multi.S
+++ b/arch/arm/include/asm/entry-macro-multi.S
@@ -35,5 +35,5 @@
35\symbol_name: 35\symbol_name:
36 mov r8, lr 36 mov r8, lr
37 arch_irq_handler_default 37 arch_irq_handler_default
38 mov pc, r8 38 ret r8
39 .endm 39 .endm
diff --git a/arch/arm/include/asm/glue-proc.h b/arch/arm/include/asm/glue-proc.h
index 74a8b84f3cb1..74be7c22035a 100644
--- a/arch/arm/include/asm/glue-proc.h
+++ b/arch/arm/include/asm/glue-proc.h
@@ -221,15 +221,6 @@
221# endif 221# endif
222#endif 222#endif
223 223
224#ifdef CONFIG_CPU_V7
225# ifdef CPU_NAME
226# undef MULTI_CPU
227# define MULTI_CPU
228# else
229# define CPU_NAME cpu_v7
230# endif
231#endif
232
233#ifdef CONFIG_CPU_V7M 224#ifdef CONFIG_CPU_V7M
234# ifdef CPU_NAME 225# ifdef CPU_NAME
235# undef MULTI_CPU 226# undef MULTI_CPU
@@ -248,6 +239,15 @@
248# endif 239# endif
249#endif 240#endif
250 241
242#ifdef CONFIG_CPU_V7
243/*
244 * Cortex-A9 needs a different suspend/resume function, so we need
245 * multiple CPU support for ARMv7 anyway.
246 */
247# undef MULTI_CPU
248# define MULTI_CPU
249#endif
250
251#ifndef MULTI_CPU 251#ifndef MULTI_CPU
252#define cpu_proc_init __glue(CPU_NAME,_proc_init) 252#define cpu_proc_init __glue(CPU_NAME,_proc_init)
253#define cpu_proc_fin __glue(CPU_NAME,_proc_fin) 253#define cpu_proc_fin __glue(CPU_NAME,_proc_fin)
diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h
index 94060adba174..57ff7f2a3084 100644
--- a/arch/arm/include/asm/mcpm.h
+++ b/arch/arm/include/asm/mcpm.h
@@ -217,6 +217,22 @@ int __mcpm_cluster_state(unsigned int cluster);
217int __init mcpm_sync_init( 217int __init mcpm_sync_init(
218 void (*power_up_setup)(unsigned int affinity_level)); 218 void (*power_up_setup)(unsigned int affinity_level));
219 219
220/**
221 * mcpm_loopback - make a run through the MCPM low-level code
222 *
223 * @cache_disable: pointer to function performing cache disabling
224 *
225 * This exercises the MCPM machinery by soft resetting the CPU and branching
226 * to the MCPM low-level entry code before returning to the caller.
227 * The @cache_disable function must do the necessary cache disabling to
228 * let the regular kernel init code turn it back on as if the CPU was
229 * hotplugged in. The MCPM state machine is set as if the cluster was
230 * initialized meaning the power_up_setup callback passed to mcpm_sync_init()
231 * will be invoked for all affinity levels. This may be useful to initialize
232 * some resources, such as the CCI, which can only be enabled while the cache is off, or simply for testing purposes.
233 */
234int __init mcpm_loopback(void (*cache_disable)(void));
235
220void __init mcpm_smp_set_ops(void); 236void __init mcpm_smp_set_ops(void);
221 237
222#else 238#else
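
A hedged sketch only of how a platform MCPM backend might use the new helper; both functions below are hypothetical, and a real backend would pass its existing cache-exit sequence as the callback:

static void example_exit_coherency(void)
{
	/* platform-specific: clean and disable the local data cache */
}

static int __init example_mcpm_init(void)
{
	/* ... register the platform's mcpm_platform_ops as usual ... */

	/* run once through the MCPM entry path so power_up_setup executes */
	return mcpm_loopback(example_exit_coherency);
}
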
diff --git a/arch/arm/include/asm/mcs_spinlock.h b/arch/arm/include/asm/mcs_spinlock.h
new file mode 100644
index 000000000000..f652ad65840a
--- /dev/null
+++ b/arch/arm/include/asm/mcs_spinlock.h
@@ -0,0 +1,23 @@
1#ifndef __ASM_MCS_LOCK_H
2#define __ASM_MCS_LOCK_H
3
4#ifdef CONFIG_SMP
5#include <asm/spinlock.h>
6
7/* MCS spin-locking. */
8#define arch_mcs_spin_lock_contended(lock) \
9do { \
10 /* Ensure prior stores are observed before we enter wfe. */ \
11 smp_mb(); \
12 while (!(smp_load_acquire(lock))) \
13 wfe(); \
14} while (0) \
15
16#define arch_mcs_spin_unlock_contended(lock) \
17do { \
18 smp_store_release(lock, 1); \
19 dsb_sev(); \
20} while (0)
21
22#endif /* CONFIG_SMP */
23#endif /* __ASM_MCS_LOCK_H */
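
For context, a simplified sketch of how the generic MCS slow path is expected to use these two hooks (the struct layout mirrors kernel/locking/mcs_spinlock.h; mcs_wait() and mcs_pass() are illustrative names, not the generic code itself): a waiter parks in WFE until its predecessor publishes the handoff with a store-release followed by dsb_sev().

struct example_mcs_node {
	struct example_mcs_node *next;
	int locked;		/* set to 1 when the lock is handed over */
};

static void mcs_wait(struct example_mcs_node *node)
{
	/* spins in wfe() until the previous holder stores node->locked = 1 */
	arch_mcs_spin_lock_contended(&node->locked);
}

static void mcs_pass(struct example_mcs_node *next)
{
	/* smp_store_release() publishes the handoff, dsb_sev() wakes the wfe */
	arch_mcs_spin_unlock_contended(&next->locked);
}
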
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 2b751464d6ff..04ccf1c0a1af 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -150,13 +150,11 @@
150 150
151/* 151/*
152 * PLAT_PHYS_OFFSET is the offset (from zero) of the start of physical 152 * PLAT_PHYS_OFFSET is the offset (from zero) of the start of physical
153 * memory. This is used for XIP and NoMMU kernels, or by kernels which 153 * memory. This is used for XIP and NoMMU kernels, and on platforms that don't
154 * have their own mach/memory.h. Assembly code must always use 154 * have CONFIG_ARM_PATCH_PHYS_VIRT. Assembly code must always use
155 * PLAT_PHYS_OFFSET and not PHYS_OFFSET. 155 * PLAT_PHYS_OFFSET and not PHYS_OFFSET.
156 */ 156 */
157#ifndef PLAT_PHYS_OFFSET
158#define PLAT_PHYS_OFFSET UL(CONFIG_PHYS_OFFSET) 157#define PLAT_PHYS_OFFSET UL(CONFIG_PHYS_OFFSET)
159#endif
160 158
161#ifndef __ASSEMBLY__ 159#ifndef __ASSEMBLY__
162 160
diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
index 626989fec4d3..9fd61c72a33a 100644
--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
@@ -43,7 +43,7 @@
43#define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2) 43#define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
44#define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3) 44#define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
45#define PMD_SECT_USER (_AT(pmdval_t, 1) << 6) /* AP[1] */ 45#define PMD_SECT_USER (_AT(pmdval_t, 1) << 6) /* AP[1] */
46#define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7) /* AP[2] */ 46#define PMD_SECT_AP2 (_AT(pmdval_t, 1) << 7) /* read only */
47#define PMD_SECT_S (_AT(pmdval_t, 3) << 8) 47#define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
48#define PMD_SECT_AF (_AT(pmdval_t, 1) << 10) 48#define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
49#define PMD_SECT_nG (_AT(pmdval_t, 1) << 11) 49#define PMD_SECT_nG (_AT(pmdval_t, 1) << 11)
@@ -72,6 +72,7 @@
72#define PTE_TABLE_BIT (_AT(pteval_t, 1) << 1) 72#define PTE_TABLE_BIT (_AT(pteval_t, 1) << 1)
73#define PTE_BUFFERABLE (_AT(pteval_t, 1) << 2) /* AttrIndx[0] */ 73#define PTE_BUFFERABLE (_AT(pteval_t, 1) << 2) /* AttrIndx[0] */
74#define PTE_CACHEABLE (_AT(pteval_t, 1) << 3) /* AttrIndx[1] */ 74#define PTE_CACHEABLE (_AT(pteval_t, 1) << 3) /* AttrIndx[1] */
75#define PTE_AP2 (_AT(pteval_t, 1) << 7) /* AP[2] */
75#define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */ 76#define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
76#define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */ 77#define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
77#define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */ 78#define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 85c60adc8b60..06e0bc0f8b00 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -79,18 +79,19 @@
79#define L_PTE_PRESENT (_AT(pteval_t, 3) << 0) /* Present */ 79#define L_PTE_PRESENT (_AT(pteval_t, 3) << 0) /* Present */
80#define L_PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !PRESENT */ 80#define L_PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !PRESENT */
81#define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */ 81#define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
82#define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
83#define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */ 82#define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
84#define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */ 83#define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
85#define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */ 84#define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
86#define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) /* unused */ 85#define L_PTE_DIRTY (_AT(pteval_t, 1) << 55)
87#define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */ 86#define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56)
88#define L_PTE_NONE (_AT(pteval_t, 1) << 57) /* PROT_NONE */ 87#define L_PTE_NONE (_AT(pteval_t, 1) << 57) /* PROT_NONE */
88#define L_PTE_RDONLY (_AT(pteval_t, 1) << 58) /* READ ONLY */
89 89
90#define PMD_SECT_VALID (_AT(pmdval_t, 1) << 0) 90#define L_PMD_SECT_VALID (_AT(pmdval_t, 1) << 0)
91#define PMD_SECT_DIRTY (_AT(pmdval_t, 1) << 55) 91#define L_PMD_SECT_DIRTY (_AT(pmdval_t, 1) << 55)
92#define PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 56) 92#define L_PMD_SECT_SPLITTING (_AT(pmdval_t, 1) << 56)
93#define PMD_SECT_NONE (_AT(pmdval_t, 1) << 57) 93#define L_PMD_SECT_NONE (_AT(pmdval_t, 1) << 57)
94#define L_PMD_SECT_RDONLY (_AT(pteval_t, 1) << 58)
94 95
95/* 96/*
96 * To be used in assembly code with the upper page attributes. 97 * To be used in assembly code with the upper page attributes.
@@ -207,27 +208,32 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
207#define pte_huge(pte) (pte_val(pte) && !(pte_val(pte) & PTE_TABLE_BIT)) 208#define pte_huge(pte) (pte_val(pte) && !(pte_val(pte) & PTE_TABLE_BIT))
208#define pte_mkhuge(pte) (__pte(pte_val(pte) & ~PTE_TABLE_BIT)) 209#define pte_mkhuge(pte) (__pte(pte_val(pte) & ~PTE_TABLE_BIT))
209 210
210#define pmd_young(pmd) (pmd_val(pmd) & PMD_SECT_AF) 211#define pmd_isset(pmd, val) ((u32)(val) == (val) ? pmd_val(pmd) & (val) \
212 : !!(pmd_val(pmd) & (val)))
213#define pmd_isclear(pmd, val) (!(pmd_val(pmd) & (val)))
214
215#define pmd_young(pmd) (pmd_isset((pmd), PMD_SECT_AF))
211 216
212#define __HAVE_ARCH_PMD_WRITE 217#define __HAVE_ARCH_PMD_WRITE
213#define pmd_write(pmd) (!(pmd_val(pmd) & PMD_SECT_RDONLY)) 218#define pmd_write(pmd) (pmd_isclear((pmd), L_PMD_SECT_RDONLY))
219#define pmd_dirty(pmd) (pmd_isset((pmd), L_PMD_SECT_DIRTY))
214 220
215#define pmd_hugewillfault(pmd) (!pmd_young(pmd) || !pmd_write(pmd)) 221#define pmd_hugewillfault(pmd) (!pmd_young(pmd) || !pmd_write(pmd))
216#define pmd_thp_or_huge(pmd) (pmd_huge(pmd) || pmd_trans_huge(pmd)) 222#define pmd_thp_or_huge(pmd) (pmd_huge(pmd) || pmd_trans_huge(pmd))
217 223
218#ifdef CONFIG_TRANSPARENT_HUGEPAGE 224#ifdef CONFIG_TRANSPARENT_HUGEPAGE
219#define pmd_trans_huge(pmd) (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT)) 225#define pmd_trans_huge(pmd) (pmd_val(pmd) && !pmd_table(pmd))
220#define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_SECT_SPLITTING) 226#define pmd_trans_splitting(pmd) (pmd_isset((pmd), L_PMD_SECT_SPLITTING))
221#endif 227#endif
222 228
223#define PMD_BIT_FUNC(fn,op) \ 229#define PMD_BIT_FUNC(fn,op) \
224static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; } 230static inline pmd_t pmd_##fn(pmd_t pmd) { pmd_val(pmd) op; return pmd; }
225 231
226PMD_BIT_FUNC(wrprotect, |= PMD_SECT_RDONLY); 232PMD_BIT_FUNC(wrprotect, |= L_PMD_SECT_RDONLY);
227PMD_BIT_FUNC(mkold, &= ~PMD_SECT_AF); 233PMD_BIT_FUNC(mkold, &= ~PMD_SECT_AF);
228PMD_BIT_FUNC(mksplitting, |= PMD_SECT_SPLITTING); 234PMD_BIT_FUNC(mksplitting, |= L_PMD_SECT_SPLITTING);
229PMD_BIT_FUNC(mkwrite, &= ~PMD_SECT_RDONLY); 235PMD_BIT_FUNC(mkwrite, &= ~L_PMD_SECT_RDONLY);
230PMD_BIT_FUNC(mkdirty, |= PMD_SECT_DIRTY); 236PMD_BIT_FUNC(mkdirty, |= L_PMD_SECT_DIRTY);
231PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF); 237PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF);
232 238
233#define pmd_mkhuge(pmd) (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT)) 239#define pmd_mkhuge(pmd) (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
@@ -241,8 +247,8 @@ PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF);
241 247
242static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) 248static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
243{ 249{
244 const pmdval_t mask = PMD_SECT_USER | PMD_SECT_XN | PMD_SECT_RDONLY | 250 const pmdval_t mask = PMD_SECT_USER | PMD_SECT_XN | L_PMD_SECT_RDONLY |
245 PMD_SECT_VALID | PMD_SECT_NONE; 251 L_PMD_SECT_VALID | L_PMD_SECT_NONE;
246 pmd_val(pmd) = (pmd_val(pmd) & ~mask) | (pgprot_val(newprot) & mask); 252 pmd_val(pmd) = (pmd_val(pmd) & ~mask) | (pgprot_val(newprot) & mask);
247 return pmd; 253 return pmd;
248} 254}
@@ -253,8 +259,13 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
253 BUG_ON(addr >= TASK_SIZE); 259 BUG_ON(addr >= TASK_SIZE);
254 260
255 /* create a faulting entry if PROT_NONE protected */ 261 /* create a faulting entry if PROT_NONE protected */
256 if (pmd_val(pmd) & PMD_SECT_NONE) 262 if (pmd_val(pmd) & L_PMD_SECT_NONE)
257 pmd_val(pmd) &= ~PMD_SECT_VALID; 263 pmd_val(pmd) &= ~L_PMD_SECT_VALID;
264
265 if (pmd_write(pmd) && pmd_dirty(pmd))
266 pmd_val(pmd) &= ~PMD_SECT_AP2;
267 else
268 pmd_val(pmd) |= PMD_SECT_AP2;
258 269
259 *pmdp = __pmd(pmd_val(pmd) | PMD_SECT_nG); 270 *pmdp = __pmd(pmd_val(pmd) | PMD_SECT_nG);
260 flush_pmd_entry(pmdp); 271 flush_pmd_entry(pmdp);
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 5478e5d6ad89..01baef07cd0c 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -214,18 +214,22 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
214 214
215#define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0) 215#define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0)
216 216
217#define pte_isset(pte, val) ((u32)(val) == (val) ? pte_val(pte) & (val) \
218 : !!(pte_val(pte) & (val)))
219#define pte_isclear(pte, val) (!(pte_val(pte) & (val)))
220
217#define pte_none(pte) (!pte_val(pte)) 221#define pte_none(pte) (!pte_val(pte))
218#define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT) 222#define pte_present(pte) (pte_isset((pte), L_PTE_PRESENT))
219#define pte_valid(pte) (pte_val(pte) & L_PTE_VALID) 223#define pte_valid(pte) (pte_isset((pte), L_PTE_VALID))
220#define pte_accessible(mm, pte) (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte)) 224#define pte_accessible(mm, pte) (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
221#define pte_write(pte) (!(pte_val(pte) & L_PTE_RDONLY)) 225#define pte_write(pte) (pte_isclear((pte), L_PTE_RDONLY))
222#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY) 226#define pte_dirty(pte) (pte_isset((pte), L_PTE_DIRTY))
223#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG) 227#define pte_young(pte) (pte_isset((pte), L_PTE_YOUNG))
224#define pte_exec(pte) (!(pte_val(pte) & L_PTE_XN)) 228#define pte_exec(pte) (pte_isclear((pte), L_PTE_XN))
225#define pte_special(pte) (0) 229#define pte_special(pte) (0)
226 230
227#define pte_valid_user(pte) \ 231#define pte_valid_user(pte) \
228 (pte_valid(pte) && (pte_val(pte) & L_PTE_USER) && pte_young(pte)) 232 (pte_valid(pte) && pte_isset((pte), L_PTE_USER) && pte_young(pte))
229 233
230#if __LINUX_ARM_ARCH__ < 6 234#if __LINUX_ARM_ARCH__ < 6
231static inline void __sync_icache_dcache(pte_t pteval) 235static inline void __sync_icache_dcache(pte_t pteval)
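
The pte_isset()/pmd_isset() helpers above exist because, with LPAE, pteval_t is 64-bit and some flags (L_PTE_XN, and now L_PTE_RDONLY at bit 58) live above bit 31; returning the raw AND through an int-sized expression silently truncates them to zero. A standalone sketch of the failure mode and the fix (the X_* names are stand-ins; truncation behaviour shown is that of a typical two's-complement target):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t pteval_t;			/* LPAE: 64-bit descriptors */

#define X_PTE_YOUNG	((pteval_t)1 << 10)	/* fits in 32 bits */
#define X_PTE_RDONLY	((pteval_t)1 << 58)	/* lives above bit 31 */

/* same shape as the kernel's pte_isset(), minus pte_val() */
#define x_isset(pte, val) \
	((uint32_t)(val) == (val) ? (pte) & (val) : !!((pte) & (val)))

int main(void)
{
	pteval_t pte = X_PTE_YOUNG | X_PTE_RDONLY;

	int truncated = (int)(pte & X_PTE_RDONLY);	 /* high bits lost: 0 */
	int correct   = (int)x_isset(pte, X_PTE_RDONLY); /* collapsed by !!: 1 */

	printf("truncated=%d correct=%d\n", truncated, correct);
	return 0;
}
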
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index c877654fe3bf..601264d983fa 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -84,6 +84,12 @@ static inline long regs_return_value(struct pt_regs *regs)
84 84
85#define instruction_pointer(regs) (regs)->ARM_pc 85#define instruction_pointer(regs) (regs)->ARM_pc
86 86
87#ifdef CONFIG_THUMB2_KERNEL
88#define frame_pointer(regs) (regs)->ARM_r7
89#else
90#define frame_pointer(regs) (regs)->ARM_fp
91#endif
92
87static inline void instruction_pointer_set(struct pt_regs *regs, 93static inline void instruction_pointer_set(struct pt_regs *regs,
88 unsigned long val) 94 unsigned long val)
89{ 95{
diff --git a/arch/arm/include/asm/smp_scu.h b/arch/arm/include/asm/smp_scu.h
index 0393fbab8dd5..bfe163c40024 100644
--- a/arch/arm/include/asm/smp_scu.h
+++ b/arch/arm/include/asm/smp_scu.h
@@ -11,7 +11,7 @@
11 11
12static inline bool scu_a9_has_base(void) 12static inline bool scu_a9_has_base(void)
13{ 13{
14 return read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9; 14 return read_cpuid_part() == ARM_CPU_PART_CORTEX_A9;
15} 15}
16 16
17static inline unsigned long scu_a9_get_base(void) 17static inline unsigned long scu_a9_get_base(void)
diff --git a/arch/arm/include/asm/stacktrace.h b/arch/arm/include/asm/stacktrace.h
index 4d0a16441b29..7722201ead19 100644
--- a/arch/arm/include/asm/stacktrace.h
+++ b/arch/arm/include/asm/stacktrace.h
@@ -1,13 +1,28 @@
1#ifndef __ASM_STACKTRACE_H 1#ifndef __ASM_STACKTRACE_H
2#define __ASM_STACKTRACE_H 2#define __ASM_STACKTRACE_H
3 3
4#include <asm/ptrace.h>
5
4struct stackframe { 6struct stackframe {
7 /*
8 * FP member should hold R7 when CONFIG_THUMB2_KERNEL is enabled
9 * and R11 otherwise.
10 */
5 unsigned long fp; 11 unsigned long fp;
6 unsigned long sp; 12 unsigned long sp;
7 unsigned long lr; 13 unsigned long lr;
8 unsigned long pc; 14 unsigned long pc;
9}; 15};
10 16
17static __always_inline
18void arm_get_current_stackframe(struct pt_regs *regs, struct stackframe *frame)
19{
20 frame->fp = frame_pointer(regs);
21 frame->sp = regs->ARM_sp;
22 frame->lr = regs->ARM_lr;
23 frame->pc = regs->ARM_pc;
24}
25
11extern int unwind_frame(struct stackframe *frame); 26extern int unwind_frame(struct stackframe *frame);
12extern void walk_stackframe(struct stackframe *frame, 27extern void walk_stackframe(struct stackframe *frame,
13 int (*fn)(struct stackframe *, void *), void *data); 28 int (*fn)(struct stackframe *, void *), void *data);
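
A short sketch of the intended use (print_frame() and dump_from_regs() are made-up names): a backtrace-from-regs path can now fill the frame with arm_get_current_stackframe(), which picks R7 or R11 via frame_pointer() depending on CONFIG_THUMB2_KERNEL, and then hand it to walk_stackframe():

#include <linux/printk.h>
#include <asm/ptrace.h>
#include <asm/stacktrace.h>

static int print_frame(struct stackframe *frame, void *data)
{
	pr_info("  pc: %pS\n", (void *)frame->pc);
	return 0;				/* keep walking */
}

static void dump_from_regs(struct pt_regs *regs)
{
	struct stackframe frame;

	arm_get_current_stackframe(regs, &frame);
	walk_stackframe(&frame, print_frame, NULL);
}
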
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index e4e4208a9130..fc44d3761f9e 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -14,9 +14,10 @@
14 14
15#include <linux/compiler.h> 15#include <linux/compiler.h>
16#include <asm/fpstate.h> 16#include <asm/fpstate.h>
17#include <asm/page.h>
17 18
18#define THREAD_SIZE_ORDER 1 19#define THREAD_SIZE_ORDER 1
19#define THREAD_SIZE 8192 20#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
20#define THREAD_START_SP (THREAD_SIZE - 8) 21#define THREAD_START_SP (THREAD_SIZE - 8)
21 22
22#ifndef __ASSEMBLY__ 23#ifndef __ASSEMBLY__
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 75d95799b6e6..7057cf8b87d0 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -107,6 +107,8 @@ static inline void set_fs(mm_segment_t fs)
107extern int __get_user_1(void *); 107extern int __get_user_1(void *);
108extern int __get_user_2(void *); 108extern int __get_user_2(void *);
109extern int __get_user_4(void *); 109extern int __get_user_4(void *);
110extern int __get_user_lo8(void *);
111extern int __get_user_8(void *);
110 112
111#define __GUP_CLOBBER_1 "lr", "cc" 113#define __GUP_CLOBBER_1 "lr", "cc"
112#ifdef CONFIG_CPU_USE_DOMAINS 114#ifdef CONFIG_CPU_USE_DOMAINS
@@ -115,6 +117,8 @@ extern int __get_user_4(void *);
115#define __GUP_CLOBBER_2 "lr", "cc" 117#define __GUP_CLOBBER_2 "lr", "cc"
116#endif 118#endif
117#define __GUP_CLOBBER_4 "lr", "cc" 119#define __GUP_CLOBBER_4 "lr", "cc"
120#define __GUP_CLOBBER_lo8 "lr", "cc"
121#define __GUP_CLOBBER_8 "lr", "cc"
118 122
119#define __get_user_x(__r2,__p,__e,__l,__s) \ 123#define __get_user_x(__r2,__p,__e,__l,__s) \
120 __asm__ __volatile__ ( \ 124 __asm__ __volatile__ ( \
@@ -125,11 +129,19 @@ extern int __get_user_4(void *);
125 : "0" (__p), "r" (__l) \ 129 : "0" (__p), "r" (__l) \
126 : __GUP_CLOBBER_##__s) 130 : __GUP_CLOBBER_##__s)
127 131
132/* narrowing a double-word get into a single 32bit word register: */
133#ifdef __ARMEB__
134#define __get_user_xb(__r2, __p, __e, __l, __s) \
135 __get_user_x(__r2, __p, __e, __l, lo8)
136#else
137#define __get_user_xb __get_user_x
138#endif
139
128#define __get_user_check(x,p) \ 140#define __get_user_check(x,p) \
129 ({ \ 141 ({ \
130 unsigned long __limit = current_thread_info()->addr_limit - 1; \ 142 unsigned long __limit = current_thread_info()->addr_limit - 1; \
131 register const typeof(*(p)) __user *__p asm("r0") = (p);\ 143 register const typeof(*(p)) __user *__p asm("r0") = (p);\
132 register unsigned long __r2 asm("r2"); \ 144 register typeof(x) __r2 asm("r2"); \
133 register unsigned long __l asm("r1") = __limit; \ 145 register unsigned long __l asm("r1") = __limit; \
134 register int __e asm("r0"); \ 146 register int __e asm("r0"); \
135 switch (sizeof(*(__p))) { \ 147 switch (sizeof(*(__p))) { \
@@ -142,6 +154,12 @@ extern int __get_user_4(void *);
142 case 4: \ 154 case 4: \
143 __get_user_x(__r2, __p, __e, __l, 4); \ 155 __get_user_x(__r2, __p, __e, __l, 4); \
144 break; \ 156 break; \
157 case 8: \
158 if (sizeof((x)) < 8) \
159 __get_user_xb(__r2, __p, __e, __l, 4); \
160 else \
161 __get_user_x(__r2, __p, __e, __l, 8); \
162 break; \
145 default: __e = __get_user_bad(); break; \ 163 default: __e = __get_user_bad(); break; \
146 } \ 164 } \
147 x = (typeof(*(p))) __r2; \ 165 x = (typeof(*(p))) __r2; \
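
A hedged sketch of what the new case 8 permits (example_ioctl() is made up; get_user() is the real API): a 64-bit user value can now be fetched in one call, and a narrower kernel-side variable receives only the low 32 bits, taken from offset 0 on little-endian and, via __get_user_lo8, from offset 4 on big-endian:

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

static long example_ioctl(u64 __user *uarg)
{
	u64 full;
	u32 low;

	if (get_user(full, uarg))		/* 64-bit load: __get_user_8 */
		return -EFAULT;

	if (get_user(low, uarg))		/* narrowed load: __get_user_xb */
		return -EFAULT;

	/* both paths must agree on the low word, whatever the endianness */
	return (low == (u32)full) ? 0 : -EINVAL;
}
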
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 43876245fc57..21ca0cebcab0 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -15,7 +15,17 @@
15 15
16#include <uapi/asm/unistd.h> 16#include <uapi/asm/unistd.h>
17 17
18/*
19 * This may need to be greater than __NR_last_syscall+1 in order to
20 * account for the padding in the syscall table
21 */
18#define __NR_syscalls (384) 22#define __NR_syscalls (384)
23
24/*
25 * *NOTE*: This is a ghost syscall private to the kernel. Only the
26 * __kuser_cmpxchg code in entry-armv.S should be aware of its
27 * existence. Don't ever use this from user code.
28 */
19#define __ARM_NR_cmpxchg (__ARM_NR_BASE+0x00fff0) 29#define __ARM_NR_cmpxchg (__ARM_NR_BASE+0x00fff0)
20 30
21#define __ARCH_WANT_STAT64 31#define __ARCH_WANT_STAT64
diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h
index ba94446c72d9..acd5b66ea3aa 100644
--- a/arch/arm/include/uapi/asm/unistd.h
+++ b/arch/arm/include/uapi/asm/unistd.h
@@ -411,11 +411,6 @@
411#define __NR_renameat2 (__NR_SYSCALL_BASE+382) 411#define __NR_renameat2 (__NR_SYSCALL_BASE+382)
412 412
413/* 413/*
414 * This may need to be greater than __NR_last_syscall+1 in order to
415 * account for the padding in the syscall table
416 */
417
418/*
419 * The following SWIs are ARM private. 414 * The following SWIs are ARM private.
420 */ 415 */
421#define __ARM_NR_BASE (__NR_SYSCALL_BASE+0x0f0000) 416#define __ARM_NR_BASE (__NR_SYSCALL_BASE+0x0f0000)
@@ -426,12 +421,6 @@
426#define __ARM_NR_set_tls (__ARM_NR_BASE+5) 421#define __ARM_NR_set_tls (__ARM_NR_BASE+5)
427 422
428/* 423/*
429 * *NOTE*: This is a ghost syscall private to the kernel. Only the
430 * __kuser_cmpxchg code in entry-armv.S should be aware of its
431 * existence. Don't ever use this from user code.
432 */
433
434/*
435 * The following syscalls are obsolete and no longer available for EABI. 424 * The following syscalls are obsolete and no longer available for EABI.
436 */ 425 */
437#if !defined(__KERNEL__) 426#if !defined(__KERNEL__)
diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S
index 14f7c3b14632..78c91b5f97d4 100644
--- a/arch/arm/kernel/debug.S
+++ b/arch/arm/kernel/debug.S
@@ -90,7 +90,7 @@ ENTRY(printascii)
90 ldrneb r1, [r0], #1 90 ldrneb r1, [r0], #1
91 teqne r1, #0 91 teqne r1, #0
92 bne 1b 92 bne 1b
93 mov pc, lr 93 ret lr
94ENDPROC(printascii) 94ENDPROC(printascii)
95 95
96ENTRY(printch) 96ENTRY(printch)
@@ -105,7 +105,7 @@ ENTRY(debug_ll_addr)
105 addruart r2, r3, ip 105 addruart r2, r3, ip
106 str r2, [r0] 106 str r2, [r0]
107 str r3, [r1] 107 str r3, [r1]
108 mov pc, lr 108 ret lr
109ENDPROC(debug_ll_addr) 109ENDPROC(debug_ll_addr)
110#endif 110#endif
111 111
@@ -116,7 +116,7 @@ ENTRY(printascii)
116 mov r0, #0x04 @ SYS_WRITE0 116 mov r0, #0x04 @ SYS_WRITE0
117 ARM( svc #0x123456 ) 117 ARM( svc #0x123456 )
118 THUMB( svc #0xab ) 118 THUMB( svc #0xab )
119 mov pc, lr 119 ret lr
120ENDPROC(printascii) 120ENDPROC(printascii)
121 121
122ENTRY(printch) 122ENTRY(printch)
@@ -125,14 +125,14 @@ ENTRY(printch)
125 mov r0, #0x03 @ SYS_WRITEC 125 mov r0, #0x03 @ SYS_WRITEC
126 ARM( svc #0x123456 ) 126 ARM( svc #0x123456 )
127 THUMB( svc #0xab ) 127 THUMB( svc #0xab )
128 mov pc, lr 128 ret lr
129ENDPROC(printch) 129ENDPROC(printch)
130 130
131ENTRY(debug_ll_addr) 131ENTRY(debug_ll_addr)
132 mov r2, #0 132 mov r2, #0
133 str r2, [r0] 133 str r2, [r0]
134 str r2, [r1] 134 str r2, [r1]
135 mov pc, lr 135 ret lr
136ENDPROC(debug_ll_addr) 136ENDPROC(debug_ll_addr)
137 137
138#endif 138#endif
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 52a949a8077d..36276cdccfbc 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -224,7 +224,7 @@ svc_preempt:
2241: bl preempt_schedule_irq @ irq en/disable is done inside 2241: bl preempt_schedule_irq @ irq en/disable is done inside
225 ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS 225 ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS
226 tst r0, #_TIF_NEED_RESCHED 226 tst r0, #_TIF_NEED_RESCHED
227 moveq pc, r8 @ go again 227 reteq r8 @ go again
228 b 1b 228 b 1b
229#endif 229#endif
230 230
@@ -490,7 +490,7 @@ ENDPROC(__und_usr)
490 .pushsection .fixup, "ax" 490 .pushsection .fixup, "ax"
491 .align 2 491 .align 2
4924: str r4, [sp, #S_PC] @ retry current instruction 4924: str r4, [sp, #S_PC] @ retry current instruction
493 mov pc, r9 493 ret r9
494 .popsection 494 .popsection
495 .pushsection __ex_table,"a" 495 .pushsection __ex_table,"a"
496 .long 1b, 4b 496 .long 1b, 4b
@@ -552,7 +552,7 @@ call_fpe:
552#endif 552#endif
553 tst r0, #0x08000000 @ only CDP/CPRT/LDC/STC have bit 27 553 tst r0, #0x08000000 @ only CDP/CPRT/LDC/STC have bit 27
554 tstne r0, #0x04000000 @ bit 26 set on both ARM and Thumb-2 554 tstne r0, #0x04000000 @ bit 26 set on both ARM and Thumb-2
555 moveq pc, lr 555 reteq lr
556 and r8, r0, #0x00000f00 @ mask out CP number 556 and r8, r0, #0x00000f00 @ mask out CP number
557 THUMB( lsr r8, r8, #8 ) 557 THUMB( lsr r8, r8, #8 )
558 mov r7, #1 558 mov r7, #1
@@ -571,33 +571,33 @@ call_fpe:
571 THUMB( add pc, r8 ) 571 THUMB( add pc, r8 )
572 nop 572 nop
573 573
574 movw_pc lr @ CP#0 574 ret.w lr @ CP#0
575 W(b) do_fpe @ CP#1 (FPE) 575 W(b) do_fpe @ CP#1 (FPE)
576 W(b) do_fpe @ CP#2 (FPE) 576 W(b) do_fpe @ CP#2 (FPE)
577 movw_pc lr @ CP#3 577 ret.w lr @ CP#3
578#ifdef CONFIG_CRUNCH 578#ifdef CONFIG_CRUNCH
579 b crunch_task_enable @ CP#4 (MaverickCrunch) 579 b crunch_task_enable @ CP#4 (MaverickCrunch)
580 b crunch_task_enable @ CP#5 (MaverickCrunch) 580 b crunch_task_enable @ CP#5 (MaverickCrunch)
581 b crunch_task_enable @ CP#6 (MaverickCrunch) 581 b crunch_task_enable @ CP#6 (MaverickCrunch)
582#else 582#else
583 movw_pc lr @ CP#4 583 ret.w lr @ CP#4
584 movw_pc lr @ CP#5 584 ret.w lr @ CP#5
585 movw_pc lr @ CP#6 585 ret.w lr @ CP#6
586#endif 586#endif
587 movw_pc lr @ CP#7 587 ret.w lr @ CP#7
588 movw_pc lr @ CP#8 588 ret.w lr @ CP#8
589 movw_pc lr @ CP#9 589 ret.w lr @ CP#9
590#ifdef CONFIG_VFP 590#ifdef CONFIG_VFP
591 W(b) do_vfp @ CP#10 (VFP) 591 W(b) do_vfp @ CP#10 (VFP)
592 W(b) do_vfp @ CP#11 (VFP) 592 W(b) do_vfp @ CP#11 (VFP)
593#else 593#else
594 movw_pc lr @ CP#10 (VFP) 594 ret.w lr @ CP#10 (VFP)
595 movw_pc lr @ CP#11 (VFP) 595 ret.w lr @ CP#11 (VFP)
596#endif 596#endif
597 movw_pc lr @ CP#12 597 ret.w lr @ CP#12
598 movw_pc lr @ CP#13 598 ret.w lr @ CP#13
599 movw_pc lr @ CP#14 (Debug) 599 ret.w lr @ CP#14 (Debug)
600 movw_pc lr @ CP#15 (Control) 600 ret.w lr @ CP#15 (Control)
601 601
602#ifdef NEED_CPU_ARCHITECTURE 602#ifdef NEED_CPU_ARCHITECTURE
603 .align 2 603 .align 2
@@ -649,7 +649,7 @@ ENTRY(fp_enter)
649 .popsection 649 .popsection
650 650
651ENTRY(no_fp) 651ENTRY(no_fp)
652 mov pc, lr 652 ret lr
653ENDPROC(no_fp) 653ENDPROC(no_fp)
654 654
655__und_usr_fault_32: 655__und_usr_fault_32:
@@ -745,7 +745,7 @@ ENDPROC(__switch_to)
745#ifdef CONFIG_ARM_THUMB 745#ifdef CONFIG_ARM_THUMB
746 bx \reg 746 bx \reg
747#else 747#else
748 mov pc, \reg 748 ret \reg
749#endif 749#endif
750 .endm 750 .endm
751 751
@@ -837,7 +837,7 @@ kuser_cmpxchg64_fixup:
837#if __LINUX_ARM_ARCH__ < 6 837#if __LINUX_ARM_ARCH__ < 6
838 bcc kuser_cmpxchg32_fixup 838 bcc kuser_cmpxchg32_fixup
839#endif 839#endif
840 mov pc, lr 840 ret lr
841 .previous 841 .previous
842 842
843#else 843#else
@@ -905,7 +905,7 @@ kuser_cmpxchg32_fixup:
905 subs r8, r4, r7 905 subs r8, r4, r7
906 rsbcss r8, r8, #(2b - 1b) 906 rsbcss r8, r8, #(2b - 1b)
907 strcs r7, [sp, #S_PC] 907 strcs r7, [sp, #S_PC]
908 mov pc, lr 908 ret lr
909 .previous 909 .previous
910 910
911#else 911#else
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 7139d4a7dea7..e52fe5a2d843 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -8,6 +8,7 @@
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10 10
11#include <asm/assembler.h>
11#include <asm/unistd.h> 12#include <asm/unistd.h>
12#include <asm/ftrace.h> 13#include <asm/ftrace.h>
13#include <asm/unwind.h> 14#include <asm/unwind.h>
@@ -88,7 +89,7 @@ ENTRY(ret_from_fork)
88 cmp r5, #0 89 cmp r5, #0
89 movne r0, r4 90 movne r0, r4
90 adrne lr, BSYM(1f) 91 adrne lr, BSYM(1f)
91 movne pc, r5 92 retne r5
921: get_thread_info tsk 931: get_thread_info tsk
93 b ret_slow_syscall 94 b ret_slow_syscall
94ENDPROC(ret_from_fork) 95ENDPROC(ret_from_fork)
@@ -290,7 +291,7 @@ ENDPROC(ftrace_graph_caller_old)
290 291
291.macro mcount_exit 292.macro mcount_exit
292 ldmia sp!, {r0-r3, ip, lr} 293 ldmia sp!, {r0-r3, ip, lr}
293 mov pc, ip 294 ret ip
294.endm 295.endm
295 296
296ENTRY(__gnu_mcount_nc) 297ENTRY(__gnu_mcount_nc)
@@ -298,7 +299,7 @@ UNWIND(.fnstart)
298#ifdef CONFIG_DYNAMIC_FTRACE 299#ifdef CONFIG_DYNAMIC_FTRACE
299 mov ip, lr 300 mov ip, lr
300 ldmia sp!, {lr} 301 ldmia sp!, {lr}
301 mov pc, ip 302 ret ip
302#else 303#else
303 __mcount 304 __mcount
304#endif 305#endif
@@ -333,12 +334,12 @@ return_to_handler:
333 bl ftrace_return_to_handler 334 bl ftrace_return_to_handler
334 mov lr, r0 @ r0 has real ret addr 335 mov lr, r0 @ r0 has real ret addr
335 ldmia sp!, {r0-r3} 336 ldmia sp!, {r0-r3}
336 mov pc, lr 337 ret lr
337#endif 338#endif
338 339
339ENTRY(ftrace_stub) 340ENTRY(ftrace_stub)
340.Lftrace_stub: 341.Lftrace_stub:
341 mov pc, lr 342 ret lr
342ENDPROC(ftrace_stub) 343ENDPROC(ftrace_stub)
343 344
344#endif /* CONFIG_FUNCTION_TRACER */ 345#endif /* CONFIG_FUNCTION_TRACER */
@@ -561,7 +562,7 @@ sys_mmap2:
561 streq r5, [sp, #4] 562 streq r5, [sp, #4]
562 beq sys_mmap_pgoff 563 beq sys_mmap_pgoff
563 mov r0, #-EINVAL 564 mov r0, #-EINVAL
564 mov pc, lr 565 ret lr
565#else 566#else
566 str r5, [sp, #4] 567 str r5, [sp, #4]
567 b sys_mmap_pgoff 568 b sys_mmap_pgoff
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 5d702f8900b1..8db307d0954b 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -240,12 +240,6 @@
240 movs pc, lr @ return & move spsr_svc into cpsr 240 movs pc, lr @ return & move spsr_svc into cpsr
241 .endm 241 .endm
242 242
243 @
244 @ 32-bit wide "mov pc, reg"
245 @
246 .macro movw_pc, reg
247 mov pc, \reg
248 .endm
249#else /* CONFIG_THUMB2_KERNEL */ 243#else /* CONFIG_THUMB2_KERNEL */
250 .macro svc_exit, rpsr, irq = 0 244 .macro svc_exit, rpsr, irq = 0
251 .if \irq != 0 245 .if \irq != 0
@@ -304,14 +298,6 @@
304 movs pc, lr @ return & move spsr_svc into cpsr 298 movs pc, lr @ return & move spsr_svc into cpsr
305 .endm 299 .endm
306#endif /* ifdef CONFIG_CPU_V7M / else */ 300#endif /* ifdef CONFIG_CPU_V7M / else */
307
308 @
309 @ 32-bit wide "mov pc, reg"
310 @
311 .macro movw_pc, reg
312 mov pc, \reg
313 nop
314 .endm
315#endif /* !CONFIG_THUMB2_KERNEL */ 301#endif /* !CONFIG_THUMB2_KERNEL */
316 302
317/* 303/*
diff --git a/arch/arm/kernel/fiqasm.S b/arch/arm/kernel/fiqasm.S
index 207f9d652010..8dd26e1a9bd6 100644
--- a/arch/arm/kernel/fiqasm.S
+++ b/arch/arm/kernel/fiqasm.S
@@ -32,7 +32,7 @@ ENTRY(__set_fiq_regs)
32 ldr lr, [r0] 32 ldr lr, [r0]
33 msr cpsr_c, r1 @ return to SVC mode 33 msr cpsr_c, r1 @ return to SVC mode
34 mov r0, r0 @ avoid hazard prior to ARMv4 34 mov r0, r0 @ avoid hazard prior to ARMv4
35 mov pc, lr 35 ret lr
36ENDPROC(__set_fiq_regs) 36ENDPROC(__set_fiq_regs)
37 37
38ENTRY(__get_fiq_regs) 38ENTRY(__get_fiq_regs)
@@ -45,5 +45,5 @@ ENTRY(__get_fiq_regs)
45 str lr, [r0] 45 str lr, [r0]
46 msr cpsr_c, r1 @ return to SVC mode 46 msr cpsr_c, r1 @ return to SVC mode
47 mov r0, r0 @ avoid hazard prior to ARMv4 47 mov r0, r0 @ avoid hazard prior to ARMv4
48 mov pc, lr 48 ret lr
49ENDPROC(__get_fiq_regs) 49ENDPROC(__get_fiq_regs)
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index 572a38335c96..8733012d231f 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -10,6 +10,7 @@
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 * 11 *
12 */ 12 */
13#include <asm/assembler.h>
13 14
14#define ATAG_CORE 0x54410001 15#define ATAG_CORE 0x54410001
15#define ATAG_CORE_SIZE ((2*4 + 3*4) >> 2) 16#define ATAG_CORE_SIZE ((2*4 + 3*4) >> 2)
@@ -61,10 +62,10 @@ __vet_atags:
61 cmp r5, r6 62 cmp r5, r6
62 bne 1f 63 bne 1f
63 64
642: mov pc, lr @ atag/dtb pointer is ok 652: ret lr @ atag/dtb pointer is ok
65 66
661: mov r2, #0 671: mov r2, #0
67 mov pc, lr 68 ret lr
68ENDPROC(__vet_atags) 69ENDPROC(__vet_atags)
69 70
70/* 71/*
@@ -162,7 +163,7 @@ __lookup_processor_type:
162 cmp r5, r6 163 cmp r5, r6
163 blo 1b 164 blo 1b
164 mov r5, #0 @ unknown processor 165 mov r5, #0 @ unknown processor
1652: mov pc, lr 1662: ret lr
166ENDPROC(__lookup_processor_type) 167ENDPROC(__lookup_processor_type)
167 168
168/* 169/*
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 716249cc2ee1..cc176b67c134 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -82,7 +82,7 @@ ENTRY(stext)
82 adr lr, BSYM(1f) @ return (PIC) address 82 adr lr, BSYM(1f) @ return (PIC) address
83 ARM( add pc, r10, #PROCINFO_INITFUNC ) 83 ARM( add pc, r10, #PROCINFO_INITFUNC )
84 THUMB( add r12, r10, #PROCINFO_INITFUNC ) 84 THUMB( add r12, r10, #PROCINFO_INITFUNC )
85 THUMB( mov pc, r12 ) 85 THUMB( ret r12 )
86 1: b __after_proc_init 86 1: b __after_proc_init
87ENDPROC(stext) 87ENDPROC(stext)
88 88
@@ -119,7 +119,7 @@ ENTRY(secondary_startup)
119 mov r13, r12 @ __secondary_switched address 119 mov r13, r12 @ __secondary_switched address
120 ARM( add pc, r10, #PROCINFO_INITFUNC ) 120 ARM( add pc, r10, #PROCINFO_INITFUNC )
121 THUMB( add r12, r10, #PROCINFO_INITFUNC ) 121 THUMB( add r12, r10, #PROCINFO_INITFUNC )
122 THUMB( mov pc, r12 ) 122 THUMB( ret r12 )
123ENDPROC(secondary_startup) 123ENDPROC(secondary_startup)
124 124
125ENTRY(__secondary_switched) 125ENTRY(__secondary_switched)
@@ -164,7 +164,7 @@ __after_proc_init:
164#endif 164#endif
165 mcr p15, 0, r0, c1, c0, 0 @ write control reg 165 mcr p15, 0, r0, c1, c0, 0 @ write control reg
166#endif /* CONFIG_CPU_CP15 */ 166#endif /* CONFIG_CPU_CP15 */
167 mov pc, r13 167 ret r13
168ENDPROC(__after_proc_init) 168ENDPROC(__after_proc_init)
169 .ltorg 169 .ltorg
170 170
@@ -254,7 +254,7 @@ ENTRY(__setup_mpu)
254 orr r0, r0, #CR_M @ Set SCTRL.M (MPU on) 254 orr r0, r0, #CR_M @ Set SCTRL.M (MPU on)
255 mcr p15, 0, r0, c1, c0, 0 @ Enable MPU 255 mcr p15, 0, r0, c1, c0, 0 @ Enable MPU
256 isb 256 isb
257 mov pc,lr 257 ret lr
258ENDPROC(__setup_mpu) 258ENDPROC(__setup_mpu)
259#endif 259#endif
260#include "head-common.S" 260#include "head-common.S"
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 2c35f0ff2fdc..664eee8c4a26 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -140,7 +140,7 @@ ENTRY(stext)
140 mov r8, r4 @ set TTBR1 to swapper_pg_dir 140 mov r8, r4 @ set TTBR1 to swapper_pg_dir
141 ARM( add pc, r10, #PROCINFO_INITFUNC ) 141 ARM( add pc, r10, #PROCINFO_INITFUNC )
142 THUMB( add r12, r10, #PROCINFO_INITFUNC ) 142 THUMB( add r12, r10, #PROCINFO_INITFUNC )
143 THUMB( mov pc, r12 ) 143 THUMB( ret r12 )
1441: b __enable_mmu 1441: b __enable_mmu
145ENDPROC(stext) 145ENDPROC(stext)
146 .ltorg 146 .ltorg
@@ -335,7 +335,7 @@ __create_page_tables:
335 sub r4, r4, #0x1000 @ point to the PGD table 335 sub r4, r4, #0x1000 @ point to the PGD table
336 mov r4, r4, lsr #ARCH_PGD_SHIFT 336 mov r4, r4, lsr #ARCH_PGD_SHIFT
337#endif 337#endif
338 mov pc, lr 338 ret lr
339ENDPROC(__create_page_tables) 339ENDPROC(__create_page_tables)
340 .ltorg 340 .ltorg
341 .align 341 .align
@@ -383,7 +383,7 @@ ENTRY(secondary_startup)
383 ARM( add pc, r10, #PROCINFO_INITFUNC ) @ initialise processor 383 ARM( add pc, r10, #PROCINFO_INITFUNC ) @ initialise processor
384 @ (return control reg) 384 @ (return control reg)
385 THUMB( add r12, r10, #PROCINFO_INITFUNC ) 385 THUMB( add r12, r10, #PROCINFO_INITFUNC )
386 THUMB( mov pc, r12 ) 386 THUMB( ret r12 )
387ENDPROC(secondary_startup) 387ENDPROC(secondary_startup)
388 388
389 /* 389 /*
@@ -468,7 +468,7 @@ ENTRY(__turn_mmu_on)
468 instr_sync 468 instr_sync
469 mov r3, r3 469 mov r3, r3
470 mov r3, r13 470 mov r3, r13
471 mov pc, r3 471 ret r3
472__turn_mmu_on_end: 472__turn_mmu_on_end:
473ENDPROC(__turn_mmu_on) 473ENDPROC(__turn_mmu_on)
474 .popsection 474 .popsection
@@ -487,7 +487,7 @@ __fixup_smp:
487 orr r4, r4, #0x0000b000 487 orr r4, r4, #0x0000b000
488 orr r4, r4, #0x00000020 @ val 0x4100b020 488 orr r4, r4, #0x00000020 @ val 0x4100b020
489 teq r3, r4 @ ARM 11MPCore? 489 teq r3, r4 @ ARM 11MPCore?
490 moveq pc, lr @ yes, assume SMP 490 reteq lr @ yes, assume SMP
491 491
492 mrc p15, 0, r0, c0, c0, 5 @ read MPIDR 492 mrc p15, 0, r0, c0, c0, 5 @ read MPIDR
493 and r0, r0, #0xc0000000 @ multiprocessing extensions and 493 and r0, r0, #0xc0000000 @ multiprocessing extensions and
@@ -500,7 +500,7 @@ __fixup_smp:
500 orr r4, r4, #0x0000c000 500 orr r4, r4, #0x0000c000
501 orr r4, r4, #0x00000090 501 orr r4, r4, #0x00000090
502 teq r3, r4 @ Check for ARM Cortex-A9 502 teq r3, r4 @ Check for ARM Cortex-A9
503 movne pc, lr @ Not ARM Cortex-A9, 503 retne lr @ Not ARM Cortex-A9,
504 504
505 @ If a future SoC *does* use 0x0 as the PERIPH_BASE, then the 505 @ If a future SoC *does* use 0x0 as the PERIPH_BASE, then the
506 @ below address check will need to be #ifdef'd or equivalent 506 @ below address check will need to be #ifdef'd or equivalent
@@ -512,7 +512,7 @@ __fixup_smp:
512ARM_BE8(rev r0, r0) @ byteswap if big endian 512ARM_BE8(rev r0, r0) @ byteswap if big endian
513 and r0, r0, #0x3 @ number of CPUs 513 and r0, r0, #0x3 @ number of CPUs
514 teq r0, #0x0 @ is 1? 514 teq r0, #0x0 @ is 1?
515 movne pc, lr 515 retne lr
516 516
517__fixup_smp_on_up: 517__fixup_smp_on_up:
518 adr r0, 1f 518 adr r0, 1f
@@ -539,7 +539,7 @@ smp_on_up:
539 .text 539 .text
540__do_fixup_smp_on_up: 540__do_fixup_smp_on_up:
541 cmp r4, r5 541 cmp r4, r5
542 movhs pc, lr 542 reths lr
543 ldmia r4!, {r0, r6} 543 ldmia r4!, {r0, r6}
544 ARM( str r6, [r0, r3] ) 544 ARM( str r6, [r0, r3] )
545 THUMB( add r0, r0, r3 ) 545 THUMB( add r0, r0, r3 )
@@ -672,7 +672,7 @@ ARM_BE8(rev16 ip, ip)
6722: cmp r4, r5 6722: cmp r4, r5
673 ldrcc r7, [r4], #4 @ use branch for delay slot 673 ldrcc r7, [r4], #4 @ use branch for delay slot
674 bcc 1b 674 bcc 1b
675 mov pc, lr 675 ret lr
676#endif 676#endif
677ENDPROC(__fixup_a_pv_table) 677ENDPROC(__fixup_a_pv_table)
678 678
diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S
index 797b1a6a4906..56ce6290c831 100644
--- a/arch/arm/kernel/hyp-stub.S
+++ b/arch/arm/kernel/hyp-stub.S
@@ -99,7 +99,7 @@ ENTRY(__hyp_stub_install_secondary)
99 * immediately. 99 * immediately.
100 */ 100 */
101 compare_cpu_mode_with_primary r4, r5, r6, r7 101 compare_cpu_mode_with_primary r4, r5, r6, r7
102 movne pc, lr 102 retne lr
103 103
104 /* 104 /*
105 * Once we have given up on one CPU, we do not try to install the 105 * Once we have given up on one CPU, we do not try to install the
@@ -111,7 +111,7 @@ ENTRY(__hyp_stub_install_secondary)
111 */ 111 */
112 112
113 cmp r4, #HYP_MODE 113 cmp r4, #HYP_MODE
114 movne pc, lr @ give up if the CPU is not in HYP mode 114 retne lr @ give up if the CPU is not in HYP mode
115 115
116/* 116/*
117 * Configure HSCTLR to set correct exception endianness/instruction set 117 * Configure HSCTLR to set correct exception endianness/instruction set
@@ -201,7 +201,7 @@ ENDPROC(__hyp_get_vectors)
201 @ fall through 201 @ fall through
202ENTRY(__hyp_set_vectors) 202ENTRY(__hyp_set_vectors)
203 __HVC(0) 203 __HVC(0)
204 mov pc, lr 204 ret lr
205ENDPROC(__hyp_set_vectors) 205ENDPROC(__hyp_set_vectors)
206 206
207#ifndef ZIMAGE 207#ifndef ZIMAGE
diff --git a/arch/arm/kernel/iwmmxt.S b/arch/arm/kernel/iwmmxt.S
index 2b32978ae905..ad58e565fe98 100644
--- a/arch/arm/kernel/iwmmxt.S
+++ b/arch/arm/kernel/iwmmxt.S
@@ -100,7 +100,7 @@ ENTRY(iwmmxt_task_enable)
100 get_thread_info r10 100 get_thread_info r10
101#endif 101#endif
1024: dec_preempt_count r10, r3 1024: dec_preempt_count r10, r3
103 mov pc, r9 @ normal exit from exception 103 ret r9 @ normal exit from exception
104 104
105concan_save: 105concan_save:
106 106
@@ -144,7 +144,7 @@ concan_dump:
144 wstrd wR15, [r1, #MMX_WR15] 144 wstrd wR15, [r1, #MMX_WR15]
145 145
1462: teq r0, #0 @ anything to load? 1462: teq r0, #0 @ anything to load?
147 moveq pc, lr @ if not, return 147 reteq lr @ if not, return
148 148
149concan_load: 149concan_load:
150 150
@@ -177,10 +177,10 @@ concan_load:
177 @ clear CUP/MUP (only if r1 != 0) 177 @ clear CUP/MUP (only if r1 != 0)
178 teq r1, #0 178 teq r1, #0
179 mov r2, #0 179 mov r2, #0
180 moveq pc, lr 180 reteq lr
181 181
182 tmcr wCon, r2 182 tmcr wCon, r2
183 mov pc, lr 183 ret lr
184 184
185/* 185/*
186 * Back up Concan regs to save area and disable access to them 186 * Back up Concan regs to save area and disable access to them
@@ -266,7 +266,7 @@ ENTRY(iwmmxt_task_copy)
266 mov r3, lr @ preserve return address 266 mov r3, lr @ preserve return address
267 bl concan_dump 267 bl concan_dump
268 msr cpsr_c, ip @ restore interrupt mode 268 msr cpsr_c, ip @ restore interrupt mode
269 mov pc, r3 269 ret r3
270 270
271/* 271/*
272 * Restore Concan state from given memory address 272 * Restore Concan state from given memory address
@@ -302,7 +302,7 @@ ENTRY(iwmmxt_task_restore)
302 mov r3, lr @ preserve return address 302 mov r3, lr @ preserve return address
303 bl concan_load 303 bl concan_load
304 msr cpsr_c, ip @ restore interrupt mode 304 msr cpsr_c, ip @ restore interrupt mode
305 mov pc, r3 305 ret r3
306 306
307/* 307/*
308 * Concan handling on task switch 308 * Concan handling on task switch
@@ -324,7 +324,7 @@ ENTRY(iwmmxt_task_switch)
324 add r3, r0, #TI_IWMMXT_STATE @ get next task Concan save area 324 add r3, r0, #TI_IWMMXT_STATE @ get next task Concan save area
325 ldr r2, [r2] @ get current Concan owner 325 ldr r2, [r2] @ get current Concan owner
326 teq r2, r3 @ next task owns it? 326 teq r2, r3 @ next task owns it?
327 movne pc, lr @ no: leave Concan disabled 327 retne lr @ no: leave Concan disabled
328 328
3291: @ flip Concan access 3291: @ flip Concan access
330 XSC(eor r1, r1, #0x3) 330 XSC(eor r1, r1, #0x3)
@@ -351,7 +351,7 @@ ENTRY(iwmmxt_task_release)
351 eors r0, r0, r1 @ if equal... 351 eors r0, r0, r1 @ if equal...
352 streq r0, [r3] @ then clear ownership 352 streq r0, [r3] @ then clear ownership
353 msr cpsr_c, r2 @ restore interrupts 353 msr cpsr_c, r2 @ restore interrupts
354 mov pc, lr 354 ret lr
355 355
356 .data 356 .data
357concan_owner: 357concan_owner:
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 4238bcba9d60..ae3e216a52c2 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -621,10 +621,7 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
621 return; 621 return;
622 } 622 }
623 623
624 fr.fp = regs->ARM_fp; 624 arm_get_current_stackframe(regs, &fr);
625 fr.sp = regs->ARM_sp;
626 fr.lr = regs->ARM_lr;
627 fr.pc = regs->ARM_pc;
628 walk_stackframe(&fr, callchain_trace, entry); 625 walk_stackframe(&fr, callchain_trace, entry);
629} 626}
630 627
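Editor's note: the four register copies into the stackframe are replaced by the new arm_get_current_stackframe() helper. A minimal sketch of what such a helper does, assuming the unwinder's struct stackframe fields visible in the surrounding code (fp/sp/lr/pc); this is an illustration, not the verbatim kernel definition:

	/* sketch: seed an unwinder stackframe from the saved pt_regs */
	static inline void arm_get_current_stackframe(struct pt_regs *regs,
						      struct stackframe *frame)
	{
		frame->fp = frame_pointer(regs);	/* ARM_fp, or ARM_r7 on Thumb-2 */
		frame->sp = regs->ARM_sp;
		frame->lr = regs->ARM_lr;
		frame->pc = regs->ARM_pc;
	}

Using one helper also gives Thumb-2 kernels a single place to pick the correct frame-pointer register (see the frame_pointer() change in traps.c further down).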
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index af9e35e8836f..c02c2e8c877d 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -250,40 +250,39 @@ static struct platform_device_id cpu_pmu_plat_device_ids[] = {
250static int probe_current_pmu(struct arm_pmu *pmu) 250static int probe_current_pmu(struct arm_pmu *pmu)
251{ 251{
252 int cpu = get_cpu(); 252 int cpu = get_cpu();
253 unsigned long implementor = read_cpuid_implementor();
254 unsigned long part_number = read_cpuid_part_number();
255 int ret = -ENODEV; 253 int ret = -ENODEV;
256 254
257 pr_info("probing PMU on CPU %d\n", cpu); 255 pr_info("probing PMU on CPU %d\n", cpu);
258 256
257 switch (read_cpuid_part()) {
259 /* ARM Ltd CPUs. */ 258 /* ARM Ltd CPUs. */
260 if (implementor == ARM_CPU_IMP_ARM) { 259 case ARM_CPU_PART_ARM1136:
261 switch (part_number) { 260 case ARM_CPU_PART_ARM1156:
262 case ARM_CPU_PART_ARM1136: 261 case ARM_CPU_PART_ARM1176:
263 case ARM_CPU_PART_ARM1156: 262 ret = armv6pmu_init(pmu);
264 case ARM_CPU_PART_ARM1176: 263 break;
265 ret = armv6pmu_init(pmu); 264 case ARM_CPU_PART_ARM11MPCORE:
266 break; 265 ret = armv6mpcore_pmu_init(pmu);
267 case ARM_CPU_PART_ARM11MPCORE: 266 break;
268 ret = armv6mpcore_pmu_init(pmu); 267 case ARM_CPU_PART_CORTEX_A8:
269 break; 268 ret = armv7_a8_pmu_init(pmu);
270 case ARM_CPU_PART_CORTEX_A8: 269 break;
271 ret = armv7_a8_pmu_init(pmu); 270 case ARM_CPU_PART_CORTEX_A9:
272 break; 271 ret = armv7_a9_pmu_init(pmu);
273 case ARM_CPU_PART_CORTEX_A9: 272 break;
274 ret = armv7_a9_pmu_init(pmu); 273
275 break; 274 default:
276 } 275 if (read_cpuid_implementor() == ARM_CPU_IMP_INTEL) {
277 /* Intel CPUs [xscale]. */ 276 switch (xscale_cpu_arch_version()) {
278 } else if (implementor == ARM_CPU_IMP_INTEL) { 277 case ARM_CPU_XSCALE_ARCH_V1:
279 switch (xscale_cpu_arch_version()) { 278 ret = xscale1pmu_init(pmu);
280 case ARM_CPU_XSCALE_ARCH_V1: 279 break;
281 ret = xscale1pmu_init(pmu); 280 case ARM_CPU_XSCALE_ARCH_V2:
282 break; 281 ret = xscale2pmu_init(pmu);
283 case ARM_CPU_XSCALE_ARCH_V2: 282 break;
284 ret = xscale2pmu_init(pmu); 283 }
285 break;
286 } 284 }
285 break;
287 } 286 }
288 287
289 put_cpu(); 288 put_cpu();
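Editor's note: probe_current_pmu() now switches on a single read_cpuid_part() value instead of reading the implementor and part number separately. The idea, as a rough sketch (the mask is assumed from the MIDR layout, implementor in bits [31:24] and part number in bits [15:4], and is not copied from the kernel header):

	/* sketch: one value combining the MIDR implementor and part-number fields */
	static inline unsigned int read_cpuid_part(void)
	{
		return read_cpuid_id() & 0xff00fff0;
	}

With that, constants such as ARM_CPU_PART_CORTEX_A9 can encode the vendor as well, so one switch statement covers the ARM Ltd parts directly, with the Intel/XScale probe folded into the default case.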
diff --git a/arch/arm/kernel/relocate_kernel.S b/arch/arm/kernel/relocate_kernel.S
index 95858966d84e..35e72585ec1d 100644
--- a/arch/arm/kernel/relocate_kernel.S
+++ b/arch/arm/kernel/relocate_kernel.S
@@ -3,6 +3,7 @@
3 */ 3 */
4 4
5#include <linux/linkage.h> 5#include <linux/linkage.h>
6#include <asm/assembler.h>
6#include <asm/kexec.h> 7#include <asm/kexec.h>
7 8
8 .align 3 /* not needed for this code, but keeps fncpy() happy */ 9 .align 3 /* not needed for this code, but keeps fncpy() happy */
@@ -59,7 +60,7 @@ ENTRY(relocate_new_kernel)
59 mov r0,#0 60 mov r0,#0
60 ldr r1,kexec_mach_type 61 ldr r1,kexec_mach_type
61 ldr r2,kexec_boot_atags 62 ldr r2,kexec_boot_atags
62 ARM( mov pc, lr ) 63 ARM( ret lr )
63 THUMB( bx lr ) 64 THUMB( bx lr )
64 65
65 .align 66 .align
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index 1b880db2a033..e1e60e5a7a27 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -107,7 +107,7 @@ ENTRY(cpu_resume_mmu)
107 instr_sync 107 instr_sync
108 mov r0, r0 108 mov r0, r0
109 mov r0, r0 109 mov r0, r0
110 mov pc, r3 @ jump to virtual address 110 ret r3 @ jump to virtual address
111ENDPROC(cpu_resume_mmu) 111ENDPROC(cpu_resume_mmu)
112 .popsection 112 .popsection
113cpu_resume_after_mmu: 113cpu_resume_after_mmu:
diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c
index 1aafa0d785eb..72f9241ad5db 100644
--- a/arch/arm/kernel/smp_scu.c
+++ b/arch/arm/kernel/smp_scu.c
@@ -17,6 +17,8 @@
17#include <asm/cputype.h> 17#include <asm/cputype.h>
18 18
19#define SCU_CTRL 0x00 19#define SCU_CTRL 0x00
20#define SCU_ENABLE (1 << 0)
21#define SCU_STANDBY_ENABLE (1 << 5)
20#define SCU_CONFIG 0x04 22#define SCU_CONFIG 0x04
21#define SCU_CPU_STATUS 0x08 23#define SCU_CPU_STATUS 0x08
22#define SCU_INVALIDATE 0x0c 24#define SCU_INVALIDATE 0x0c
@@ -50,10 +52,16 @@ void scu_enable(void __iomem *scu_base)
50 52
51 scu_ctrl = readl_relaxed(scu_base + SCU_CTRL); 53 scu_ctrl = readl_relaxed(scu_base + SCU_CTRL);
52 /* already enabled? */ 54 /* already enabled? */
53 if (scu_ctrl & 1) 55 if (scu_ctrl & SCU_ENABLE)
54 return; 56 return;
55 57
56 scu_ctrl |= 1; 58 scu_ctrl |= SCU_ENABLE;
59
60 /* Cortex-A9 earlier than r2p0 has no standby bit in SCU */
61 if ((read_cpuid_id() & 0xff0ffff0) == 0x410fc090 &&
62 (read_cpuid_id() & 0x00f0000f) >= 0x00200000)
63 scu_ctrl |= SCU_STANDBY_ENABLE;
64
57 writel_relaxed(scu_ctrl, scu_base + SCU_CTRL); 65 writel_relaxed(scu_ctrl, scu_base + SCU_CTRL);
58 66
59 /* 67 /*
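Editor's note: the bare '1' becomes SCU_ENABLE, and on Cortex-A9 r2p0 or later the SCU dynamic-standby bit is set as well (earlier A9 revisions have no such bit). A short sketch of how the two MIDR comparisons decode, with the field positions assumed from the ARM MIDR layout and the local names invented for readability:

	unsigned int midr = read_cpuid_id();

	/* implementor/architecture/part fields: 0x41...0xC09 is an ARM Cortex-A9 */
	bool is_a9 = (midr & 0xff0ffff0) == 0x410fc090;
	/* variant [23:20] and revision [3:0]: variant >= 2 means r2p0 or later */
	bool at_least_r2p0 = (midr & 0x00f0000f) >= 0x00200000;

	if (is_a9 && at_least_r2p0)
		scu_ctrl |= SCU_STANDBY_ENABLE;	/* allow the SCU to idle when all CPUs are in WFI */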
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
index 95d063620b76..2e72be4f623e 100644
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -92,15 +92,19 @@ void erratum_a15_798181_init(void)
92 unsigned int midr = read_cpuid_id(); 92 unsigned int midr = read_cpuid_id();
93 unsigned int revidr = read_cpuid(CPUID_REVIDR); 93 unsigned int revidr = read_cpuid(CPUID_REVIDR);
94 94
95 /* Cortex-A15 r0p0..r3p2 w/o ECO fix affected */ 95 /* Brahma-B15 r0p0..r0p2 affected
96 if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2 || 96 * Cortex-A15 r0p0..r3p2 w/o ECO fix affected */
97 (revidr & 0x210) == 0x210) { 97 if ((midr & 0xff0ffff0) == 0x420f00f0 && midr <= 0x420f00f2)
98 return;
99 }
100 if (revidr & 0x10)
101 erratum_a15_798181_handler = erratum_a15_798181_partial;
102 else
103 erratum_a15_798181_handler = erratum_a15_798181_broadcast; 98 erratum_a15_798181_handler = erratum_a15_798181_broadcast;
99 else if ((midr & 0xff0ffff0) == 0x410fc0f0 && midr <= 0x413fc0f2 &&
100 (revidr & 0x210) != 0x210) {
101 if (revidr & 0x10)
102 erratum_a15_798181_handler =
103 erratum_a15_798181_partial;
104 else
105 erratum_a15_798181_handler =
106 erratum_a15_798181_broadcast;
107 }
104} 108}
105#endif 109#endif
106 110
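Editor's note: two affected families are now recognised here. Broadcom Brahma-B15 r0p0..r0p2 always gets the TLB-broadcast workaround, while Cortex-A15 r0p0..r3p2 without the ECO fix chooses between the partial and broadcast handlers on REVIDR bit 4. The same decision written out flat, with the magic MIDR patterns given illustrative names (not taken from the kernel headers):

	#define MIDR_BRAHMA_B15	0x420f00f0	/* implementor 0x42 (Broadcom), part 0x00f */
	#define MIDR_CORTEX_A15	0x410fc0f0	/* implementor 0x41 (ARM), part 0xc0f */

	if ((midr & 0xff0ffff0) == MIDR_BRAHMA_B15 && midr <= 0x420f00f2)
		erratum_a15_798181_handler = erratum_a15_798181_broadcast;
	else if ((midr & 0xff0ffff0) == MIDR_CORTEX_A15 && midr <= 0x413fc0f2 &&
		 (revidr & 0x210) != 0x210)	/* both ECO-fix bits set: not affected */
		erratum_a15_798181_handler = (revidr & 0x10)
			? erratum_a15_798181_partial
			: erratum_a15_798181_broadcast;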
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index 829a96d4a179..0cc7e58c47cc 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -50,10 +50,7 @@ unsigned long profile_pc(struct pt_regs *regs)
50 if (!in_lock_functions(regs->ARM_pc)) 50 if (!in_lock_functions(regs->ARM_pc))
51 return regs->ARM_pc; 51 return regs->ARM_pc;
52 52
53 frame.fp = regs->ARM_fp; 53 arm_get_current_stackframe(regs, &frame);
54 frame.sp = regs->ARM_sp;
55 frame.lr = regs->ARM_lr;
56 frame.pc = regs->ARM_pc;
57 do { 54 do {
58 int ret = unwind_frame(&frame); 55 int ret = unwind_frame(&frame);
59 if (ret < 0) 56 if (ret < 0)
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index abd2fc067736..c8e4bb714944 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -31,11 +31,13 @@
31#include <asm/exception.h> 31#include <asm/exception.h>
32#include <asm/unistd.h> 32#include <asm/unistd.h>
33#include <asm/traps.h> 33#include <asm/traps.h>
34#include <asm/ptrace.h>
34#include <asm/unwind.h> 35#include <asm/unwind.h>
35#include <asm/tls.h> 36#include <asm/tls.h>
36#include <asm/system_misc.h> 37#include <asm/system_misc.h>
37#include <asm/opcodes.h> 38#include <asm/opcodes.h>
38 39
40
39static const char *handler[]= { 41static const char *handler[]= {
40 "prefetch abort", 42 "prefetch abort",
41 "data abort", 43 "data abort",
@@ -184,7 +186,7 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
184 tsk = current; 186 tsk = current;
185 187
186 if (regs) { 188 if (regs) {
187 fp = regs->ARM_fp; 189 fp = frame_pointer(regs);
188 mode = processor_mode(regs); 190 mode = processor_mode(regs);
189 } else if (tsk != current) { 191 } else if (tsk != current) {
190 fp = thread_saved_fp(tsk); 192 fp = thread_saved_fp(tsk);
@@ -719,7 +721,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
719 dump_instr("", regs); 721 dump_instr("", regs);
720 if (user_mode(regs)) { 722 if (user_mode(regs)) {
721 __show_regs(regs); 723 __show_regs(regs);
722 c_backtrace(regs->ARM_fp, processor_mode(regs)); 724 c_backtrace(frame_pointer(regs), processor_mode(regs));
723 } 725 }
724 } 726 }
725#endif 727#endif
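Editor's note: backtraces now read the frame pointer through frame_pointer(regs) instead of regs->ARM_fp, because a Thumb-2 kernel keeps its frame pointer in r7 rather than r11/fp. A minimal sketch of the accessor, assuming it is a plain config-dependent macro (illustration only):

	/* sketch: pick the frame-pointer register matching the kernel's instruction set */
	#ifdef CONFIG_THUMB2_KERNEL
	#define frame_pointer(regs)	((regs)->ARM_r7)
	#else
	#define frame_pointer(regs)	((regs)->ARM_fp)
	#endif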
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index e67682f02cb2..a61a1dfbb0db 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -479,12 +479,10 @@ void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk)
479 tsk = current; 479 tsk = current;
480 480
481 if (regs) { 481 if (regs) {
482 frame.fp = regs->ARM_fp; 482 arm_get_current_stackframe(regs, &frame);
483 frame.sp = regs->ARM_sp;
484 frame.lr = regs->ARM_lr;
485 /* PC might be corrupted, use LR in that case. */ 483 /* PC might be corrupted, use LR in that case. */
486 frame.pc = kernel_text_address(regs->ARM_pc) 484 if (!kernel_text_address(regs->ARM_pc))
487 ? regs->ARM_pc : regs->ARM_lr; 485 frame.pc = regs->ARM_lr;
488 } else if (tsk == current) { 486 } else if (tsk == current) {
489 frame.fp = (unsigned long)__builtin_frame_address(0); 487 frame.fp = (unsigned long)__builtin_frame_address(0);
490 frame.sp = current_sp; 488 frame.sp = current_sp;
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 7bcee5c9b604..6f57cb94367f 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -318,7 +318,6 @@ SECTIONS
318 _end = .; 318 _end = .;
319 319
320 STABS_DEBUG 320 STABS_DEBUG
321 .comment 0 : { *(.comment) }
322} 321}
323 322
324/* 323/*
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
index b23a59c1c522..70bf49b8b244 100644
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -274,13 +274,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
274 274
275int __attribute_const__ kvm_target_cpu(void) 275int __attribute_const__ kvm_target_cpu(void)
276{ 276{
277 unsigned long implementor = read_cpuid_implementor(); 277 switch (read_cpuid_part()) {
278 unsigned long part_number = read_cpuid_part_number();
279
280 if (implementor != ARM_CPU_IMP_ARM)
281 return -EINVAL;
282
283 switch (part_number) {
284 case ARM_CPU_PART_CORTEX_A7: 278 case ARM_CPU_PART_CORTEX_A7:
285 return KVM_ARM_TARGET_CORTEX_A7; 279 return KVM_ARM_TARGET_CORTEX_A7;
286 case ARM_CPU_PART_CORTEX_A15: 280 case ARM_CPU_PART_CORTEX_A15:
diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S
index 1b9844d369cc..b2d229f09c07 100644
--- a/arch/arm/kvm/init.S
+++ b/arch/arm/kvm/init.S
@@ -17,6 +17,7 @@
17 */ 17 */
18 18
19#include <linux/linkage.h> 19#include <linux/linkage.h>
20#include <asm/assembler.h>
20#include <asm/unified.h> 21#include <asm/unified.h>
21#include <asm/asm-offsets.h> 22#include <asm/asm-offsets.h>
22#include <asm/kvm_asm.h> 23#include <asm/kvm_asm.h>
@@ -134,7 +135,7 @@ phase2:
134 ldr r0, =TRAMPOLINE_VA 135 ldr r0, =TRAMPOLINE_VA
135 adr r1, target 136 adr r1, target
136 bfi r0, r1, #0, #PAGE_SHIFT 137 bfi r0, r1, #0, #PAGE_SHIFT
137 mov pc, r0 138 ret r0
138 139
139target: @ We're now in the trampoline code, switch page tables 140target: @ We're now in the trampoline code, switch page tables
140 mcrr p15, 4, r2, r3, c2 141 mcrr p15, 4, r2, r3, c2
diff --git a/arch/arm/lib/ashldi3.S b/arch/arm/lib/ashldi3.S
index 638deb13da1c..b05e95840651 100644
--- a/arch/arm/lib/ashldi3.S
+++ b/arch/arm/lib/ashldi3.S
@@ -27,6 +27,7 @@ Boston, MA 02110-1301, USA. */
27 27
28 28
29#include <linux/linkage.h> 29#include <linux/linkage.h>
30#include <asm/assembler.h>
30 31
31#ifdef __ARMEB__ 32#ifdef __ARMEB__
32#define al r1 33#define al r1
@@ -47,7 +48,7 @@ ENTRY(__aeabi_llsl)
47 THUMB( lsrmi r3, al, ip ) 48 THUMB( lsrmi r3, al, ip )
48 THUMB( orrmi ah, ah, r3 ) 49 THUMB( orrmi ah, ah, r3 )
49 mov al, al, lsl r2 50 mov al, al, lsl r2
50 mov pc, lr 51 ret lr
51 52
52ENDPROC(__ashldi3) 53ENDPROC(__ashldi3)
53ENDPROC(__aeabi_llsl) 54ENDPROC(__aeabi_llsl)
diff --git a/arch/arm/lib/ashrdi3.S b/arch/arm/lib/ashrdi3.S
index 015e8aa5a1d1..275d7d2341a4 100644
--- a/arch/arm/lib/ashrdi3.S
+++ b/arch/arm/lib/ashrdi3.S
@@ -27,6 +27,7 @@ Boston, MA 02110-1301, USA. */
27 27
28 28
29#include <linux/linkage.h> 29#include <linux/linkage.h>
30#include <asm/assembler.h>
30 31
31#ifdef __ARMEB__ 32#ifdef __ARMEB__
32#define al r1 33#define al r1
@@ -47,7 +48,7 @@ ENTRY(__aeabi_lasr)
47 THUMB( lslmi r3, ah, ip ) 48 THUMB( lslmi r3, ah, ip )
48 THUMB( orrmi al, al, r3 ) 49 THUMB( orrmi al, al, r3 )
49 mov ah, ah, asr r2 50 mov ah, ah, asr r2
50 mov pc, lr 51 ret lr
51 52
52ENDPROC(__ashrdi3) 53ENDPROC(__ashrdi3)
53ENDPROC(__aeabi_lasr) 54ENDPROC(__aeabi_lasr)
diff --git a/arch/arm/lib/backtrace.S b/arch/arm/lib/backtrace.S
index 4102be617fce..fab5a50503ae 100644
--- a/arch/arm/lib/backtrace.S
+++ b/arch/arm/lib/backtrace.S
@@ -25,7 +25,7 @@
25ENTRY(c_backtrace) 25ENTRY(c_backtrace)
26 26
27#if !defined(CONFIG_FRAME_POINTER) || !defined(CONFIG_PRINTK) 27#if !defined(CONFIG_FRAME_POINTER) || !defined(CONFIG_PRINTK)
28 mov pc, lr 28 ret lr
29ENDPROC(c_backtrace) 29ENDPROC(c_backtrace)
30#else 30#else
31 stmfd sp!, {r4 - r8, lr} @ Save an extra register so we have a location... 31 stmfd sp!, {r4 - r8, lr} @ Save an extra register so we have a location...
diff --git a/arch/arm/lib/bitops.h b/arch/arm/lib/bitops.h
index 9f12ed1eea86..7d807cfd8ef5 100644
--- a/arch/arm/lib/bitops.h
+++ b/arch/arm/lib/bitops.h
@@ -1,3 +1,4 @@
1#include <asm/assembler.h>
1#include <asm/unwind.h> 2#include <asm/unwind.h>
2 3
3#if __LINUX_ARM_ARCH__ >= 6 4#if __LINUX_ARM_ARCH__ >= 6
@@ -70,7 +71,7 @@ UNWIND( .fnstart )
70 \instr r2, r2, r3 71 \instr r2, r2, r3
71 str r2, [r1, r0, lsl #2] 72 str r2, [r1, r0, lsl #2]
72 restore_irqs ip 73 restore_irqs ip
73 mov pc, lr 74 ret lr
74UNWIND( .fnend ) 75UNWIND( .fnend )
75ENDPROC(\name ) 76ENDPROC(\name )
76 .endm 77 .endm
@@ -98,7 +99,7 @@ UNWIND( .fnstart )
98 \store r2, [r1] 99 \store r2, [r1]
99 moveq r0, #0 100 moveq r0, #0
100 restore_irqs ip 101 restore_irqs ip
101 mov pc, lr 102 ret lr
102UNWIND( .fnend ) 103UNWIND( .fnend )
103ENDPROC(\name ) 104ENDPROC(\name )
104 .endm 105 .endm
diff --git a/arch/arm/lib/bswapsdi2.S b/arch/arm/lib/bswapsdi2.S
index 9fcdd154eff9..07cda737bb11 100644
--- a/arch/arm/lib/bswapsdi2.S
+++ b/arch/arm/lib/bswapsdi2.S
@@ -1,4 +1,5 @@
1#include <linux/linkage.h> 1#include <linux/linkage.h>
2#include <asm/assembler.h>
2 3
3#if __LINUX_ARM_ARCH__ >= 6 4#if __LINUX_ARM_ARCH__ >= 6
4ENTRY(__bswapsi2) 5ENTRY(__bswapsi2)
@@ -18,7 +19,7 @@ ENTRY(__bswapsi2)
18 mov r3, r3, lsr #8 19 mov r3, r3, lsr #8
19 bic r3, r3, #0xff00 20 bic r3, r3, #0xff00
20 eor r0, r3, r0, ror #8 21 eor r0, r3, r0, ror #8
21 mov pc, lr 22 ret lr
22ENDPROC(__bswapsi2) 23ENDPROC(__bswapsi2)
23 24
24ENTRY(__bswapdi2) 25ENTRY(__bswapdi2)
@@ -31,6 +32,6 @@ ENTRY(__bswapdi2)
31 bic r1, r1, #0xff00 32 bic r1, r1, #0xff00
32 eor r1, r1, r0, ror #8 33 eor r1, r1, r0, ror #8
33 eor r0, r3, ip, ror #8 34 eor r0, r3, ip, ror #8
34 mov pc, lr 35 ret lr
35ENDPROC(__bswapdi2) 36ENDPROC(__bswapdi2)
36#endif 37#endif
diff --git a/arch/arm/lib/call_with_stack.S b/arch/arm/lib/call_with_stack.S
index 916c80f13ae7..ed1a421813cb 100644
--- a/arch/arm/lib/call_with_stack.S
+++ b/arch/arm/lib/call_with_stack.S
@@ -36,9 +36,9 @@ ENTRY(call_with_stack)
36 mov r0, r1 36 mov r0, r1
37 37
38 adr lr, BSYM(1f) 38 adr lr, BSYM(1f)
39 mov pc, r2 39 ret r2
40 40
411: ldr lr, [sp] 411: ldr lr, [sp]
42 ldr sp, [sp, #4] 42 ldr sp, [sp, #4]
43 mov pc, lr 43 ret lr
44ENDPROC(call_with_stack) 44ENDPROC(call_with_stack)
diff --git a/arch/arm/lib/csumpartial.S b/arch/arm/lib/csumpartial.S
index 31d3cb34740d..984e0f29d548 100644
--- a/arch/arm/lib/csumpartial.S
+++ b/arch/arm/lib/csumpartial.S
@@ -97,7 +97,7 @@ td3 .req lr
97#endif 97#endif
98#endif 98#endif
99 adcnes sum, sum, td0 @ update checksum 99 adcnes sum, sum, td0 @ update checksum
100 mov pc, lr 100 ret lr
101 101
102ENTRY(csum_partial) 102ENTRY(csum_partial)
103 stmfd sp!, {buf, lr} 103 stmfd sp!, {buf, lr}
diff --git a/arch/arm/lib/csumpartialcopygeneric.S b/arch/arm/lib/csumpartialcopygeneric.S
index d6e742d24007..10b45909610c 100644
--- a/arch/arm/lib/csumpartialcopygeneric.S
+++ b/arch/arm/lib/csumpartialcopygeneric.S
@@ -7,6 +7,7 @@
7 * it under the terms of the GNU General Public License version 2 as 7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10#include <asm/assembler.h>
10 11
11/* 12/*
12 * unsigned int 13 * unsigned int
@@ -40,7 +41,7 @@ sum .req r3
40 adcs sum, sum, ip, put_byte_1 @ update checksum 41 adcs sum, sum, ip, put_byte_1 @ update checksum
41 strb ip, [dst], #1 42 strb ip, [dst], #1
42 tst dst, #2 43 tst dst, #2
43 moveq pc, lr @ dst is now 32bit aligned 44 reteq lr @ dst is now 32bit aligned
44 45
45.Ldst_16bit: load2b r8, ip 46.Ldst_16bit: load2b r8, ip
46 sub len, len, #2 47 sub len, len, #2
@@ -48,7 +49,7 @@ sum .req r3
48 strb r8, [dst], #1 49 strb r8, [dst], #1
49 adcs sum, sum, ip, put_byte_1 50 adcs sum, sum, ip, put_byte_1
50 strb ip, [dst], #1 51 strb ip, [dst], #1
51 mov pc, lr @ dst is now 32bit aligned 52 ret lr @ dst is now 32bit aligned
52 53
53 /* 54 /*
54 * Handle 0 to 7 bytes, with any alignment of source and 55 * Handle 0 to 7 bytes, with any alignment of source and
diff --git a/arch/arm/lib/delay-loop.S b/arch/arm/lib/delay-loop.S
index bc1033b897b4..518bf6e93f78 100644
--- a/arch/arm/lib/delay-loop.S
+++ b/arch/arm/lib/delay-loop.S
@@ -35,7 +35,7 @@ ENTRY(__loop_const_udelay) @ 0 <= r0 <= 0x7fffff06
35 mul r0, r2, r0 @ max = 2^32-1 35 mul r0, r2, r0 @ max = 2^32-1
36 add r0, r0, r1, lsr #32-6 36 add r0, r0, r1, lsr #32-6
37 movs r0, r0, lsr #6 37 movs r0, r0, lsr #6
38 moveq pc, lr 38 reteq lr
39 39
40/* 40/*
41 * loops = r0 * HZ * loops_per_jiffy / 1000000 41 * loops = r0 * HZ * loops_per_jiffy / 1000000
@@ -46,23 +46,23 @@ ENTRY(__loop_const_udelay) @ 0 <= r0 <= 0x7fffff06
46ENTRY(__loop_delay) 46ENTRY(__loop_delay)
47 subs r0, r0, #1 47 subs r0, r0, #1
48#if 0 48#if 0
49 movls pc, lr 49 retls lr
50 subs r0, r0, #1 50 subs r0, r0, #1
51 movls pc, lr 51 retls lr
52 subs r0, r0, #1 52 subs r0, r0, #1
53 movls pc, lr 53 retls lr
54 subs r0, r0, #1 54 subs r0, r0, #1
55 movls pc, lr 55 retls lr
56 subs r0, r0, #1 56 subs r0, r0, #1
57 movls pc, lr 57 retls lr
58 subs r0, r0, #1 58 subs r0, r0, #1
59 movls pc, lr 59 retls lr
60 subs r0, r0, #1 60 subs r0, r0, #1
61 movls pc, lr 61 retls lr
62 subs r0, r0, #1 62 subs r0, r0, #1
63#endif 63#endif
64 bhi __loop_delay 64 bhi __loop_delay
65 mov pc, lr 65 ret lr
66ENDPROC(__loop_udelay) 66ENDPROC(__loop_udelay)
67ENDPROC(__loop_const_udelay) 67ENDPROC(__loop_const_udelay)
68ENDPROC(__loop_delay) 68ENDPROC(__loop_delay)
diff --git a/arch/arm/lib/div64.S b/arch/arm/lib/div64.S
index e55c4842c290..a9eafe4981eb 100644
--- a/arch/arm/lib/div64.S
+++ b/arch/arm/lib/div64.S
@@ -13,6 +13,7 @@
13 */ 13 */
14 14
15#include <linux/linkage.h> 15#include <linux/linkage.h>
16#include <asm/assembler.h>
16#include <asm/unwind.h> 17#include <asm/unwind.h>
17 18
18#ifdef __ARMEB__ 19#ifdef __ARMEB__
@@ -97,7 +98,7 @@ UNWIND(.fnstart)
97 mov yl, #0 98 mov yl, #0
98 cmpeq xl, r4 99 cmpeq xl, r4
99 movlo xh, xl 100 movlo xh, xl
100 movlo pc, lr 101 retlo lr
101 102
102 @ The division loop for lower bit positions. 103 @ The division loop for lower bit positions.
103 @ Here we shift remainer bits leftwards rather than moving the 104 @ Here we shift remainer bits leftwards rather than moving the
@@ -111,14 +112,14 @@ UNWIND(.fnstart)
111 subcs xh, xh, r4 112 subcs xh, xh, r4
112 movs ip, ip, lsr #1 113 movs ip, ip, lsr #1
113 bne 4b 114 bne 4b
114 mov pc, lr 115 ret lr
115 116
116 @ The top part of remainder became zero. If carry is set 117 @ The top part of remainder became zero. If carry is set
117 @ (the 33th bit) this is a false positive so resume the loop. 118 @ (the 33th bit) this is a false positive so resume the loop.
118 @ Otherwise, if lower part is also null then we are done. 119 @ Otherwise, if lower part is also null then we are done.
1196: bcs 5b 1206: bcs 5b
120 cmp xl, #0 121 cmp xl, #0
121 moveq pc, lr 122 reteq lr
122 123
123 @ We still have remainer bits in the low part. Bring them up. 124 @ We still have remainer bits in the low part. Bring them up.
124 125
@@ -144,7 +145,7 @@ UNWIND(.fnstart)
144 movs ip, ip, lsr #1 145 movs ip, ip, lsr #1
145 mov xh, #1 146 mov xh, #1
146 bne 4b 147 bne 4b
147 mov pc, lr 148 ret lr
148 149
1498: @ Division by a power of 2: determine what that divisor order is 1508: @ Division by a power of 2: determine what that divisor order is
150 @ then simply shift values around 151 @ then simply shift values around
@@ -184,13 +185,13 @@ UNWIND(.fnstart)
184 THUMB( orr yl, yl, xh ) 185 THUMB( orr yl, yl, xh )
185 mov xh, xl, lsl ip 186 mov xh, xl, lsl ip
186 mov xh, xh, lsr ip 187 mov xh, xh, lsr ip
187 mov pc, lr 188 ret lr
188 189
189 @ eq -> division by 1: obvious enough... 190 @ eq -> division by 1: obvious enough...
1909: moveq yl, xl 1919: moveq yl, xl
191 moveq yh, xh 192 moveq yh, xh
192 moveq xh, #0 193 moveq xh, #0
193 moveq pc, lr 194 reteq lr
194UNWIND(.fnend) 195UNWIND(.fnend)
195 196
196UNWIND(.fnstart) 197UNWIND(.fnstart)
diff --git a/arch/arm/lib/findbit.S b/arch/arm/lib/findbit.S
index 64f6bc1a9132..7848780e8834 100644
--- a/arch/arm/lib/findbit.S
+++ b/arch/arm/lib/findbit.S
@@ -35,7 +35,7 @@ ENTRY(_find_first_zero_bit_le)
352: cmp r2, r1 @ any more? 352: cmp r2, r1 @ any more?
36 blo 1b 36 blo 1b
373: mov r0, r1 @ no free bits 373: mov r0, r1 @ no free bits
38 mov pc, lr 38 ret lr
39ENDPROC(_find_first_zero_bit_le) 39ENDPROC(_find_first_zero_bit_le)
40 40
41/* 41/*
@@ -76,7 +76,7 @@ ENTRY(_find_first_bit_le)
762: cmp r2, r1 @ any more? 762: cmp r2, r1 @ any more?
77 blo 1b 77 blo 1b
783: mov r0, r1 @ no free bits 783: mov r0, r1 @ no free bits
79 mov pc, lr 79 ret lr
80ENDPROC(_find_first_bit_le) 80ENDPROC(_find_first_bit_le)
81 81
82/* 82/*
@@ -114,7 +114,7 @@ ENTRY(_find_first_zero_bit_be)
1142: cmp r2, r1 @ any more? 1142: cmp r2, r1 @ any more?
115 blo 1b 115 blo 1b
1163: mov r0, r1 @ no free bits 1163: mov r0, r1 @ no free bits
117 mov pc, lr 117 ret lr
118ENDPROC(_find_first_zero_bit_be) 118ENDPROC(_find_first_zero_bit_be)
119 119
120ENTRY(_find_next_zero_bit_be) 120ENTRY(_find_next_zero_bit_be)
@@ -148,7 +148,7 @@ ENTRY(_find_first_bit_be)
1482: cmp r2, r1 @ any more? 1482: cmp r2, r1 @ any more?
149 blo 1b 149 blo 1b
1503: mov r0, r1 @ no free bits 1503: mov r0, r1 @ no free bits
151 mov pc, lr 151 ret lr
152ENDPROC(_find_first_bit_be) 152ENDPROC(_find_first_bit_be)
153 153
154ENTRY(_find_next_bit_be) 154ENTRY(_find_next_bit_be)
@@ -192,5 +192,5 @@ ENDPROC(_find_next_bit_be)
192#endif 192#endif
193 cmp r1, r0 @ Clamp to maxbit 193 cmp r1, r0 @ Clamp to maxbit
194 movlo r0, r1 194 movlo r0, r1
195 mov pc, lr 195 ret lr
196 196
diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S
index 9b06bb41fca6..938600098b88 100644
--- a/arch/arm/lib/getuser.S
+++ b/arch/arm/lib/getuser.S
@@ -18,7 +18,7 @@
18 * Inputs: r0 contains the address 18 * Inputs: r0 contains the address
19 * r1 contains the address limit, which must be preserved 19 * r1 contains the address limit, which must be preserved
20 * Outputs: r0 is the error code 20 * Outputs: r0 is the error code
21 * r2 contains the zero-extended value 21 * r2, r3 contains the zero-extended value
22 * lr corrupted 22 * lr corrupted
23 * 23 *
24 * No other registers must be altered. (see <asm/uaccess.h> 24 * No other registers must be altered. (see <asm/uaccess.h>
@@ -36,7 +36,7 @@ ENTRY(__get_user_1)
36 check_uaccess r0, 1, r1, r2, __get_user_bad 36 check_uaccess r0, 1, r1, r2, __get_user_bad
371: TUSER(ldrb) r2, [r0] 371: TUSER(ldrb) r2, [r0]
38 mov r0, #0 38 mov r0, #0
39 mov pc, lr 39 ret lr
40ENDPROC(__get_user_1) 40ENDPROC(__get_user_1)
41 41
42ENTRY(__get_user_2) 42ENTRY(__get_user_2)
@@ -56,25 +56,60 @@ rb .req r0
56 orr r2, rb, r2, lsl #8 56 orr r2, rb, r2, lsl #8
57#endif 57#endif
58 mov r0, #0 58 mov r0, #0
59 mov pc, lr 59 ret lr
60ENDPROC(__get_user_2) 60ENDPROC(__get_user_2)
61 61
62ENTRY(__get_user_4) 62ENTRY(__get_user_4)
63 check_uaccess r0, 4, r1, r2, __get_user_bad 63 check_uaccess r0, 4, r1, r2, __get_user_bad
644: TUSER(ldr) r2, [r0] 644: TUSER(ldr) r2, [r0]
65 mov r0, #0 65 mov r0, #0
66 mov pc, lr 66 ret lr
67ENDPROC(__get_user_4) 67ENDPROC(__get_user_4)
68 68
69ENTRY(__get_user_8)
70 check_uaccess r0, 8, r1, r2, __get_user_bad
71#ifdef CONFIG_THUMB2_KERNEL
725: TUSER(ldr) r2, [r0]
736: TUSER(ldr) r3, [r0, #4]
74#else
755: TUSER(ldr) r2, [r0], #4
766: TUSER(ldr) r3, [r0]
77#endif
78 mov r0, #0
79 ret lr
80ENDPROC(__get_user_8)
81
82#ifdef __ARMEB__
83ENTRY(__get_user_lo8)
84 check_uaccess r0, 8, r1, r2, __get_user_bad
85#ifdef CONFIG_CPU_USE_DOMAINS
86 add r0, r0, #4
877: ldrt r2, [r0]
88#else
897: ldr r2, [r0, #4]
90#endif
91 mov r0, #0
92 ret lr
93ENDPROC(__get_user_lo8)
94#endif
95
96__get_user_bad8:
97 mov r3, #0
69__get_user_bad: 98__get_user_bad:
70 mov r2, #0 99 mov r2, #0
71 mov r0, #-EFAULT 100 mov r0, #-EFAULT
72 mov pc, lr 101 ret lr
73ENDPROC(__get_user_bad) 102ENDPROC(__get_user_bad)
103ENDPROC(__get_user_bad8)
74 104
75.pushsection __ex_table, "a" 105.pushsection __ex_table, "a"
76 .long 1b, __get_user_bad 106 .long 1b, __get_user_bad
77 .long 2b, __get_user_bad 107 .long 2b, __get_user_bad
78 .long 3b, __get_user_bad 108 .long 3b, __get_user_bad
79 .long 4b, __get_user_bad 109 .long 4b, __get_user_bad
110 .long 5b, __get_user_bad8
111 .long 6b, __get_user_bad8
112#ifdef __ARMEB__
113 .long 7b, __get_user_bad
114#endif
80.popsection 115.popsection
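Editor's note: with __get_user_8 (plus the big-endian __get_user_lo8 helper) wired into the exception table, get_user() can fetch a 64-bit value from user space in one call, returning it in r2/r3. A usage sketch; the variable and pointer names are made up for illustration:

	u64 val;

	/* a single get_user() on a 64-bit user pointer is now supported on ARM */
	if (get_user(val, (u64 __user *)uptr))
		return -EFAULT;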
diff --git a/arch/arm/lib/io-readsb.S b/arch/arm/lib/io-readsb.S
index 9f4238987fe9..c31b2f3153f1 100644
--- a/arch/arm/lib/io-readsb.S
+++ b/arch/arm/lib/io-readsb.S
@@ -25,7 +25,7 @@
25 25
26ENTRY(__raw_readsb) 26ENTRY(__raw_readsb)
27 teq r2, #0 @ do we have to check for the zero len? 27 teq r2, #0 @ do we have to check for the zero len?
28 moveq pc, lr 28 reteq lr
29 ands ip, r1, #3 29 ands ip, r1, #3
30 bne .Linsb_align 30 bne .Linsb_align
31 31
diff --git a/arch/arm/lib/io-readsl.S b/arch/arm/lib/io-readsl.S
index 7a7430950c79..2ed86fa5465f 100644
--- a/arch/arm/lib/io-readsl.S
+++ b/arch/arm/lib/io-readsl.S
@@ -12,7 +12,7 @@
12 12
13ENTRY(__raw_readsl) 13ENTRY(__raw_readsl)
14 teq r2, #0 @ do we have to check for the zero len? 14 teq r2, #0 @ do we have to check for the zero len?
15 moveq pc, lr 15 reteq lr
16 ands ip, r1, #3 16 ands ip, r1, #3
17 bne 3f 17 bne 3f
18 18
@@ -33,7 +33,7 @@ ENTRY(__raw_readsl)
33 stmcsia r1!, {r3, ip} 33 stmcsia r1!, {r3, ip}
34 ldrne r3, [r0, #0] 34 ldrne r3, [r0, #0]
35 strne r3, [r1, #0] 35 strne r3, [r1, #0]
36 mov pc, lr 36 ret lr
37 37
383: ldr r3, [r0] 383: ldr r3, [r0]
39 cmp ip, #2 39 cmp ip, #2
@@ -75,5 +75,5 @@ ENTRY(__raw_readsl)
75 strb r3, [r1, #1] 75 strb r3, [r1, #1]
768: mov r3, ip, get_byte_0 768: mov r3, ip, get_byte_0
77 strb r3, [r1, #0] 77 strb r3, [r1, #0]
78 mov pc, lr 78 ret lr
79ENDPROC(__raw_readsl) 79ENDPROC(__raw_readsl)
diff --git a/arch/arm/lib/io-readsw-armv3.S b/arch/arm/lib/io-readsw-armv3.S
index 88487c8c4f23..413da9914529 100644
--- a/arch/arm/lib/io-readsw-armv3.S
+++ b/arch/arm/lib/io-readsw-armv3.S
@@ -27,11 +27,11 @@
27 strb r3, [r1], #1 27 strb r3, [r1], #1
28 28
29 subs r2, r2, #1 29 subs r2, r2, #1
30 moveq pc, lr 30 reteq lr
31 31
32ENTRY(__raw_readsw) 32ENTRY(__raw_readsw)
33 teq r2, #0 @ do we have to check for the zero len? 33 teq r2, #0 @ do we have to check for the zero len?
34 moveq pc, lr 34 reteq lr
35 tst r1, #3 35 tst r1, #3
36 bne .Linsw_align 36 bne .Linsw_align
37 37
diff --git a/arch/arm/lib/io-readsw-armv4.S b/arch/arm/lib/io-readsw-armv4.S
index 1f393d42593d..d9a45e9692ae 100644
--- a/arch/arm/lib/io-readsw-armv4.S
+++ b/arch/arm/lib/io-readsw-armv4.S
@@ -26,7 +26,7 @@
26 26
27ENTRY(__raw_readsw) 27ENTRY(__raw_readsw)
28 teq r2, #0 28 teq r2, #0
29 moveq pc, lr 29 reteq lr
30 tst r1, #3 30 tst r1, #3
31 bne .Linsw_align 31 bne .Linsw_align
32 32
diff --git a/arch/arm/lib/io-writesb.S b/arch/arm/lib/io-writesb.S
index 68b92f4acaeb..a46bbc9b168b 100644
--- a/arch/arm/lib/io-writesb.S
+++ b/arch/arm/lib/io-writesb.S
@@ -45,7 +45,7 @@
45 45
46ENTRY(__raw_writesb) 46ENTRY(__raw_writesb)
47 teq r2, #0 @ do we have to check for the zero len? 47 teq r2, #0 @ do we have to check for the zero len?
48 moveq pc, lr 48 reteq lr
49 ands ip, r1, #3 49 ands ip, r1, #3
50 bne .Loutsb_align 50 bne .Loutsb_align
51 51
diff --git a/arch/arm/lib/io-writesl.S b/arch/arm/lib/io-writesl.S
index d0d104a0dd11..4ea2435988c1 100644
--- a/arch/arm/lib/io-writesl.S
+++ b/arch/arm/lib/io-writesl.S
@@ -12,7 +12,7 @@
12 12
13ENTRY(__raw_writesl) 13ENTRY(__raw_writesl)
14 teq r2, #0 @ do we have to check for the zero len? 14 teq r2, #0 @ do we have to check for the zero len?
15 moveq pc, lr 15 reteq lr
16 ands ip, r1, #3 16 ands ip, r1, #3
17 bne 3f 17 bne 3f
18 18
@@ -33,7 +33,7 @@ ENTRY(__raw_writesl)
33 ldrne r3, [r1, #0] 33 ldrne r3, [r1, #0]
34 strcs ip, [r0, #0] 34 strcs ip, [r0, #0]
35 strne r3, [r0, #0] 35 strne r3, [r0, #0]
36 mov pc, lr 36 ret lr
37 37
383: bic r1, r1, #3 383: bic r1, r1, #3
39 ldr r3, [r1], #4 39 ldr r3, [r1], #4
@@ -47,7 +47,7 @@ ENTRY(__raw_writesl)
47 orr ip, ip, r3, lspush #16 47 orr ip, ip, r3, lspush #16
48 str ip, [r0] 48 str ip, [r0]
49 bne 4b 49 bne 4b
50 mov pc, lr 50 ret lr
51 51
525: mov ip, r3, lspull #8 525: mov ip, r3, lspull #8
53 ldr r3, [r1], #4 53 ldr r3, [r1], #4
@@ -55,7 +55,7 @@ ENTRY(__raw_writesl)
55 orr ip, ip, r3, lspush #24 55 orr ip, ip, r3, lspush #24
56 str ip, [r0] 56 str ip, [r0]
57 bne 5b 57 bne 5b
58 mov pc, lr 58 ret lr
59 59
606: mov ip, r3, lspull #24 606: mov ip, r3, lspull #24
61 ldr r3, [r1], #4 61 ldr r3, [r1], #4
@@ -63,5 +63,5 @@ ENTRY(__raw_writesl)
63 orr ip, ip, r3, lspush #8 63 orr ip, ip, r3, lspush #8
64 str ip, [r0] 64 str ip, [r0]
65 bne 6b 65 bne 6b
66 mov pc, lr 66 ret lr
67ENDPROC(__raw_writesl) 67ENDPROC(__raw_writesl)
diff --git a/arch/arm/lib/io-writesw-armv3.S b/arch/arm/lib/io-writesw-armv3.S
index 49b800419e32..121789eb6802 100644
--- a/arch/arm/lib/io-writesw-armv3.S
+++ b/arch/arm/lib/io-writesw-armv3.S
@@ -28,11 +28,11 @@
28 orr r3, r3, r3, lsl #16 28 orr r3, r3, r3, lsl #16
29 str r3, [r0] 29 str r3, [r0]
30 subs r2, r2, #1 30 subs r2, r2, #1
31 moveq pc, lr 31 reteq lr
32 32
33ENTRY(__raw_writesw) 33ENTRY(__raw_writesw)
34 teq r2, #0 @ do we have to check for the zero len? 34 teq r2, #0 @ do we have to check for the zero len?
35 moveq pc, lr 35 reteq lr
36 tst r1, #3 36 tst r1, #3
37 bne .Loutsw_align 37 bne .Loutsw_align
38 38
diff --git a/arch/arm/lib/io-writesw-armv4.S b/arch/arm/lib/io-writesw-armv4.S
index ff4f71b579ee..269f90c51ad2 100644
--- a/arch/arm/lib/io-writesw-armv4.S
+++ b/arch/arm/lib/io-writesw-armv4.S
@@ -31,7 +31,7 @@
31 31
32ENTRY(__raw_writesw) 32ENTRY(__raw_writesw)
33 teq r2, #0 33 teq r2, #0
34 moveq pc, lr 34 reteq lr
35 ands r3, r1, #3 35 ands r3, r1, #3
36 bne .Loutsw_align 36 bne .Loutsw_align
37 37
@@ -96,5 +96,5 @@ ENTRY(__raw_writesw)
96 tst r2, #1 96 tst r2, #1
973: movne ip, r3, lsr #8 973: movne ip, r3, lsr #8
98 strneh ip, [r0] 98 strneh ip, [r0]
99 mov pc, lr 99 ret lr
100ENDPROC(__raw_writesw) 100ENDPROC(__raw_writesw)
diff --git a/arch/arm/lib/lib1funcs.S b/arch/arm/lib/lib1funcs.S
index c562f649734c..947567ff67f9 100644
--- a/arch/arm/lib/lib1funcs.S
+++ b/arch/arm/lib/lib1funcs.S
@@ -210,7 +210,7 @@ ENTRY(__aeabi_uidiv)
210UNWIND(.fnstart) 210UNWIND(.fnstart)
211 211
212 subs r2, r1, #1 212 subs r2, r1, #1
213 moveq pc, lr 213 reteq lr
214 bcc Ldiv0 214 bcc Ldiv0
215 cmp r0, r1 215 cmp r0, r1
216 bls 11f 216 bls 11f
@@ -220,16 +220,16 @@ UNWIND(.fnstart)
220 ARM_DIV_BODY r0, r1, r2, r3 220 ARM_DIV_BODY r0, r1, r2, r3
221 221
222 mov r0, r2 222 mov r0, r2
223 mov pc, lr 223 ret lr
224 224
22511: moveq r0, #1 22511: moveq r0, #1
226 movne r0, #0 226 movne r0, #0
227 mov pc, lr 227 ret lr
228 228
22912: ARM_DIV2_ORDER r1, r2 22912: ARM_DIV2_ORDER r1, r2
230 230
231 mov r0, r0, lsr r2 231 mov r0, r0, lsr r2
232 mov pc, lr 232 ret lr
233 233
234UNWIND(.fnend) 234UNWIND(.fnend)
235ENDPROC(__udivsi3) 235ENDPROC(__udivsi3)
@@ -244,11 +244,11 @@ UNWIND(.fnstart)
244 moveq r0, #0 244 moveq r0, #0
245 tsthi r1, r2 @ see if divisor is power of 2 245 tsthi r1, r2 @ see if divisor is power of 2
246 andeq r0, r0, r2 246 andeq r0, r0, r2
247 movls pc, lr 247 retls lr
248 248
249 ARM_MOD_BODY r0, r1, r2, r3 249 ARM_MOD_BODY r0, r1, r2, r3
250 250
251 mov pc, lr 251 ret lr
252 252
253UNWIND(.fnend) 253UNWIND(.fnend)
254ENDPROC(__umodsi3) 254ENDPROC(__umodsi3)
@@ -274,23 +274,23 @@ UNWIND(.fnstart)
274 274
275 cmp ip, #0 275 cmp ip, #0
276 rsbmi r0, r0, #0 276 rsbmi r0, r0, #0
277 mov pc, lr 277 ret lr
278 278
27910: teq ip, r0 @ same sign ? 27910: teq ip, r0 @ same sign ?
280 rsbmi r0, r0, #0 280 rsbmi r0, r0, #0
281 mov pc, lr 281 ret lr
282 282
28311: movlo r0, #0 28311: movlo r0, #0
284 moveq r0, ip, asr #31 284 moveq r0, ip, asr #31
285 orreq r0, r0, #1 285 orreq r0, r0, #1
286 mov pc, lr 286 ret lr
287 287
28812: ARM_DIV2_ORDER r1, r2 28812: ARM_DIV2_ORDER r1, r2
289 289
290 cmp ip, #0 290 cmp ip, #0
291 mov r0, r3, lsr r2 291 mov r0, r3, lsr r2
292 rsbmi r0, r0, #0 292 rsbmi r0, r0, #0
293 mov pc, lr 293 ret lr
294 294
295UNWIND(.fnend) 295UNWIND(.fnend)
296ENDPROC(__divsi3) 296ENDPROC(__divsi3)
@@ -315,7 +315,7 @@ UNWIND(.fnstart)
315 315
31610: cmp ip, #0 31610: cmp ip, #0
317 rsbmi r0, r0, #0 317 rsbmi r0, r0, #0
318 mov pc, lr 318 ret lr
319 319
320UNWIND(.fnend) 320UNWIND(.fnend)
321ENDPROC(__modsi3) 321ENDPROC(__modsi3)
@@ -331,7 +331,7 @@ UNWIND(.save {r0, r1, ip, lr} )
331 ldmfd sp!, {r1, r2, ip, lr} 331 ldmfd sp!, {r1, r2, ip, lr}
332 mul r3, r0, r2 332 mul r3, r0, r2
333 sub r1, r1, r3 333 sub r1, r1, r3
334 mov pc, lr 334 ret lr
335 335
336UNWIND(.fnend) 336UNWIND(.fnend)
337ENDPROC(__aeabi_uidivmod) 337ENDPROC(__aeabi_uidivmod)
@@ -344,7 +344,7 @@ UNWIND(.save {r0, r1, ip, lr} )
344 ldmfd sp!, {r1, r2, ip, lr} 344 ldmfd sp!, {r1, r2, ip, lr}
345 mul r3, r0, r2 345 mul r3, r0, r2
346 sub r1, r1, r3 346 sub r1, r1, r3
347 mov pc, lr 347 ret lr
348 348
349UNWIND(.fnend) 349UNWIND(.fnend)
350ENDPROC(__aeabi_idivmod) 350ENDPROC(__aeabi_idivmod)
diff --git a/arch/arm/lib/lshrdi3.S b/arch/arm/lib/lshrdi3.S
index f83d449141f7..922dcd88b02b 100644
--- a/arch/arm/lib/lshrdi3.S
+++ b/arch/arm/lib/lshrdi3.S
@@ -27,6 +27,7 @@ Boston, MA 02110-1301, USA. */
27 27
28 28
29#include <linux/linkage.h> 29#include <linux/linkage.h>
30#include <asm/assembler.h>
30 31
31#ifdef __ARMEB__ 32#ifdef __ARMEB__
32#define al r1 33#define al r1
@@ -47,7 +48,7 @@ ENTRY(__aeabi_llsr)
47 THUMB( lslmi r3, ah, ip ) 48 THUMB( lslmi r3, ah, ip )
48 THUMB( orrmi al, al, r3 ) 49 THUMB( orrmi al, al, r3 )
49 mov ah, ah, lsr r2 50 mov ah, ah, lsr r2
50 mov pc, lr 51 ret lr
51 52
52ENDPROC(__lshrdi3) 53ENDPROC(__lshrdi3)
53ENDPROC(__aeabi_llsr) 54ENDPROC(__aeabi_llsr)
diff --git a/arch/arm/lib/memchr.S b/arch/arm/lib/memchr.S
index 1da86991d700..74a5bed6d999 100644
--- a/arch/arm/lib/memchr.S
+++ b/arch/arm/lib/memchr.S
@@ -22,5 +22,5 @@ ENTRY(memchr)
22 bne 1b 22 bne 1b
23 sub r0, r0, #1 23 sub r0, r0, #1
242: movne r0, #0 242: movne r0, #0
25 mov pc, lr 25 ret lr
26ENDPROC(memchr) 26ENDPROC(memchr)
diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S
index 94b0650ea98f..671455c854fa 100644
--- a/arch/arm/lib/memset.S
+++ b/arch/arm/lib/memset.S
@@ -110,7 +110,7 @@ ENTRY(memset)
110 strneb r1, [ip], #1 110 strneb r1, [ip], #1
111 tst r2, #1 111 tst r2, #1
112 strneb r1, [ip], #1 112 strneb r1, [ip], #1
113 mov pc, lr 113 ret lr
114 114
1156: subs r2, r2, #4 @ 1 do we have enough 1156: subs r2, r2, #4 @ 1 do we have enough
116 blt 5b @ 1 bytes to align with? 116 blt 5b @ 1 bytes to align with?
diff --git a/arch/arm/lib/memzero.S b/arch/arm/lib/memzero.S
index 3fbdef5f802a..385ccb306fa2 100644
--- a/arch/arm/lib/memzero.S
+++ b/arch/arm/lib/memzero.S
@@ -121,5 +121,5 @@ ENTRY(__memzero)
121 strneb r2, [r0], #1 @ 1 121 strneb r2, [r0], #1 @ 1
122 tst r1, #1 @ 1 a byte left over 122 tst r1, #1 @ 1 a byte left over
123 strneb r2, [r0], #1 @ 1 123 strneb r2, [r0], #1 @ 1
124 mov pc, lr @ 1 124 ret lr @ 1
125ENDPROC(__memzero) 125ENDPROC(__memzero)
diff --git a/arch/arm/lib/muldi3.S b/arch/arm/lib/muldi3.S
index 36c91b4957e2..204305956925 100644
--- a/arch/arm/lib/muldi3.S
+++ b/arch/arm/lib/muldi3.S
@@ -11,6 +11,7 @@
11 */ 11 */
12 12
13#include <linux/linkage.h> 13#include <linux/linkage.h>
14#include <asm/assembler.h>
14 15
15#ifdef __ARMEB__ 16#ifdef __ARMEB__
16#define xh r0 17#define xh r0
@@ -41,7 +42,7 @@ ENTRY(__aeabi_lmul)
41 adc xh, xh, yh, lsr #16 42 adc xh, xh, yh, lsr #16
42 adds xl, xl, ip, lsl #16 43 adds xl, xl, ip, lsl #16
43 adc xh, xh, ip, lsr #16 44 adc xh, xh, ip, lsr #16
44 mov pc, lr 45 ret lr
45 46
46ENDPROC(__muldi3) 47ENDPROC(__muldi3)
47ENDPROC(__aeabi_lmul) 48ENDPROC(__aeabi_lmul)
diff --git a/arch/arm/lib/putuser.S b/arch/arm/lib/putuser.S
index 3d73dcb959b0..38d660d3705f 100644
--- a/arch/arm/lib/putuser.S
+++ b/arch/arm/lib/putuser.S
@@ -36,7 +36,7 @@ ENTRY(__put_user_1)
36 check_uaccess r0, 1, r1, ip, __put_user_bad 36 check_uaccess r0, 1, r1, ip, __put_user_bad
371: TUSER(strb) r2, [r0] 371: TUSER(strb) r2, [r0]
38 mov r0, #0 38 mov r0, #0
39 mov pc, lr 39 ret lr
40ENDPROC(__put_user_1) 40ENDPROC(__put_user_1)
41 41
42ENTRY(__put_user_2) 42ENTRY(__put_user_2)
@@ -60,14 +60,14 @@ ENTRY(__put_user_2)
60#endif 60#endif
61#endif /* CONFIG_THUMB2_KERNEL */ 61#endif /* CONFIG_THUMB2_KERNEL */
62 mov r0, #0 62 mov r0, #0
63 mov pc, lr 63 ret lr
64ENDPROC(__put_user_2) 64ENDPROC(__put_user_2)
65 65
66ENTRY(__put_user_4) 66ENTRY(__put_user_4)
67 check_uaccess r0, 4, r1, ip, __put_user_bad 67 check_uaccess r0, 4, r1, ip, __put_user_bad
684: TUSER(str) r2, [r0] 684: TUSER(str) r2, [r0]
69 mov r0, #0 69 mov r0, #0
70 mov pc, lr 70 ret lr
71ENDPROC(__put_user_4) 71ENDPROC(__put_user_4)
72 72
73ENTRY(__put_user_8) 73ENTRY(__put_user_8)
@@ -80,12 +80,12 @@ ENTRY(__put_user_8)
806: TUSER(str) r3, [r0] 806: TUSER(str) r3, [r0]
81#endif 81#endif
82 mov r0, #0 82 mov r0, #0
83 mov pc, lr 83 ret lr
84ENDPROC(__put_user_8) 84ENDPROC(__put_user_8)
85 85
86__put_user_bad: 86__put_user_bad:
87 mov r0, #-EFAULT 87 mov r0, #-EFAULT
88 mov pc, lr 88 ret lr
89ENDPROC(__put_user_bad) 89ENDPROC(__put_user_bad)
90 90
91.pushsection __ex_table, "a" 91.pushsection __ex_table, "a"
diff --git a/arch/arm/lib/strchr.S b/arch/arm/lib/strchr.S
index d8f2a1c1aea4..013d64c71e8d 100644
--- a/arch/arm/lib/strchr.S
+++ b/arch/arm/lib/strchr.S
@@ -23,5 +23,5 @@ ENTRY(strchr)
23 teq r2, r1 23 teq r2, r1
24 movne r0, #0 24 movne r0, #0
25 subeq r0, r0, #1 25 subeq r0, r0, #1
26 mov pc, lr 26 ret lr
27ENDPROC(strchr) 27ENDPROC(strchr)
diff --git a/arch/arm/lib/strrchr.S b/arch/arm/lib/strrchr.S
index 302f20cd2423..3cec1c7482c4 100644
--- a/arch/arm/lib/strrchr.S
+++ b/arch/arm/lib/strrchr.S
@@ -22,5 +22,5 @@ ENTRY(strrchr)
22 teq r2, #0 22 teq r2, #0
23 bne 1b 23 bne 1b
24 mov r0, r3 24 mov r0, r3
25 mov pc, lr 25 ret lr
26ENDPROC(strrchr) 26ENDPROC(strrchr)
diff --git a/arch/arm/lib/ucmpdi2.S b/arch/arm/lib/ucmpdi2.S
index f0df6a91db04..ad4a6309141a 100644
--- a/arch/arm/lib/ucmpdi2.S
+++ b/arch/arm/lib/ucmpdi2.S
@@ -11,6 +11,7 @@
11 */ 11 */
12 12
13#include <linux/linkage.h> 13#include <linux/linkage.h>
14#include <asm/assembler.h>
14 15
15#ifdef __ARMEB__ 16#ifdef __ARMEB__
16#define xh r0 17#define xh r0
@@ -31,7 +32,7 @@ ENTRY(__ucmpdi2)
31 movlo r0, #0 32 movlo r0, #0
32 moveq r0, #1 33 moveq r0, #1
33 movhi r0, #2 34 movhi r0, #2
34 mov pc, lr 35 ret lr
35 36
36ENDPROC(__ucmpdi2) 37ENDPROC(__ucmpdi2)
37 38
@@ -44,7 +45,7 @@ ENTRY(__aeabi_ulcmp)
44 movlo r0, #-1 45 movlo r0, #-1
45 moveq r0, #0 46 moveq r0, #0
46 movhi r0, #1 47 movhi r0, #1
47 mov pc, lr 48 ret lr
48 49
49ENDPROC(__aeabi_ulcmp) 50ENDPROC(__aeabi_ulcmp)
50 51
diff --git a/arch/arm/mach-davinci/sleep.S b/arch/arm/mach-davinci/sleep.S
index d4e9316ecacb..a5336a5e2739 100644
--- a/arch/arm/mach-davinci/sleep.S
+++ b/arch/arm/mach-davinci/sleep.S
@@ -213,7 +213,7 @@ ddr2clk_stop_done:
213 cmp ip, r0 213 cmp ip, r0
214 bne ddr2clk_stop_done 214 bne ddr2clk_stop_done
215 215
216 mov pc, lr 216 ret lr
217ENDPROC(davinci_ddr_psc_config) 217ENDPROC(davinci_ddr_psc_config)
218 218
219CACHE_FLUSH: 219CACHE_FLUSH:
diff --git a/arch/arm/mach-ebsa110/include/mach/memory.h b/arch/arm/mach-ebsa110/include/mach/memory.h
index 8e49066ad850..866f8a1c6ff7 100644
--- a/arch/arm/mach-ebsa110/include/mach/memory.h
+++ b/arch/arm/mach-ebsa110/include/mach/memory.h
@@ -17,11 +17,6 @@
17#define __ASM_ARCH_MEMORY_H 17#define __ASM_ARCH_MEMORY_H
18 18
19/* 19/*
20 * Physical DRAM offset.
21 */
22#define PLAT_PHYS_OFFSET UL(0x00000000)
23
24/*
25 * Cache flushing area - SRAM 20 * Cache flushing area - SRAM
26 */ 21 */
27#define FLUSH_BASE_PHYS 0x40000000 22#define FLUSH_BASE_PHYS 0x40000000
diff --git a/arch/arm/mach-ep93xx/crunch-bits.S b/arch/arm/mach-ep93xx/crunch-bits.S
index e96923a3017b..ee0be2af5c61 100644
--- a/arch/arm/mach-ep93xx/crunch-bits.S
+++ b/arch/arm/mach-ep93xx/crunch-bits.S
@@ -198,7 +198,7 @@ crunch_load:
198 get_thread_info r10 198 get_thread_info r10
199#endif 199#endif
2002: dec_preempt_count r10, r3 2002: dec_preempt_count r10, r3
201 mov pc, lr 201 ret lr
202 202
203/* 203/*
204 * Back up crunch regs to save area and disable access to them 204 * Back up crunch regs to save area and disable access to them
@@ -277,7 +277,7 @@ ENTRY(crunch_task_copy)
277 mov r3, lr @ preserve return address 277 mov r3, lr @ preserve return address
278 bl crunch_save 278 bl crunch_save
279 msr cpsr_c, ip @ restore interrupt mode 279 msr cpsr_c, ip @ restore interrupt mode
280 mov pc, r3 280 ret r3
281 281
282/* 282/*
283 * Restore crunch state from given memory address 283 * Restore crunch state from given memory address
@@ -310,4 +310,4 @@ ENTRY(crunch_task_restore)
310 mov r3, lr @ preserve return address 310 mov r3, lr @ preserve return address
311 bl crunch_load 311 bl crunch_load
312 msr cpsr_c, ip @ restore interrupt mode 312 msr cpsr_c, ip @ restore interrupt mode
313 mov pc, r3 313 ret r3
diff --git a/arch/arm/mach-ep93xx/include/mach/memory.h b/arch/arm/mach-ep93xx/include/mach/memory.h
deleted file mode 100644
index c9400cf0051c..000000000000
--- a/arch/arm/mach-ep93xx/include/mach/memory.h
+++ /dev/null
@@ -1,22 +0,0 @@
1/*
2 * arch/arm/mach-ep93xx/include/mach/memory.h
3 */
4
5#ifndef __ASM_ARCH_MEMORY_H
6#define __ASM_ARCH_MEMORY_H
7
8#if defined(CONFIG_EP93XX_SDCE3_SYNC_PHYS_OFFSET)
9#define PLAT_PHYS_OFFSET UL(0x00000000)
10#elif defined(CONFIG_EP93XX_SDCE0_PHYS_OFFSET)
11#define PLAT_PHYS_OFFSET UL(0xc0000000)
12#elif defined(CONFIG_EP93XX_SDCE1_PHYS_OFFSET)
13#define PLAT_PHYS_OFFSET UL(0xd0000000)
14#elif defined(CONFIG_EP93XX_SDCE2_PHYS_OFFSET)
15#define PLAT_PHYS_OFFSET UL(0xe0000000)
16#elif defined(CONFIG_EP93XX_SDCE3_ASYNC_PHYS_OFFSET)
17#define PLAT_PHYS_OFFSET UL(0xf0000000)
18#else
19#error "Kconfig bug: No EP93xx PHYS_OFFSET set"
20#endif
21
22#endif
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index 8f9b66c4ac78..5d4ff6571dcd 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -119,6 +119,7 @@ config EXYNOS5420_MCPM
119 bool "Exynos5420 Multi-Cluster PM support" 119 bool "Exynos5420 Multi-Cluster PM support"
120 depends on MCPM && SOC_EXYNOS5420 120 depends on MCPM && SOC_EXYNOS5420
121 select ARM_CCI 121 select ARM_CCI
122 select ARM_CPU_SUSPEND
122 help 123 help
123 This is needed to provide CPU and cluster power management 124 This is needed to provide CPU and cluster power management
124 on Exynos5420 implementing big.LITTLE. 125 on Exynos5420 implementing big.LITTLE.
diff --git a/arch/arm/mach-exynos/hotplug.c b/arch/arm/mach-exynos/hotplug.c
index 69fa48397394..8a134d019cb3 100644
--- a/arch/arm/mach-exynos/hotplug.c
+++ b/arch/arm/mach-exynos/hotplug.c
@@ -46,13 +46,7 @@ static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
46 if (cpu == 1) 46 if (cpu == 1)
47 exynos_cpu_power_down(cpu); 47 exynos_cpu_power_down(cpu);
48 48
49 /* 49 wfi();
50 * here's the WFI
51 */
52 asm(".word 0xe320f003\n"
53 :
54 :
55 : "memory", "cc");
56 50
57 if (pen_release == cpu_logical_map(cpu)) { 51 if (pen_release == cpu_logical_map(cpu)) {
58 /* 52 /*
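
The hotplug hunk above drops a hand-encoded .word 0xe320f003 (the A32 encoding of WFI, emitted as a literal so that older assemblers would accept it) in favour of the wfi() helper. A minimal sketch of what such a helper boils down to, assuming nothing beyond an ARMv7 assembler; the kernel's own wfi() macro remains the authoritative definition:

    /* Hedged sketch only: an ARMv7 wait-for-interrupt helper. */
    static inline void wfi_sketch(void)
    {
        /* 0xe320f003 is the A32 encoding of this instruction */
        asm volatile("wfi" ::: "memory", "cc");
    }
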
diff --git a/arch/arm/mach-exynos/mcpm-exynos.c b/arch/arm/mach-exynos/mcpm-exynos.c
index 0498d0b887ef..a96b78f93f2b 100644
--- a/arch/arm/mach-exynos/mcpm-exynos.c
+++ b/arch/arm/mach-exynos/mcpm-exynos.c
@@ -25,7 +25,6 @@
25 25
26#define EXYNOS5420_CPUS_PER_CLUSTER 4 26#define EXYNOS5420_CPUS_PER_CLUSTER 4
27#define EXYNOS5420_NR_CLUSTERS 2 27#define EXYNOS5420_NR_CLUSTERS 2
28#define MCPM_BOOT_ADDR_OFFSET 0x1c
29 28
30/* 29/*
31 * The common v7_exit_coherency_flush API could not be used because of the 30 * The common v7_exit_coherency_flush API could not be used because of the
@@ -197,7 +196,7 @@ static void exynos_power_down(void)
197 if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) { 196 if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
198 arch_spin_unlock(&exynos_mcpm_lock); 197 arch_spin_unlock(&exynos_mcpm_lock);
199 198
200 if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A15) { 199 if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
201 /* 200 /*
202 * On the Cortex-A15 we need to disable 201 * On the Cortex-A15 we need to disable
203 * L2 prefetching before flushing the cache. 202 * L2 prefetching before flushing the cache.
@@ -290,6 +289,19 @@ static void __naked exynos_pm_power_up_setup(unsigned int affinity_level)
290 "b cci_enable_port_for_self"); 289 "b cci_enable_port_for_self");
291} 290}
292 291
292static void __init exynos_cache_off(void)
293{
294 if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
295 /* disable L2 prefetching on the Cortex-A15 */
296 asm volatile(
297 "mcr p15, 1, %0, c15, c0, 3\n\t"
298 "isb\n\t"
299 "dsb"
300 : : "r" (0x400));
301 }
302 exynos_v7_exit_coherency_flush(all);
303}
304
293static const struct of_device_id exynos_dt_mcpm_match[] = { 305static const struct of_device_id exynos_dt_mcpm_match[] = {
294 { .compatible = "samsung,exynos5420" }, 306 { .compatible = "samsung,exynos5420" },
295 { .compatible = "samsung,exynos5800" }, 307 { .compatible = "samsung,exynos5800" },
@@ -333,6 +345,8 @@ static int __init exynos_mcpm_init(void)
333 ret = mcpm_platform_register(&exynos_power_ops); 345 ret = mcpm_platform_register(&exynos_power_ops);
334 if (!ret) 346 if (!ret)
335 ret = mcpm_sync_init(exynos_pm_power_up_setup); 347 ret = mcpm_sync_init(exynos_pm_power_up_setup);
348 if (!ret)
349 ret = mcpm_loopback(exynos_cache_off); /* turn on the CCI */
336 if (ret) { 350 if (ret) {
337 iounmap(ns_sram_base_addr); 351 iounmap(ns_sram_base_addr);
338 return ret; 352 return ret;
@@ -343,11 +357,13 @@ static int __init exynos_mcpm_init(void)
343 pr_info("Exynos MCPM support installed\n"); 357 pr_info("Exynos MCPM support installed\n");
344 358
345 /* 359 /*
346 * Future entries into the kernel can now go 360 * U-Boot SPL is hardcoded to jump to the start of ns_sram_base_addr
347 * through the cluster entry vectors. 361 * as part of secondary_cpu_start(). Let's redirect it to the
362 * mcpm_entry_point().
348 */ 363 */
349 __raw_writel(virt_to_phys(mcpm_entry_point), 364 __raw_writel(0xe59f0000, ns_sram_base_addr); /* ldr r0, [pc, #0] */
350 ns_sram_base_addr + MCPM_BOOT_ADDR_OFFSET); 365 __raw_writel(0xe12fff10, ns_sram_base_addr + 4); /* bx r0 */
366 __raw_writel(virt_to_phys(mcpm_entry_point), ns_sram_base_addr + 8);
351 367
352 iounmap(ns_sram_base_addr); 368 iounmap(ns_sram_base_addr);
353 369
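
The tail of the mcpm-exynos.c hunk replaces the old MCPM_BOOT_ADDR_OFFSET poke with a three-word trampoline at the very start of the non-secure SYSRAM, because U-Boot SPL is hardcoded to jump to offset 0 of that area. The two opcodes decode as noted in the diff comments; a hedged sketch of the same sequence, with sram standing in for the ioremapped ns_sram_base_addr:

    #include <linux/io.h>

    /* Hedged sketch of the SYSRAM trampoline written above. */
    static void write_entry_trampoline(void __iomem *sram, u32 entry_phys)
    {
        /* ldr r0, [pc, #0]: with the ARM pipeline (pc = . + 8) this loads
         * the literal stored at offset 8 below */
        __raw_writel(0xe59f0000, sram);
        /* bx r0: jump to the address just loaded */
        __raw_writel(0xe12fff10, sram + 4);
        /* literal pool: physical address of mcpm_entry_point() */
        __raw_writel(entry_phys, sram + 8);
    }
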
diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c
index 1c8d31e39520..8dc1d3a3a8bf 100644
--- a/arch/arm/mach-exynos/platsmp.c
+++ b/arch/arm/mach-exynos/platsmp.c
@@ -188,7 +188,7 @@ static void __init exynos_smp_init_cpus(void)
188 void __iomem *scu_base = scu_base_addr(); 188 void __iomem *scu_base = scu_base_addr();
189 unsigned int i, ncores; 189 unsigned int i, ncores;
190 190
191 if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9) 191 if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
192 ncores = scu_base ? scu_get_core_count(scu_base) : 1; 192 ncores = scu_base ? scu_get_core_count(scu_base) : 1;
193 else 193 else
194 /* 194 /*
@@ -214,7 +214,7 @@ static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
214 214
215 exynos_sysram_init(); 215 exynos_sysram_init();
216 216
217 if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9) 217 if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
218 scu_enable(scu_base_addr()); 218 scu_enable(scu_base_addr());
219 219
220 /* 220 /*
diff --git a/arch/arm/mach-exynos/pm.c b/arch/arm/mach-exynos/pm.c
index 87c0d34c7fba..67d383de614f 100644
--- a/arch/arm/mach-exynos/pm.c
+++ b/arch/arm/mach-exynos/pm.c
@@ -300,7 +300,7 @@ static int exynos_pm_suspend(void)
300 tmp = (S5P_USE_STANDBY_WFI0 | S5P_USE_STANDBY_WFE0); 300 tmp = (S5P_USE_STANDBY_WFI0 | S5P_USE_STANDBY_WFE0);
301 __raw_writel(tmp, S5P_CENTRAL_SEQ_OPTION); 301 __raw_writel(tmp, S5P_CENTRAL_SEQ_OPTION);
302 302
303 if (!soc_is_exynos5250()) 303 if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
304 exynos_cpu_save_register(); 304 exynos_cpu_save_register();
305 305
306 return 0; 306 return 0;
@@ -334,7 +334,7 @@ static void exynos_pm_resume(void)
334 if (exynos_pm_central_resume()) 334 if (exynos_pm_central_resume())
335 goto early_wakeup; 335 goto early_wakeup;
336 336
337 if (!soc_is_exynos5250()) 337 if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
338 exynos_cpu_restore_register(); 338 exynos_cpu_restore_register();
339 339
340 /* For release retention */ 340 /* For release retention */
@@ -353,7 +353,7 @@ static void exynos_pm_resume(void)
353 353
354 s3c_pm_do_restore_core(exynos_core_save, ARRAY_SIZE(exynos_core_save)); 354 s3c_pm_do_restore_core(exynos_core_save, ARRAY_SIZE(exynos_core_save));
355 355
356 if (!soc_is_exynos5250()) 356 if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
357 scu_enable(S5P_VA_SCU); 357 scu_enable(S5P_VA_SCU);
358 358
359early_wakeup: 359early_wakeup:
@@ -440,15 +440,17 @@ static int exynos_cpu_pm_notifier(struct notifier_block *self,
440 case CPU_PM_ENTER: 440 case CPU_PM_ENTER:
441 if (cpu == 0) { 441 if (cpu == 0) {
442 exynos_pm_central_suspend(); 442 exynos_pm_central_suspend();
443 exynos_cpu_save_register(); 443 if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
444 exynos_cpu_save_register();
444 } 445 }
445 break; 446 break;
446 447
447 case CPU_PM_EXIT: 448 case CPU_PM_EXIT:
448 if (cpu == 0) { 449 if (cpu == 0) {
449 if (!soc_is_exynos5250()) 450 if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9) {
450 scu_enable(S5P_VA_SCU); 451 scu_enable(S5P_VA_SCU);
451 exynos_cpu_restore_register(); 452 exynos_cpu_restore_register();
453 }
452 exynos_pm_central_resume(); 454 exynos_pm_central_resume();
453 } 455 }
454 break; 456 break;
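
Several hunks in this region swap soc_is_exynos5250()/read_cpuid_part_number() checks for read_cpuid_part() comparisons against ARM_CPU_PART_CORTEX_A9, i.e. the decision is made from the CPU's MIDR rather than from the SoC identity. A hedged sketch of what such a check amounts to; the mask and part values below are illustrative, the authoritative ones live in <asm/cputype.h>:

    #include <linux/types.h>

    /* Hedged sketch: read the Main ID Register and compare implementer plus
     * part number, ignoring variant and revision. */
    static inline unsigned int read_midr(void)
    {
        unsigned int midr;

        asm volatile("mrc p15, 0, %0, c0, c0, 0" : "=r" (midr));
        return midr;
    }

    static inline bool cpu_is_cortex_a9(void)
    {
        return (read_midr() & 0xff00fff0) == 0x4100c090;  /* 0x41 = ARM, 0xC09 = A9 */
    }
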
diff --git a/arch/arm/mach-footbridge/include/mach/memory.h b/arch/arm/mach-footbridge/include/mach/memory.h
index 5c6df377f969..6f2ecccdf323 100644
--- a/arch/arm/mach-footbridge/include/mach/memory.h
+++ b/arch/arm/mach-footbridge/include/mach/memory.h
@@ -59,11 +59,6 @@ extern unsigned long __bus_to_pfn(unsigned long);
59 */ 59 */
60#define FLUSH_BASE 0xf9000000 60#define FLUSH_BASE 0xf9000000
61 61
62/*
63 * Physical DRAM offset.
64 */
65#define PLAT_PHYS_OFFSET UL(0x00000000)
66
67#define FLUSH_BASE_PHYS 0x50000000 62#define FLUSH_BASE_PHYS 0x50000000
68 63
69#endif 64#endif
diff --git a/arch/arm/mach-imx/clk-imx6sl.c b/arch/arm/mach-imx/clk-imx6sl.c
index 21cf06cebade..5408ca70c8d6 100644
--- a/arch/arm/mach-imx/clk-imx6sl.c
+++ b/arch/arm/mach-imx/clk-imx6sl.c
@@ -312,6 +312,7 @@ static void __init imx6sl_clocks_init(struct device_node *ccm_node)
312 clks[IMX6SL_CLK_ECSPI2] = imx_clk_gate2("ecspi2", "ecspi_root", base + 0x6c, 2); 312 clks[IMX6SL_CLK_ECSPI2] = imx_clk_gate2("ecspi2", "ecspi_root", base + 0x6c, 2);
313 clks[IMX6SL_CLK_ECSPI3] = imx_clk_gate2("ecspi3", "ecspi_root", base + 0x6c, 4); 313 clks[IMX6SL_CLK_ECSPI3] = imx_clk_gate2("ecspi3", "ecspi_root", base + 0x6c, 4);
314 clks[IMX6SL_CLK_ECSPI4] = imx_clk_gate2("ecspi4", "ecspi_root", base + 0x6c, 6); 314 clks[IMX6SL_CLK_ECSPI4] = imx_clk_gate2("ecspi4", "ecspi_root", base + 0x6c, 6);
315 clks[IMX6SL_CLK_ENET] = imx_clk_gate2("enet", "ipg", base + 0x6c, 10);
315 clks[IMX6SL_CLK_EPIT1] = imx_clk_gate2("epit1", "perclk", base + 0x6c, 12); 316 clks[IMX6SL_CLK_EPIT1] = imx_clk_gate2("epit1", "perclk", base + 0x6c, 12);
316 clks[IMX6SL_CLK_EPIT2] = imx_clk_gate2("epit2", "perclk", base + 0x6c, 14); 317 clks[IMX6SL_CLK_EPIT2] = imx_clk_gate2("epit2", "perclk", base + 0x6c, 14);
317 clks[IMX6SL_CLK_EXTERN_AUDIO] = imx_clk_gate2("extern_audio", "extern_audio_podf", base + 0x6c, 16); 318 clks[IMX6SL_CLK_EXTERN_AUDIO] = imx_clk_gate2("extern_audio", "extern_audio_podf", base + 0x6c, 16);
diff --git a/arch/arm/mach-imx/suspend-imx6.S b/arch/arm/mach-imx/suspend-imx6.S
index fe123b079c05..74b50f1982db 100644
--- a/arch/arm/mach-imx/suspend-imx6.S
+++ b/arch/arm/mach-imx/suspend-imx6.S
@@ -10,6 +10,7 @@
10 */ 10 */
11 11
12#include <linux/linkage.h> 12#include <linux/linkage.h>
13#include <asm/assembler.h>
13#include <asm/asm-offsets.h> 14#include <asm/asm-offsets.h>
14#include <asm/hardware/cache-l2x0.h> 15#include <asm/hardware/cache-l2x0.h>
15#include "hardware.h" 16#include "hardware.h"
@@ -301,7 +302,7 @@ rbc_loop:
301 resume_mmdc 302 resume_mmdc
302 303
303 /* return to suspend finish */ 304 /* return to suspend finish */
304 mov pc, lr 305 ret lr
305 306
306resume: 307resume:
307 /* invalidate L1 I-cache first */ 308 /* invalidate L1 I-cache first */
@@ -325,7 +326,7 @@ resume:
325 mov r5, #0x1 326 mov r5, #0x1
326 resume_mmdc 327 resume_mmdc
327 328
328 mov pc, lr 329 ret lr
329ENDPROC(imx6_suspend) 330ENDPROC(imx6_suspend)
330 331
331/* 332/*
diff --git a/arch/arm/mach-integrator/include/mach/memory.h b/arch/arm/mach-integrator/include/mach/memory.h
index 334d5e271889..7268cb50ded0 100644
--- a/arch/arm/mach-integrator/include/mach/memory.h
+++ b/arch/arm/mach-integrator/include/mach/memory.h
@@ -20,11 +20,6 @@
20#ifndef __ASM_ARCH_MEMORY_H 20#ifndef __ASM_ARCH_MEMORY_H
21#define __ASM_ARCH_MEMORY_H 21#define __ASM_ARCH_MEMORY_H
22 22
23/*
24 * Physical DRAM offset.
25 */
26#define PLAT_PHYS_OFFSET UL(0x00000000)
27
28#define BUS_OFFSET UL(0x80000000) 23#define BUS_OFFSET UL(0x80000000)
29#define __virt_to_bus(x) ((x) - PAGE_OFFSET + BUS_OFFSET) 24#define __virt_to_bus(x) ((x) - PAGE_OFFSET + BUS_OFFSET)
30#define __bus_to_virt(x) ((x) - BUS_OFFSET + PAGE_OFFSET) 25#define __bus_to_virt(x) ((x) - BUS_OFFSET + PAGE_OFFSET)
diff --git a/arch/arm/mach-integrator/integrator_ap.c b/arch/arm/mach-integrator/integrator_ap.c
index dd0cc677d596..660ca6feff40 100644
--- a/arch/arm/mach-integrator/integrator_ap.c
+++ b/arch/arm/mach-integrator/integrator_ap.c
@@ -480,25 +480,18 @@ static const struct of_device_id ebi_match[] = {
480static void __init ap_init_of(void) 480static void __init ap_init_of(void)
481{ 481{
482 unsigned long sc_dec; 482 unsigned long sc_dec;
483 struct device_node *root;
484 struct device_node *syscon; 483 struct device_node *syscon;
485 struct device_node *ebi; 484 struct device_node *ebi;
486 struct device *parent; 485 struct device *parent;
487 struct soc_device *soc_dev; 486 struct soc_device *soc_dev;
488 struct soc_device_attribute *soc_dev_attr; 487 struct soc_device_attribute *soc_dev_attr;
489 u32 ap_sc_id; 488 u32 ap_sc_id;
490 int err;
491 int i; 489 int i;
492 490
493 /* Here we create an SoC device for the root node */ 491 syscon = of_find_matching_node(NULL, ap_syscon_match);
494 root = of_find_node_by_path("/");
495 if (!root)
496 return;
497
498 syscon = of_find_matching_node(root, ap_syscon_match);
499 if (!syscon) 492 if (!syscon)
500 return; 493 return;
501 ebi = of_find_matching_node(root, ebi_match); 494 ebi = of_find_matching_node(NULL, ebi_match);
502 if (!ebi) 495 if (!ebi)
503 return; 496 return;
504 497
@@ -509,19 +502,17 @@ static void __init ap_init_of(void)
509 if (!ebi_base) 502 if (!ebi_base)
510 return; 503 return;
511 504
505 of_platform_populate(NULL, of_default_bus_match_table,
506 ap_auxdata_lookup, NULL);
507
512 ap_sc_id = readl(ap_syscon_base); 508 ap_sc_id = readl(ap_syscon_base);
513 509
514 soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL); 510 soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
515 if (!soc_dev_attr) 511 if (!soc_dev_attr)
516 return; 512 return;
517 513
518 err = of_property_read_string(root, "compatible", 514 soc_dev_attr->soc_id = "XVC";
519 &soc_dev_attr->soc_id); 515 soc_dev_attr->machine = "Integrator/AP";
520 if (err)
521 return;
522 err = of_property_read_string(root, "model", &soc_dev_attr->machine);
523 if (err)
524 return;
525 soc_dev_attr->family = "Integrator"; 516 soc_dev_attr->family = "Integrator";
526 soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%c", 517 soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%c",
527 'A' + (ap_sc_id & 0x0f)); 518 'A' + (ap_sc_id & 0x0f));
@@ -536,9 +527,6 @@ static void __init ap_init_of(void)
536 parent = soc_device_to_device(soc_dev); 527 parent = soc_device_to_device(soc_dev);
537 integrator_init_sysfs(parent, ap_sc_id); 528 integrator_init_sysfs(parent, ap_sc_id);
538 529
539 of_platform_populate(root, of_default_bus_match_table,
540 ap_auxdata_lookup, parent);
541
542 sc_dec = readl(ap_syscon_base + INTEGRATOR_SC_DEC_OFFSET); 530 sc_dec = readl(ap_syscon_base + INTEGRATOR_SC_DEC_OFFSET);
543 for (i = 0; i < 4; i++) { 531 for (i = 0; i < 4; i++) {
544 struct lm_device *lmdev; 532 struct lm_device *lmdev;
diff --git a/arch/arm/mach-integrator/integrator_cp.c b/arch/arm/mach-integrator/integrator_cp.c
index a938242b0c95..0e57f8f820a5 100644
--- a/arch/arm/mach-integrator/integrator_cp.c
+++ b/arch/arm/mach-integrator/integrator_cp.c
@@ -279,20 +279,13 @@ static const struct of_device_id intcp_syscon_match[] = {
279 279
280static void __init intcp_init_of(void) 280static void __init intcp_init_of(void)
281{ 281{
282 struct device_node *root;
283 struct device_node *cpcon; 282 struct device_node *cpcon;
284 struct device *parent; 283 struct device *parent;
285 struct soc_device *soc_dev; 284 struct soc_device *soc_dev;
286 struct soc_device_attribute *soc_dev_attr; 285 struct soc_device_attribute *soc_dev_attr;
287 u32 intcp_sc_id; 286 u32 intcp_sc_id;
288 int err;
289 287
290 /* Here we create an SoC device for the root node */ 288 cpcon = of_find_matching_node(NULL, intcp_syscon_match);
291 root = of_find_node_by_path("/");
292 if (!root)
293 return;
294
295 cpcon = of_find_matching_node(root, intcp_syscon_match);
296 if (!cpcon) 289 if (!cpcon)
297 return; 290 return;
298 291
@@ -300,19 +293,17 @@ static void __init intcp_init_of(void)
300 if (!intcp_con_base) 293 if (!intcp_con_base)
301 return; 294 return;
302 295
296 of_platform_populate(NULL, of_default_bus_match_table,
297 intcp_auxdata_lookup, NULL);
298
303 intcp_sc_id = readl(intcp_con_base); 299 intcp_sc_id = readl(intcp_con_base);
304 300
305 soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL); 301 soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
306 if (!soc_dev_attr) 302 if (!soc_dev_attr)
307 return; 303 return;
308 304
309 err = of_property_read_string(root, "compatible", 305 soc_dev_attr->soc_id = "XCV";
310 &soc_dev_attr->soc_id); 306 soc_dev_attr->machine = "Integrator/CP";
311 if (err)
312 return;
313 err = of_property_read_string(root, "model", &soc_dev_attr->machine);
314 if (err)
315 return;
316 soc_dev_attr->family = "Integrator"; 307 soc_dev_attr->family = "Integrator";
317 soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%c", 308 soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%c",
318 'A' + (intcp_sc_id & 0x0f)); 309 'A' + (intcp_sc_id & 0x0f));
@@ -326,8 +317,6 @@ static void __init intcp_init_of(void)
326 317
327 parent = soc_device_to_device(soc_dev); 318 parent = soc_device_to_device(soc_dev);
328 integrator_init_sysfs(parent, intcp_sc_id); 319 integrator_init_sysfs(parent, intcp_sc_id);
329 of_platform_populate(root, of_default_bus_match_table,
330 intcp_auxdata_lookup, parent);
331} 320}
332 321
333static const char * intcp_dt_board_compat[] = { 322static const char * intcp_dt_board_compat[] = {
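
Both Integrator hunks lean on the same device-tree API detail: passing NULL as the "from"/root argument makes of_find_matching_node() and of_platform_populate() start from the root of the tree, so the explicit of_find_node_by_path("/") bookkeeping can go and device population can happen before the SoC device is created. A hedged, self-contained sketch of that pattern; the compatible string and function names are placeholders:

    #include <linux/errno.h>
    #include <linux/init.h>
    #include <linux/of.h>
    #include <linux/of_platform.h>

    static const struct of_device_id example_syscon_match[] = {
        { .compatible = "vendor,example-syscon" },   /* hypothetical */
        { /* sentinel */ },
    };

    static int __init example_init_of(void)
    {
        struct device_node *np;

        /* NULL "from": search the whole tree, no root lookup needed */
        np = of_find_matching_node(NULL, example_syscon_match);
        if (!np)
            return -ENODEV;

        /* populate platform devices for the whole tree early on */
        of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);

        of_node_put(np);
        return 0;
    }
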
diff --git a/arch/arm/mach-iop13xx/include/mach/iop13xx.h b/arch/arm/mach-iop13xx/include/mach/iop13xx.h
index 17b40279e0a4..9311ee2126d6 100644
--- a/arch/arm/mach-iop13xx/include/mach/iop13xx.h
+++ b/arch/arm/mach-iop13xx/include/mach/iop13xx.h
@@ -3,7 +3,7 @@
3 3
4#ifndef __ASSEMBLY__ 4#ifndef __ASSEMBLY__
5 5
6#include <linux/reboot.h> 6enum reboot_mode;
7 7
8/* The ATU offsets can change based on the strapping */ 8/* The ATU offsets can change based on the strapping */
9extern u32 iop13xx_atux_pmmr_offset; 9extern u32 iop13xx_atux_pmmr_offset;
diff --git a/arch/arm/mach-iop13xx/include/mach/memory.h b/arch/arm/mach-iop13xx/include/mach/memory.h
index 7c032d0ab24a..59307e787588 100644
--- a/arch/arm/mach-iop13xx/include/mach/memory.h
+++ b/arch/arm/mach-iop13xx/include/mach/memory.h
@@ -3,11 +3,6 @@
3 3
4#include <mach/hardware.h> 4#include <mach/hardware.h>
5 5
6/*
7 * Physical DRAM offset.
8 */
9#define PLAT_PHYS_OFFSET UL(0x00000000)
10
11#ifndef __ASSEMBLY__ 6#ifndef __ASSEMBLY__
12 7
13#if defined(CONFIG_ARCH_IOP13XX) 8#if defined(CONFIG_ARCH_IOP13XX)
diff --git a/arch/arm/mach-iop13xx/setup.c b/arch/arm/mach-iop13xx/setup.c
index bca96f433495..53c316f7301e 100644
--- a/arch/arm/mach-iop13xx/setup.c
+++ b/arch/arm/mach-iop13xx/setup.c
@@ -20,6 +20,7 @@
20#include <linux/dma-mapping.h> 20#include <linux/dma-mapping.h>
21#include <linux/serial_8250.h> 21#include <linux/serial_8250.h>
22#include <linux/io.h> 22#include <linux/io.h>
23#include <linux/reboot.h>
23#ifdef CONFIG_MTD_PHYSMAP 24#ifdef CONFIG_MTD_PHYSMAP
24#include <linux/mtd/physmap.h> 25#include <linux/mtd/physmap.h>
25#endif 26#endif
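
The iop13xx pair above is a small header-decoupling change: the mach header only mentions enum reboot_mode in a prototype, so a forward declaration is enough there, and the one translation unit that needs the full definition (setup.c) now includes <linux/reboot.h> itself. A hedged sketch of the pattern with placeholder names (GCC accepts the enum forward declaration as an extension):

    /* some_header.h (illustrative) */
    enum reboot_mode;                              /* forward declaration only */
    void example_restart(enum reboot_mode mode, const char *cmd);

    /* some_file.c (illustrative) */
    #include <linux/reboot.h>                      /* full enum definition lives here */

    void example_restart(enum reboot_mode mode, const char *cmd)
    {
        /* mode can be inspected here, where the type is complete */
    }
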
diff --git a/arch/arm/mach-ks8695/include/mach/memory.h b/arch/arm/mach-ks8695/include/mach/memory.h
index 95e731a7ed6a..ab0d27fa8969 100644
--- a/arch/arm/mach-ks8695/include/mach/memory.h
+++ b/arch/arm/mach-ks8695/include/mach/memory.h
@@ -15,11 +15,6 @@
15 15
16#include <mach/hardware.h> 16#include <mach/hardware.h>
17 17
18/*
19 * Physical SRAM offset.
20 */
21#define PLAT_PHYS_OFFSET KS8695_SDRAM_PA
22
23#ifndef __ASSEMBLY__ 18#ifndef __ASSEMBLY__
24 19
25#ifdef CONFIG_PCI 20#ifdef CONFIG_PCI
diff --git a/arch/arm/mach-mvebu/Kconfig b/arch/arm/mach-mvebu/Kconfig
index 4a7c250c9a30..b9bc599a5fd0 100644
--- a/arch/arm/mach-mvebu/Kconfig
+++ b/arch/arm/mach-mvebu/Kconfig
@@ -10,6 +10,7 @@ menuconfig ARCH_MVEBU
10 select ZONE_DMA if ARM_LPAE 10 select ZONE_DMA if ARM_LPAE
11 select ARCH_REQUIRE_GPIOLIB 11 select ARCH_REQUIRE_GPIOLIB
12 select PCI_QUIRKS if PCI 12 select PCI_QUIRKS if PCI
13 select OF_ADDRESS_PCI
13 14
14if ARCH_MVEBU 15if ARCH_MVEBU
15 16
@@ -17,6 +18,7 @@ config MACH_MVEBU_V7
17 bool 18 bool
18 select ARMADA_370_XP_TIMER 19 select ARMADA_370_XP_TIMER
19 select CACHE_L2X0 20 select CACHE_L2X0
21 select ARM_CPU_SUSPEND
20 22
21config MACH_ARMADA_370 23config MACH_ARMADA_370
22 bool "Marvell Armada 370 boards" if ARCH_MULTI_V7 24 bool "Marvell Armada 370 boards" if ARCH_MULTI_V7
diff --git a/arch/arm/mach-mvebu/coherency_ll.S b/arch/arm/mach-mvebu/coherency_ll.S
index 510c29e079ca..f5d881b5d0f7 100644
--- a/arch/arm/mach-mvebu/coherency_ll.S
+++ b/arch/arm/mach-mvebu/coherency_ll.S
@@ -46,7 +46,7 @@ ENTRY(ll_get_coherency_base)
46 ldr r1, =coherency_base 46 ldr r1, =coherency_base
47 ldr r1, [r1] 47 ldr r1, [r1]
482: 482:
49 mov pc, lr 49 ret lr
50ENDPROC(ll_get_coherency_base) 50ENDPROC(ll_get_coherency_base)
51 51
52/* 52/*
@@ -63,7 +63,7 @@ ENTRY(ll_get_coherency_cpumask)
63 mov r2, #(1 << 24) 63 mov r2, #(1 << 24)
64 lsl r3, r2, r3 64 lsl r3, r2, r3
65ARM_BE8(rev r3, r3) 65ARM_BE8(rev r3, r3)
66 mov pc, lr 66 ret lr
67ENDPROC(ll_get_coherency_cpumask) 67ENDPROC(ll_get_coherency_cpumask)
68 68
69/* 69/*
@@ -94,7 +94,7 @@ ENTRY(ll_add_cpu_to_smp_group)
94 strex r1, r2, [r0] 94 strex r1, r2, [r0]
95 cmp r1, #0 95 cmp r1, #0
96 bne 1b 96 bne 1b
97 mov pc, lr 97 ret lr
98ENDPROC(ll_add_cpu_to_smp_group) 98ENDPROC(ll_add_cpu_to_smp_group)
99 99
100ENTRY(ll_enable_coherency) 100ENTRY(ll_enable_coherency)
@@ -118,7 +118,7 @@ ENTRY(ll_enable_coherency)
118 bne 1b 118 bne 1b
119 dsb 119 dsb
120 mov r0, #0 120 mov r0, #0
121 mov pc, lr 121 ret lr
122ENDPROC(ll_enable_coherency) 122ENDPROC(ll_enable_coherency)
123 123
124ENTRY(ll_disable_coherency) 124ENTRY(ll_disable_coherency)
@@ -141,7 +141,7 @@ ENTRY(ll_disable_coherency)
141 cmp r1, #0 141 cmp r1, #0
142 bne 1b 142 bne 1b
143 dsb 143 dsb
144 mov pc, lr 144 ret lr
145ENDPROC(ll_disable_coherency) 145ENDPROC(ll_disable_coherency)
146 146
147 .align 2 147 .align 2
diff --git a/arch/arm/mach-mvebu/headsmp-a9.S b/arch/arm/mach-mvebu/headsmp-a9.S
index 5925366bc03c..7c91ddb6f1f7 100644
--- a/arch/arm/mach-mvebu/headsmp-a9.S
+++ b/arch/arm/mach-mvebu/headsmp-a9.S
@@ -14,6 +14,7 @@
14 14
15#include <linux/linkage.h> 15#include <linux/linkage.h>
16#include <linux/init.h> 16#include <linux/init.h>
17#include <asm/assembler.h>
17 18
18 __CPUINIT 19 __CPUINIT
19#define CPU_RESUME_ADDR_REG 0xf10182d4 20#define CPU_RESUME_ADDR_REG 0xf10182d4
@@ -24,7 +25,7 @@
24armada_375_smp_cpu1_enable_code_start: 25armada_375_smp_cpu1_enable_code_start:
25 ldr r0, [pc, #4] 26 ldr r0, [pc, #4]
26 ldr r1, [r0] 27 ldr r1, [r0]
27 mov pc, r1 28 ret r1
28 .word CPU_RESUME_ADDR_REG 29 .word CPU_RESUME_ADDR_REG
29armada_375_smp_cpu1_enable_code_end: 30armada_375_smp_cpu1_enable_code_end:
30 31
diff --git a/arch/arm/mach-omap1/include/mach/memory.h b/arch/arm/mach-omap1/include/mach/memory.h
index 3c2530523111..058a4f7d44c5 100644
--- a/arch/arm/mach-omap1/include/mach/memory.h
+++ b/arch/arm/mach-omap1/include/mach/memory.h
@@ -6,11 +6,6 @@
6#define __ASM_ARCH_MEMORY_H 6#define __ASM_ARCH_MEMORY_H
7 7
8/* 8/*
9 * Physical DRAM offset.
10 */
11#define PLAT_PHYS_OFFSET UL(0x10000000)
12
13/*
14 * Bus address is physical address, except for OMAP-1510 Local Bus. 9 * Bus address is physical address, except for OMAP-1510 Local Bus.
15 * OMAP-1510 bus address is translated into a Local Bus address if the 10 * OMAP-1510 bus address is translated into a Local Bus address if the
16 * OMAP bus type is lbus. We do the address translation based on the 11 * OMAP bus type is lbus. We do the address translation based on the
diff --git a/arch/arm/mach-omap2/sleep44xx.S b/arch/arm/mach-omap2/sleep44xx.S
index 9086ce03ae12..b84a0122d823 100644
--- a/arch/arm/mach-omap2/sleep44xx.S
+++ b/arch/arm/mach-omap2/sleep44xx.S
@@ -10,6 +10,7 @@
10 */ 10 */
11 11
12#include <linux/linkage.h> 12#include <linux/linkage.h>
13#include <asm/assembler.h>
13#include <asm/smp_scu.h> 14#include <asm/smp_scu.h>
14#include <asm/memory.h> 15#include <asm/memory.h>
15#include <asm/hardware/cache-l2x0.h> 16#include <asm/hardware/cache-l2x0.h>
@@ -334,7 +335,7 @@ ENDPROC(omap4_cpu_resume)
334 335
335#ifndef CONFIG_OMAP4_ERRATA_I688 336#ifndef CONFIG_OMAP4_ERRATA_I688
336ENTRY(omap_bus_sync) 337ENTRY(omap_bus_sync)
337 mov pc, lr 338 ret lr
338ENDPROC(omap_bus_sync) 339ENDPROC(omap_bus_sync)
339#endif 340#endif
340 341
diff --git a/arch/arm/mach-omap2/sram242x.S b/arch/arm/mach-omap2/sram242x.S
index 680a7c56cc3e..2c88ff2d0236 100644
--- a/arch/arm/mach-omap2/sram242x.S
+++ b/arch/arm/mach-omap2/sram242x.S
@@ -101,7 +101,7 @@ i_dll_wait:
101i_dll_delay: 101i_dll_delay:
102 subs r4, r4, #0x1 102 subs r4, r4, #0x1
103 bne i_dll_delay 103 bne i_dll_delay
104 mov pc, lr 104 ret lr
105 105
106 /* 106 /*
107 * shift up or down voltage, use R9 as input to tell level. 107 * shift up or down voltage, use R9 as input to tell level.
@@ -125,7 +125,7 @@ volt_delay:
125 ldr r7, [r3] @ get timer value 125 ldr r7, [r3] @ get timer value
126 cmp r5, r7 @ time up? 126 cmp r5, r7 @ time up?
127 bhi volt_delay @ not yet->branch 127 bhi volt_delay @ not yet->branch
128 mov pc, lr @ back to caller. 128 ret lr @ back to caller.
129 129
130omap242x_sdi_cm_clksel2_pll: 130omap242x_sdi_cm_clksel2_pll:
131 .word OMAP2420_CM_REGADDR(PLL_MOD, CM_CLKSEL2) 131 .word OMAP2420_CM_REGADDR(PLL_MOD, CM_CLKSEL2)
@@ -220,7 +220,7 @@ volt_delay_c:
220 ldr r7, [r10] @ get timer value 220 ldr r7, [r10] @ get timer value
221 cmp r8, r7 @ time up? 221 cmp r8, r7 @ time up?
222 bhi volt_delay_c @ not yet->branch 222 bhi volt_delay_c @ not yet->branch
223 mov pc, lr @ back to caller 223 ret lr @ back to caller
224 224
225omap242x_srs_cm_clksel2_pll: 225omap242x_srs_cm_clksel2_pll:
226 .word OMAP2420_CM_REGADDR(PLL_MOD, CM_CLKSEL2) 226 .word OMAP2420_CM_REGADDR(PLL_MOD, CM_CLKSEL2)
diff --git a/arch/arm/mach-omap2/sram243x.S b/arch/arm/mach-omap2/sram243x.S
index a1e9edd673f4..d5deb9761fc7 100644
--- a/arch/arm/mach-omap2/sram243x.S
+++ b/arch/arm/mach-omap2/sram243x.S
@@ -101,7 +101,7 @@ i_dll_wait:
101i_dll_delay: 101i_dll_delay:
102 subs r4, r4, #0x1 102 subs r4, r4, #0x1
103 bne i_dll_delay 103 bne i_dll_delay
104 mov pc, lr 104 ret lr
105 105
106 /* 106 /*
107 * shift up or down voltage, use R9 as input to tell level. 107 * shift up or down voltage, use R9 as input to tell level.
@@ -125,7 +125,7 @@ volt_delay:
125 ldr r7, [r3] @ get timer value 125 ldr r7, [r3] @ get timer value
126 cmp r5, r7 @ time up? 126 cmp r5, r7 @ time up?
127 bhi volt_delay @ not yet->branch 127 bhi volt_delay @ not yet->branch
128 mov pc, lr @ back to caller. 128 ret lr @ back to caller.
129 129
130omap243x_sdi_cm_clksel2_pll: 130omap243x_sdi_cm_clksel2_pll:
131 .word OMAP2430_CM_REGADDR(PLL_MOD, CM_CLKSEL2) 131 .word OMAP2430_CM_REGADDR(PLL_MOD, CM_CLKSEL2)
@@ -220,7 +220,7 @@ volt_delay_c:
220 ldr r7, [r10] @ get timer value 220 ldr r7, [r10] @ get timer value
221 cmp r8, r7 @ time up? 221 cmp r8, r7 @ time up?
222 bhi volt_delay_c @ not yet->branch 222 bhi volt_delay_c @ not yet->branch
223 mov pc, lr @ back to caller 223 ret lr @ back to caller
224 224
225omap243x_srs_cm_clksel2_pll: 225omap243x_srs_cm_clksel2_pll:
226 .word OMAP2430_CM_REGADDR(PLL_MOD, CM_CLKSEL2) 226 .word OMAP2430_CM_REGADDR(PLL_MOD, CM_CLKSEL2)
diff --git a/arch/arm/mach-pxa/mioa701_bootresume.S b/arch/arm/mach-pxa/mioa701_bootresume.S
index 324d25a48c85..81591491ab94 100644
--- a/arch/arm/mach-pxa/mioa701_bootresume.S
+++ b/arch/arm/mach-pxa/mioa701_bootresume.S
@@ -29,7 +29,7 @@ ENTRY(mioa701_jumpaddr)
29 str r1, [r0] @ Early disable resume for next boot 29 str r1, [r0] @ Early disable resume for next boot
30 ldr r0, mioa701_jumpaddr @ (Murphy's Law) 30 ldr r0, mioa701_jumpaddr @ (Murphy's Law)
31 ldr r0, [r0] 31 ldr r0, [r0]
32 mov pc, r0 32 ret r0
332: 332:
34 34
35ENTRY(mioa701_bootstrap_lg) 35ENTRY(mioa701_bootstrap_lg)
diff --git a/arch/arm/mach-pxa/standby.S b/arch/arm/mach-pxa/standby.S
index 29f5f5c180b7..eab1645bb4ad 100644
--- a/arch/arm/mach-pxa/standby.S
+++ b/arch/arm/mach-pxa/standby.S
@@ -29,7 +29,7 @@ ENTRY(pxa_cpu_standby)
29 .align 5 29 .align 5
301: mcr p14, 0, r2, c7, c0, 0 @ put the system into Standby 301: mcr p14, 0, r2, c7, c0, 0 @ put the system into Standby
31 str r1, [r0] @ make sure PSSR_PH/STS are clear 31 str r1, [r0] @ make sure PSSR_PH/STS are clear
32 mov pc, lr 32 ret lr
33 33
34#endif 34#endif
35 35
@@ -108,7 +108,7 @@ ENTRY(pm_enter_standby_start)
108 bic r0, r0, #0x20000000 108 bic r0, r0, #0x20000000
109 str r0, [r1, #PXA3_DMCIER] 109 str r0, [r1, #PXA3_DMCIER]
110 110
111 mov pc, lr 111 ret lr
112ENTRY(pm_enter_standby_end) 112ENTRY(pm_enter_standby_end)
113 113
114#endif 114#endif
diff --git a/arch/arm/mach-realview/include/mach/memory.h b/arch/arm/mach-realview/include/mach/memory.h
index db09170e3832..23e7a313f75d 100644
--- a/arch/arm/mach-realview/include/mach/memory.h
+++ b/arch/arm/mach-realview/include/mach/memory.h
@@ -20,15 +20,6 @@
20#ifndef __ASM_ARCH_MEMORY_H 20#ifndef __ASM_ARCH_MEMORY_H
21#define __ASM_ARCH_MEMORY_H 21#define __ASM_ARCH_MEMORY_H
22 22
23/*
24 * Physical DRAM offset.
25 */
26#ifdef CONFIG_REALVIEW_HIGH_PHYS_OFFSET
27#define PLAT_PHYS_OFFSET UL(0x70000000)
28#else
29#define PLAT_PHYS_OFFSET UL(0x00000000)
30#endif
31
32#ifdef CONFIG_SPARSEMEM 23#ifdef CONFIG_SPARSEMEM
33 24
34/* 25/*
diff --git a/arch/arm/mach-rpc/include/mach/memory.h b/arch/arm/mach-rpc/include/mach/memory.h
index 18a221093bf5..b7e49571417d 100644
--- a/arch/arm/mach-rpc/include/mach/memory.h
+++ b/arch/arm/mach-rpc/include/mach/memory.h
@@ -19,11 +19,6 @@
19#define __ASM_ARCH_MEMORY_H 19#define __ASM_ARCH_MEMORY_H
20 20
21/* 21/*
22 * Physical DRAM offset.
23 */
24#define PLAT_PHYS_OFFSET UL(0x10000000)
25
26/*
27 * Cache flushing area - ROM 22 * Cache flushing area - ROM
28 */ 23 */
29#define FLUSH_BASE_PHYS 0x00000000 24#define FLUSH_BASE_PHYS 0x00000000
diff --git a/arch/arm/mach-s3c24xx/sleep-s3c2410.S b/arch/arm/mach-s3c24xx/sleep-s3c2410.S
index c9b91223697c..875ba8911127 100644
--- a/arch/arm/mach-s3c24xx/sleep-s3c2410.S
+++ b/arch/arm/mach-s3c24xx/sleep-s3c2410.S
@@ -66,4 +66,4 @@ s3c2410_do_sleep:
66 streq r8, [r5] @ SDRAM power-down config 66 streq r8, [r5] @ SDRAM power-down config
67 streq r9, [r6] @ CPU sleep 67 streq r9, [r6] @ CPU sleep
681: beq 1b 681: beq 1b
69 mov pc, r14 69 ret lr
diff --git a/arch/arm/mach-s3c24xx/sleep-s3c2412.S b/arch/arm/mach-s3c24xx/sleep-s3c2412.S
index 5adaceb7da13..6bf5b4d8743c 100644
--- a/arch/arm/mach-s3c24xx/sleep-s3c2412.S
+++ b/arch/arm/mach-s3c24xx/sleep-s3c2412.S
@@ -65,4 +65,4 @@ s3c2412_sleep_enter1:
65 strne r9, [r3] 65 strne r9, [r3]
66 bne s3c2412_sleep_enter1 66 bne s3c2412_sleep_enter1
67 67
68 mov pc, r14 68 ret lr
diff --git a/arch/arm/mach-s5pv210/include/mach/memory.h b/arch/arm/mach-s5pv210/include/mach/memory.h
index 2d3cfa221d5f..d584fac9156b 100644
--- a/arch/arm/mach-s5pv210/include/mach/memory.h
+++ b/arch/arm/mach-s5pv210/include/mach/memory.h
@@ -13,8 +13,6 @@
13#ifndef __ASM_ARCH_MEMORY_H 13#ifndef __ASM_ARCH_MEMORY_H
14#define __ASM_ARCH_MEMORY_H 14#define __ASM_ARCH_MEMORY_H
15 15
16#define PLAT_PHYS_OFFSET UL(0x20000000)
17
18/* 16/*
19 * Sparsemem support 17 * Sparsemem support
20 * Physical memory can be located from 0x20000000 to 0x7fffffff, 18 * Physical memory can be located from 0x20000000 to 0x7fffffff,
diff --git a/arch/arm/mach-sa1100/include/mach/memory.h b/arch/arm/mach-sa1100/include/mach/memory.h
index 12d376795abc..2054051eb797 100644
--- a/arch/arm/mach-sa1100/include/mach/memory.h
+++ b/arch/arm/mach-sa1100/include/mach/memory.h
@@ -10,11 +10,6 @@
10#include <asm/sizes.h> 10#include <asm/sizes.h>
11 11
12/* 12/*
13 * Physical DRAM offset is 0xc0000000 on the SA1100
14 */
15#define PLAT_PHYS_OFFSET UL(0xc0000000)
16
17/*
18 * Because of the wide memory address space between physical RAM banks on the 13 * Because of the wide memory address space between physical RAM banks on the
19 * SA1100, it's much convenient to use Linux's SparseMEM support to implement 14 * SA1100, it's much convenient to use Linux's SparseMEM support to implement
20 * our memory map representation. Assuming all memory nodes have equal access 15 * our memory map representation. Assuming all memory nodes have equal access
diff --git a/arch/arm/mach-shmobile/headsmp.S b/arch/arm/mach-shmobile/headsmp.S
index e5be5c88644b..293007579b8e 100644
--- a/arch/arm/mach-shmobile/headsmp.S
+++ b/arch/arm/mach-shmobile/headsmp.S
@@ -12,6 +12,7 @@
12 */ 12 */
13#include <linux/linkage.h> 13#include <linux/linkage.h>
14#include <linux/init.h> 14#include <linux/init.h>
15#include <asm/assembler.h>
15#include <asm/memory.h> 16#include <asm/memory.h>
16 17
17ENTRY(shmobile_invalidate_start) 18ENTRY(shmobile_invalidate_start)
@@ -75,7 +76,7 @@ shmobile_smp_boot_next:
75 76
76shmobile_smp_boot_found: 77shmobile_smp_boot_found:
77 ldr r0, [r7, r1, lsl #2] 78 ldr r0, [r7, r1, lsl #2]
78 mov pc, r9 79 ret r9
79ENDPROC(shmobile_smp_boot) 80ENDPROC(shmobile_smp_boot)
80 81
81ENTRY(shmobile_smp_sleep) 82ENTRY(shmobile_smp_sleep)
diff --git a/arch/arm/mach-tegra/sleep-tegra20.S b/arch/arm/mach-tegra/sleep-tegra20.S
index aaaf3abd2688..be4bc5f853f5 100644
--- a/arch/arm/mach-tegra/sleep-tegra20.S
+++ b/arch/arm/mach-tegra/sleep-tegra20.S
@@ -78,7 +78,7 @@ ENTRY(tegra20_hotplug_shutdown)
78 /* Put this CPU down */ 78 /* Put this CPU down */
79 cpu_id r0 79 cpu_id r0
80 bl tegra20_cpu_shutdown 80 bl tegra20_cpu_shutdown
81 mov pc, lr @ should never get here 81 ret lr @ should never get here
82ENDPROC(tegra20_hotplug_shutdown) 82ENDPROC(tegra20_hotplug_shutdown)
83 83
84/* 84/*
@@ -96,7 +96,7 @@ ENDPROC(tegra20_hotplug_shutdown)
96 */ 96 */
97ENTRY(tegra20_cpu_shutdown) 97ENTRY(tegra20_cpu_shutdown)
98 cmp r0, #0 98 cmp r0, #0
99 moveq pc, lr @ must not be called for CPU 0 99 reteq lr @ must not be called for CPU 0
100 mov32 r1, TEGRA_PMC_VIRT + PMC_SCRATCH41 100 mov32 r1, TEGRA_PMC_VIRT + PMC_SCRATCH41
101 mov r12, #CPU_RESETTABLE 101 mov r12, #CPU_RESETTABLE
102 str r12, [r1] 102 str r12, [r1]
@@ -117,7 +117,7 @@ ENTRY(tegra20_cpu_shutdown)
117 cpu_id r3 117 cpu_id r3
118 cmp r3, r0 118 cmp r3, r0
119 beq . 119 beq .
120 mov pc, lr 120 ret lr
121ENDPROC(tegra20_cpu_shutdown) 121ENDPROC(tegra20_cpu_shutdown)
122#endif 122#endif
123 123
@@ -164,7 +164,7 @@ ENTRY(tegra_pen_lock)
164 cmpeq r12, r0 @ !turn == cpu? 164 cmpeq r12, r0 @ !turn == cpu?
165 beq 1b @ while !turn == cpu && flag[!cpu] == 1 165 beq 1b @ while !turn == cpu && flag[!cpu] == 1
166 166
167 mov pc, lr @ locked 167 ret lr @ locked
168ENDPROC(tegra_pen_lock) 168ENDPROC(tegra_pen_lock)
169 169
170ENTRY(tegra_pen_unlock) 170ENTRY(tegra_pen_unlock)
@@ -176,7 +176,7 @@ ENTRY(tegra_pen_unlock)
176 addne r2, r3, #PMC_SCRATCH39 176 addne r2, r3, #PMC_SCRATCH39
177 mov r12, #0 177 mov r12, #0
178 str r12, [r2] 178 str r12, [r2]
179 mov pc, lr 179 ret lr
180ENDPROC(tegra_pen_unlock) 180ENDPROC(tegra_pen_unlock)
181 181
182/* 182/*
@@ -189,7 +189,7 @@ ENTRY(tegra20_cpu_clear_resettable)
189 mov32 r1, TEGRA_PMC_VIRT + PMC_SCRATCH41 189 mov32 r1, TEGRA_PMC_VIRT + PMC_SCRATCH41
190 mov r12, #CPU_NOT_RESETTABLE 190 mov r12, #CPU_NOT_RESETTABLE
191 str r12, [r1] 191 str r12, [r1]
192 mov pc, lr 192 ret lr
193ENDPROC(tegra20_cpu_clear_resettable) 193ENDPROC(tegra20_cpu_clear_resettable)
194 194
195/* 195/*
@@ -202,7 +202,7 @@ ENTRY(tegra20_cpu_set_resettable_soon)
202 mov32 r1, TEGRA_PMC_VIRT + PMC_SCRATCH41 202 mov32 r1, TEGRA_PMC_VIRT + PMC_SCRATCH41
203 mov r12, #CPU_RESETTABLE_SOON 203 mov r12, #CPU_RESETTABLE_SOON
204 str r12, [r1] 204 str r12, [r1]
205 mov pc, lr 205 ret lr
206ENDPROC(tegra20_cpu_set_resettable_soon) 206ENDPROC(tegra20_cpu_set_resettable_soon)
207 207
208/* 208/*
@@ -217,7 +217,7 @@ ENTRY(tegra20_cpu_is_resettable_soon)
217 cmp r12, #CPU_RESETTABLE_SOON 217 cmp r12, #CPU_RESETTABLE_SOON
218 moveq r0, #1 218 moveq r0, #1
219 movne r0, #0 219 movne r0, #0
220 mov pc, lr 220 ret lr
221ENDPROC(tegra20_cpu_is_resettable_soon) 221ENDPROC(tegra20_cpu_is_resettable_soon)
222 222
223/* 223/*
@@ -239,7 +239,7 @@ ENTRY(tegra20_sleep_core_finish)
239 mov32 r1, TEGRA_IRAM_LPx_RESUME_AREA 239 mov32 r1, TEGRA_IRAM_LPx_RESUME_AREA
240 add r0, r0, r1 240 add r0, r0, r1
241 241
242 mov pc, r3 242 ret r3
243ENDPROC(tegra20_sleep_core_finish) 243ENDPROC(tegra20_sleep_core_finish)
244 244
245/* 245/*
@@ -402,7 +402,7 @@ exit_selfrefresh_loop:
402 402
403 mov32 r0, TEGRA_PMC_BASE 403 mov32 r0, TEGRA_PMC_BASE
404 ldr r0, [r0, #PMC_SCRATCH41] 404 ldr r0, [r0, #PMC_SCRATCH41]
405 mov pc, r0 @ jump to tegra_resume 405 ret r0 @ jump to tegra_resume
406ENDPROC(tegra20_lp1_reset) 406ENDPROC(tegra20_lp1_reset)
407 407
408/* 408/*
@@ -455,7 +455,7 @@ tegra20_switch_cpu_to_clk32k:
455 mov r0, #0 /* brust policy = 32KHz */ 455 mov r0, #0 /* brust policy = 32KHz */
456 str r0, [r5, #CLK_RESET_SCLK_BURST] 456 str r0, [r5, #CLK_RESET_SCLK_BURST]
457 457
458 mov pc, lr 458 ret lr
459 459
460/* 460/*
461 * tegra20_enter_sleep 461 * tegra20_enter_sleep
@@ -535,7 +535,7 @@ padsave_done:
535 adr r2, tegra20_sclk_save 535 adr r2, tegra20_sclk_save
536 str r0, [r2] 536 str r0, [r2]
537 dsb 537 dsb
538 mov pc, lr 538 ret lr
539 539
540tegra20_sdram_pad_address: 540tegra20_sdram_pad_address:
541 .word TEGRA_APB_MISC_BASE + APB_MISC_XM2CFGCPADCTRL 541 .word TEGRA_APB_MISC_BASE + APB_MISC_XM2CFGCPADCTRL
diff --git a/arch/arm/mach-tegra/sleep-tegra30.S b/arch/arm/mach-tegra/sleep-tegra30.S
index b16d4a57fa59..09cad9b071de 100644
--- a/arch/arm/mach-tegra/sleep-tegra30.S
+++ b/arch/arm/mach-tegra/sleep-tegra30.S
@@ -142,7 +142,7 @@ ENTRY(tegra30_hotplug_shutdown)
142 /* Powergate this CPU */ 142 /* Powergate this CPU */
143 mov r0, #TEGRA30_POWER_HOTPLUG_SHUTDOWN 143 mov r0, #TEGRA30_POWER_HOTPLUG_SHUTDOWN
144 bl tegra30_cpu_shutdown 144 bl tegra30_cpu_shutdown
145 mov pc, lr @ should never get here 145 ret lr @ should never get here
146ENDPROC(tegra30_hotplug_shutdown) 146ENDPROC(tegra30_hotplug_shutdown)
147 147
148/* 148/*
@@ -161,7 +161,7 @@ ENTRY(tegra30_cpu_shutdown)
161 bne _no_cpu0_chk @ It's not Tegra30 161 bne _no_cpu0_chk @ It's not Tegra30
162 162
163 cmp r3, #0 163 cmp r3, #0
164 moveq pc, lr @ Must never be called for CPU 0 164 reteq lr @ Must never be called for CPU 0
165_no_cpu0_chk: 165_no_cpu0_chk:
166 166
167 ldr r12, =TEGRA_FLOW_CTRL_VIRT 167 ldr r12, =TEGRA_FLOW_CTRL_VIRT
@@ -266,7 +266,7 @@ ENTRY(tegra30_sleep_core_finish)
266 mov32 r1, TEGRA_IRAM_LPx_RESUME_AREA 266 mov32 r1, TEGRA_IRAM_LPx_RESUME_AREA
267 add r0, r0, r1 267 add r0, r0, r1
268 268
269 mov pc, r3 269 ret r3
270ENDPROC(tegra30_sleep_core_finish) 270ENDPROC(tegra30_sleep_core_finish)
271 271
272/* 272/*
@@ -285,7 +285,7 @@ ENTRY(tegra30_sleep_cpu_secondary_finish)
285 mov r0, #0 @ power mode flags (!hotplug) 285 mov r0, #0 @ power mode flags (!hotplug)
286 bl tegra30_cpu_shutdown 286 bl tegra30_cpu_shutdown
287 mov r0, #1 @ never return here 287 mov r0, #1 @ never return here
288 mov pc, r7 288 ret r7
289ENDPROC(tegra30_sleep_cpu_secondary_finish) 289ENDPROC(tegra30_sleep_cpu_secondary_finish)
290 290
291/* 291/*
@@ -529,7 +529,7 @@ __no_dual_emc_chanl:
529 529
530 mov32 r0, TEGRA_PMC_BASE 530 mov32 r0, TEGRA_PMC_BASE
531 ldr r0, [r0, #PMC_SCRATCH41] 531 ldr r0, [r0, #PMC_SCRATCH41]
532 mov pc, r0 @ jump to tegra_resume 532 ret r0 @ jump to tegra_resume
533ENDPROC(tegra30_lp1_reset) 533ENDPROC(tegra30_lp1_reset)
534 534
535 .align L1_CACHE_SHIFT 535 .align L1_CACHE_SHIFT
@@ -659,7 +659,7 @@ _no_pll_in_iddq:
659 mov r0, #0 /* brust policy = 32KHz */ 659 mov r0, #0 /* brust policy = 32KHz */
660 str r0, [r5, #CLK_RESET_SCLK_BURST] 660 str r0, [r5, #CLK_RESET_SCLK_BURST]
661 661
662 mov pc, lr 662 ret lr
663 663
664/* 664/*
665 * tegra30_enter_sleep 665 * tegra30_enter_sleep
@@ -819,7 +819,7 @@ pmc_io_dpd_skip:
819 819
820 dsb 820 dsb
821 821
822 mov pc, lr 822 ret lr
823 823
824 .ltorg 824 .ltorg
825/* dummy symbol for end of IRAM */ 825/* dummy symbol for end of IRAM */
diff --git a/arch/arm/mach-tegra/sleep.S b/arch/arm/mach-tegra/sleep.S
index 8d06213fbc47..f024a5109e8e 100644
--- a/arch/arm/mach-tegra/sleep.S
+++ b/arch/arm/mach-tegra/sleep.S
@@ -87,7 +87,7 @@ ENTRY(tegra_init_l2_for_a15)
87 mcrne p15, 0x1, r0, c9, c0, 2 87 mcrne p15, 0x1, r0, c9, c0, 2
88_exit_init_l2_a15: 88_exit_init_l2_a15:
89 89
90 mov pc, lr 90 ret lr
91ENDPROC(tegra_init_l2_for_a15) 91ENDPROC(tegra_init_l2_for_a15)
92 92
93/* 93/*
@@ -111,7 +111,7 @@ ENTRY(tegra_sleep_cpu_finish)
111 add r3, r3, r0 111 add r3, r3, r0
112 mov r0, r1 112 mov r0, r1
113 113
114 mov pc, r3 114 ret r3
115ENDPROC(tegra_sleep_cpu_finish) 115ENDPROC(tegra_sleep_cpu_finish)
116 116
117/* 117/*
@@ -139,7 +139,7 @@ ENTRY(tegra_shut_off_mmu)
139 moveq r3, #0 139 moveq r3, #0
140 streq r3, [r2, #L2X0_CTRL] 140 streq r3, [r2, #L2X0_CTRL]
141#endif 141#endif
142 mov pc, r0 142 ret r0
143ENDPROC(tegra_shut_off_mmu) 143ENDPROC(tegra_shut_off_mmu)
144 .popsection 144 .popsection
145 145
@@ -156,6 +156,6 @@ ENTRY(tegra_switch_cpu_to_pllp)
156 str r0, [r5, #CLK_RESET_CCLK_BURST] 156 str r0, [r5, #CLK_RESET_CCLK_BURST]
157 mov r0, #0 157 mov r0, #0
158 str r0, [r5, #CLK_RESET_CCLK_DIVIDER] 158 str r0, [r5, #CLK_RESET_CCLK_DIVIDER]
159 mov pc, lr 159 ret lr
160ENDPROC(tegra_switch_cpu_to_pllp) 160ENDPROC(tegra_switch_cpu_to_pllp)
161#endif 161#endif
diff --git a/arch/arm/mach-vexpress/tc2_pm.c b/arch/arm/mach-vexpress/tc2_pm.c
index b743a0ae02ce..2fb78b4648cb 100644
--- a/arch/arm/mach-vexpress/tc2_pm.c
+++ b/arch/arm/mach-vexpress/tc2_pm.c
@@ -152,7 +152,7 @@ static void tc2_pm_down(u64 residency)
152 if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) { 152 if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
153 arch_spin_unlock(&tc2_pm_lock); 153 arch_spin_unlock(&tc2_pm_lock);
154 154
155 if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A15) { 155 if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
156 /* 156 /*
157 * On the Cortex-A15 we need to disable 157 * On the Cortex-A15 we need to disable
158 * L2 prefetching before flushing the cache. 158 * L2 prefetching before flushing the cache.
@@ -323,6 +323,21 @@ static void __naked tc2_pm_power_up_setup(unsigned int affinity_level)
323" b cci_enable_port_for_self "); 323" b cci_enable_port_for_self ");
324} 324}
325 325
326static void __init tc2_cache_off(void)
327{
328 pr_info("TC2: disabling cache during MCPM loopback test\n");
329 if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
330 /* disable L2 prefetching on the Cortex-A15 */
331 asm volatile(
332 "mcr p15, 1, %0, c15, c0, 3 \n\t"
333 "isb \n\t"
334 "dsb "
335 : : "r" (0x400) );
336 }
337 v7_exit_coherency_flush(all);
338 cci_disable_port_by_cpu(read_cpuid_mpidr());
339}
340
326static int __init tc2_pm_init(void) 341static int __init tc2_pm_init(void)
327{ 342{
328 int ret, irq; 343 int ret, irq;
@@ -370,6 +385,8 @@ static int __init tc2_pm_init(void)
370 ret = mcpm_platform_register(&tc2_pm_power_ops); 385 ret = mcpm_platform_register(&tc2_pm_power_ops);
371 if (!ret) { 386 if (!ret) {
372 mcpm_sync_init(tc2_pm_power_up_setup); 387 mcpm_sync_init(tc2_pm_power_up_setup);
388 /* test if we can (re)enable the CCI on our own */
389 BUG_ON(mcpm_loopback(tc2_cache_off) != 0);
373 pr_info("TC2 power management initialized\n"); 390 pr_info("TC2 power management initialized\n");
374 } 391 }
375 return ret; 392 return ret;
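
Both MCPM back-ends touched in this diff (Exynos5420 above and TC2 here) gain a call to the new mcpm_loopback() self-test after the usual registration steps, passing a platform callback that disables caching so the CCI port for the boot cluster can be (re)enabled through the normal low-level entry path. A hedged sketch of the resulting init ordering; the my_* symbols are placeholders for a platform's own ops and callbacks:

    #include <linux/init.h>
    #include <asm/mcpm.h>

    static const struct mcpm_platform_ops my_power_ops;              /* placeholder */
    static void my_power_up_setup(unsigned int affinity_level) { }   /* placeholder */
    static void my_cache_off(void) { }                               /* placeholder */

    static int __init my_mcpm_init(void)
    {
        int ret;

        ret = mcpm_platform_register(&my_power_ops);
        if (!ret)
            ret = mcpm_sync_init(my_power_up_setup);
        if (!ret)
            /* run a power-down/up cycle on this CPU to prove the
             * low-level path works and to turn the CCI back on */
            ret = mcpm_loopback(my_cache_off);
        return ret;
    }
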
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index c348eaee7ee2..df46620206af 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -907,8 +907,8 @@ config PL310_ERRATA_588369
907 They are architecturally defined to behave as the execution of a 907 They are architecturally defined to behave as the execution of a
908 clean operation followed immediately by an invalidate operation, 908 clean operation followed immediately by an invalidate operation,
909 both performing to the same memory location. This functionality 909 both performing to the same memory location. This functionality
910 is not correctly implemented in PL310 as clean lines are not 910 is not correctly implemented in PL310 prior to r2p0 (fixed in r2p0)
911 invalidated as a result of these operations. 911 as clean lines are not invalidated as a result of these operations.
912 912
913config PL310_ERRATA_727915 913config PL310_ERRATA_727915
914 bool "PL310 errata: Background Clean & Invalidate by Way operation can cause data corruption" 914 bool "PL310 errata: Background Clean & Invalidate by Way operation can cause data corruption"
@@ -918,7 +918,8 @@ config PL310_ERRATA_727915
918 PL310 can handle normal accesses while it is in progress. Under very 918 PL310 can handle normal accesses while it is in progress. Under very
919 rare circumstances, due to this erratum, write data can be lost when 919 rare circumstances, due to this erratum, write data can be lost when
920 PL310 treats a cacheable write transaction during a Clean & 920 PL310 treats a cacheable write transaction during a Clean &
921 Invalidate by Way operation. 921 Invalidate by Way operation. Revisions prior to r3p1 are affected by
922 this errata (fixed in r3p1).
922 923
923config PL310_ERRATA_753970 924config PL310_ERRATA_753970
924 bool "PL310 errata: cache sync operation may be faulty" 925 bool "PL310 errata: cache sync operation may be faulty"
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index b8cb1a2688a0..0c1ab49e5f7b 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -76,6 +76,7 @@
76 76
77static unsigned long ai_user; 77static unsigned long ai_user;
78static unsigned long ai_sys; 78static unsigned long ai_sys;
79static void *ai_sys_last_pc;
79static unsigned long ai_skipped; 80static unsigned long ai_skipped;
80static unsigned long ai_half; 81static unsigned long ai_half;
81static unsigned long ai_word; 82static unsigned long ai_word;
@@ -130,7 +131,7 @@ static const char *usermode_action[] = {
130static int alignment_proc_show(struct seq_file *m, void *v) 131static int alignment_proc_show(struct seq_file *m, void *v)
131{ 132{
132 seq_printf(m, "User:\t\t%lu\n", ai_user); 133 seq_printf(m, "User:\t\t%lu\n", ai_user);
133 seq_printf(m, "System:\t\t%lu\n", ai_sys); 134 seq_printf(m, "System:\t\t%lu (%pF)\n", ai_sys, ai_sys_last_pc);
134 seq_printf(m, "Skipped:\t%lu\n", ai_skipped); 135 seq_printf(m, "Skipped:\t%lu\n", ai_skipped);
135 seq_printf(m, "Half:\t\t%lu\n", ai_half); 136 seq_printf(m, "Half:\t\t%lu\n", ai_half);
136 seq_printf(m, "Word:\t\t%lu\n", ai_word); 137 seq_printf(m, "Word:\t\t%lu\n", ai_word);
@@ -794,6 +795,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
794 goto user; 795 goto user;
795 796
796 ai_sys += 1; 797 ai_sys += 1;
798 ai_sys_last_pc = (void *)instruction_pointer(regs);
797 799
798 fixup: 800 fixup:
799 801
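
The alignment.c hunk records the program counter of the most recent kernel-mode alignment fault and prints it through the %pF specifier, which resolves the saved address to symbol+offset in /proc/cpu/alignment. A hedged, stripped-down sketch of that reporting idea with placeholder names:

    #include <linux/seq_file.h>

    static unsigned long fault_count;   /* bumped by the fault handler */
    static void *fault_last_pc;         /* instruction_pointer(regs) of the last fault */

    static int example_alignment_show(struct seq_file *m, void *v)
    {
        /* %pF turns the saved text address into symbol+offset */
        seq_printf(m, "System:\t\t%lu (%pF)\n", fault_count, fault_last_pc);
        return 0;
    }
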
diff --git a/arch/arm/mm/cache-fa.S b/arch/arm/mm/cache-fa.S
index e505befe51b5..2f0c58836ae7 100644
--- a/arch/arm/mm/cache-fa.S
+++ b/arch/arm/mm/cache-fa.S
@@ -15,6 +15,7 @@
15 */ 15 */
16#include <linux/linkage.h> 16#include <linux/linkage.h>
17#include <linux/init.h> 17#include <linux/init.h>
18#include <asm/assembler.h>
18#include <asm/memory.h> 19#include <asm/memory.h>
19#include <asm/page.h> 20#include <asm/page.h>
20 21
@@ -45,7 +46,7 @@
45ENTRY(fa_flush_icache_all) 46ENTRY(fa_flush_icache_all)
46 mov r0, #0 47 mov r0, #0
47 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 48 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
48 mov pc, lr 49 ret lr
49ENDPROC(fa_flush_icache_all) 50ENDPROC(fa_flush_icache_all)
50 51
51/* 52/*
@@ -71,7 +72,7 @@ __flush_whole_cache:
71 mcrne p15, 0, ip, c7, c5, 6 @ invalidate BTB 72 mcrne p15, 0, ip, c7, c5, 6 @ invalidate BTB
72 mcrne p15, 0, ip, c7, c10, 4 @ drain write buffer 73 mcrne p15, 0, ip, c7, c10, 4 @ drain write buffer
73 mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush 74 mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush
74 mov pc, lr 75 ret lr
75 76
76/* 77/*
77 * flush_user_cache_range(start, end, flags) 78 * flush_user_cache_range(start, end, flags)
@@ -99,7 +100,7 @@ ENTRY(fa_flush_user_cache_range)
99 mcrne p15, 0, ip, c7, c5, 6 @ invalidate BTB 100 mcrne p15, 0, ip, c7, c5, 6 @ invalidate BTB
100 mcrne p15, 0, ip, c7, c10, 4 @ data write barrier 101 mcrne p15, 0, ip, c7, c10, 4 @ data write barrier
101 mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush 102 mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush
102 mov pc, lr 103 ret lr
103 104
104/* 105/*
105 * coherent_kern_range(start, end) 106 * coherent_kern_range(start, end)
@@ -135,7 +136,7 @@ ENTRY(fa_coherent_user_range)
135 mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB 136 mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB
136 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 137 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
137 mcr p15, 0, r0, c7, c5, 4 @ prefetch flush 138 mcr p15, 0, r0, c7, c5, 4 @ prefetch flush
138 mov pc, lr 139 ret lr
139 140
140/* 141/*
141 * flush_kern_dcache_area(void *addr, size_t size) 142 * flush_kern_dcache_area(void *addr, size_t size)
@@ -155,7 +156,7 @@ ENTRY(fa_flush_kern_dcache_area)
155 mov r0, #0 156 mov r0, #0
156 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 157 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
157 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 158 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
158 mov pc, lr 159 ret lr
159 160
160/* 161/*
161 * dma_inv_range(start, end) 162 * dma_inv_range(start, end)
@@ -181,7 +182,7 @@ fa_dma_inv_range:
181 blo 1b 182 blo 1b
182 mov r0, #0 183 mov r0, #0
183 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 184 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
184 mov pc, lr 185 ret lr
185 186
186/* 187/*
187 * dma_clean_range(start, end) 188 * dma_clean_range(start, end)
@@ -199,7 +200,7 @@ fa_dma_clean_range:
199 blo 1b 200 blo 1b
200 mov r0, #0 201 mov r0, #0
201 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 202 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
202 mov pc, lr 203 ret lr
203 204
204/* 205/*
205 * dma_flush_range(start,end) 206 * dma_flush_range(start,end)
@@ -214,7 +215,7 @@ ENTRY(fa_dma_flush_range)
214 blo 1b 215 blo 1b
215 mov r0, #0 216 mov r0, #0
216 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 217 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
217 mov pc, lr 218 ret lr
218 219
219/* 220/*
220 * dma_map_area(start, size, dir) 221 * dma_map_area(start, size, dir)
@@ -237,7 +238,7 @@ ENDPROC(fa_dma_map_area)
237 * - dir - DMA direction 238 * - dir - DMA direction
238 */ 239 */
239ENTRY(fa_dma_unmap_area) 240ENTRY(fa_dma_unmap_area)
240 mov pc, lr 241 ret lr
241ENDPROC(fa_dma_unmap_area) 242ENDPROC(fa_dma_unmap_area)
242 243
243 .globl fa_flush_kern_cache_louis 244 .globl fa_flush_kern_cache_louis
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 7c3fb41a462e..5f2c988a06ac 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -665,7 +665,7 @@ static int l2c310_cpu_enable_flz(struct notifier_block *nb, unsigned long act, v
665static void __init l2c310_enable(void __iomem *base, u32 aux, unsigned num_lock) 665static void __init l2c310_enable(void __iomem *base, u32 aux, unsigned num_lock)
666{ 666{
667 unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_RTL_MASK; 667 unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_RTL_MASK;
668 bool cortex_a9 = read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9; 668 bool cortex_a9 = read_cpuid_part() == ARM_CPU_PART_CORTEX_A9;
669 669
670 if (rev >= L310_CACHE_ID_RTL_R2P0) { 670 if (rev >= L310_CACHE_ID_RTL_R2P0) {
671 if (cortex_a9) { 671 if (cortex_a9) {
diff --git a/arch/arm/mm/cache-nop.S b/arch/arm/mm/cache-nop.S
index 8e12ddca0031..f1cc9861031f 100644
--- a/arch/arm/mm/cache-nop.S
+++ b/arch/arm/mm/cache-nop.S
@@ -5,11 +5,12 @@
5 */ 5 */
6#include <linux/linkage.h> 6#include <linux/linkage.h>
7#include <linux/init.h> 7#include <linux/init.h>
8#include <asm/assembler.h>
8 9
9#include "proc-macros.S" 10#include "proc-macros.S"
10 11
11ENTRY(nop_flush_icache_all) 12ENTRY(nop_flush_icache_all)
12 mov pc, lr 13 ret lr
13ENDPROC(nop_flush_icache_all) 14ENDPROC(nop_flush_icache_all)
14 15
15 .globl nop_flush_kern_cache_all 16 .globl nop_flush_kern_cache_all
@@ -29,7 +30,7 @@ ENDPROC(nop_flush_icache_all)
29 30
30ENTRY(nop_coherent_user_range) 31ENTRY(nop_coherent_user_range)
31 mov r0, 0 32 mov r0, 0
32 mov pc, lr 33 ret lr
33ENDPROC(nop_coherent_user_range) 34ENDPROC(nop_coherent_user_range)
34 35
35 .globl nop_flush_kern_dcache_area 36 .globl nop_flush_kern_dcache_area
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S
index a7ba68f59f0c..91e3adf155cb 100644
--- a/arch/arm/mm/cache-v4.S
+++ b/arch/arm/mm/cache-v4.S
@@ -9,6 +9,7 @@
9 */ 9 */
10#include <linux/linkage.h> 10#include <linux/linkage.h>
11#include <linux/init.h> 11#include <linux/init.h>
12#include <asm/assembler.h>
12#include <asm/page.h> 13#include <asm/page.h>
13#include "proc-macros.S" 14#include "proc-macros.S"
14 15
@@ -18,7 +19,7 @@
18 * Unconditionally clean and invalidate the entire icache. 19 * Unconditionally clean and invalidate the entire icache.
19 */ 20 */
20ENTRY(v4_flush_icache_all) 21ENTRY(v4_flush_icache_all)
21 mov pc, lr 22 ret lr
22ENDPROC(v4_flush_icache_all) 23ENDPROC(v4_flush_icache_all)
23 24
24/* 25/*
@@ -40,7 +41,7 @@ ENTRY(v4_flush_kern_cache_all)
40#ifdef CONFIG_CPU_CP15 41#ifdef CONFIG_CPU_CP15
41 mov r0, #0 42 mov r0, #0
42 mcr p15, 0, r0, c7, c7, 0 @ flush ID cache 43 mcr p15, 0, r0, c7, c7, 0 @ flush ID cache
43 mov pc, lr 44 ret lr
44#else 45#else
45 /* FALLTHROUGH */ 46 /* FALLTHROUGH */
46#endif 47#endif
@@ -59,7 +60,7 @@ ENTRY(v4_flush_user_cache_range)
59#ifdef CONFIG_CPU_CP15 60#ifdef CONFIG_CPU_CP15
60 mov ip, #0 61 mov ip, #0
61 mcr p15, 0, ip, c7, c7, 0 @ flush ID cache 62 mcr p15, 0, ip, c7, c7, 0 @ flush ID cache
62 mov pc, lr 63 ret lr
63#else 64#else
64 /* FALLTHROUGH */ 65 /* FALLTHROUGH */
65#endif 66#endif
@@ -89,7 +90,7 @@ ENTRY(v4_coherent_kern_range)
89 */ 90 */
90ENTRY(v4_coherent_user_range) 91ENTRY(v4_coherent_user_range)
91 mov r0, #0 92 mov r0, #0
92 mov pc, lr 93 ret lr
93 94
94/* 95/*
95 * flush_kern_dcache_area(void *addr, size_t size) 96 * flush_kern_dcache_area(void *addr, size_t size)
@@ -116,7 +117,7 @@ ENTRY(v4_dma_flush_range)
116 mov r0, #0 117 mov r0, #0
117 mcr p15, 0, r0, c7, c7, 0 @ flush ID cache 118 mcr p15, 0, r0, c7, c7, 0 @ flush ID cache
118#endif 119#endif
119 mov pc, lr 120 ret lr
120 121
121/* 122/*
122 * dma_unmap_area(start, size, dir) 123 * dma_unmap_area(start, size, dir)
@@ -136,7 +137,7 @@ ENTRY(v4_dma_unmap_area)
136 * - dir - DMA direction 137 * - dir - DMA direction
137 */ 138 */
138ENTRY(v4_dma_map_area) 139ENTRY(v4_dma_map_area)
139 mov pc, lr 140 ret lr
140ENDPROC(v4_dma_unmap_area) 141ENDPROC(v4_dma_unmap_area)
141ENDPROC(v4_dma_map_area) 142ENDPROC(v4_dma_map_area)
142 143
diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S
index cd4945321407..2522f8c8fbb1 100644
--- a/arch/arm/mm/cache-v4wb.S
+++ b/arch/arm/mm/cache-v4wb.S
@@ -9,6 +9,7 @@
  */
 #include <linux/linkage.h>
 #include <linux/init.h>
+#include <asm/assembler.h>
 #include <asm/memory.h>
 #include <asm/page.h>
 #include "proc-macros.S"
@@ -58,7 +59,7 @@ flush_base:
 ENTRY(v4wb_flush_icache_all)
 	mov r0, #0
 	mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
-	mov pc, lr
+	ret lr
 ENDPROC(v4wb_flush_icache_all)
 
 /*
@@ -94,7 +95,7 @@ __flush_whole_cache:
 	blo 1b
 #endif
 	mcr p15, 0, ip, c7, c10, 4 @ drain write buffer
-	mov pc, lr
+	ret lr
 
 /*
  * flush_user_cache_range(start, end, flags)
@@ -122,7 +123,7 @@ ENTRY(v4wb_flush_user_cache_range)
 	blo 1b
 	tst r2, #VM_EXEC
 	mcrne p15, 0, ip, c7, c10, 4 @ drain write buffer
-	mov pc, lr
+	ret lr
 
 /*
  * flush_kern_dcache_area(void *addr, size_t size)
@@ -170,7 +171,7 @@ ENTRY(v4wb_coherent_user_range)
 	mov r0, #0
 	mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
 	mcr p15, 0, r0, c7, c10, 4 @ drain WB
-	mov pc, lr
+	ret lr
 
 
 /*
@@ -195,7 +196,7 @@ v4wb_dma_inv_range:
 	cmp r0, r1
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
-	mov pc, lr
+	ret lr
 
 /*
  * dma_clean_range(start, end)
@@ -212,7 +213,7 @@ v4wb_dma_clean_range:
 	cmp r0, r1
 	blo 1b
 	mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
-	mov pc, lr
+	ret lr
 
 /*
  * dma_flush_range(start, end)
@@ -248,7 +249,7 @@ ENDPROC(v4wb_dma_map_area)
  * - dir - DMA direction
  */
 ENTRY(v4wb_dma_unmap_area)
-	mov pc, lr
+	ret lr
 ENDPROC(v4wb_dma_unmap_area)
 
 	.globl v4wb_flush_kern_cache_louis
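
Note: these hunks (and several that follow) add #include <asm/assembler.h> because ret, reteq, retne and friends are assembler macros supplied by that header rather than native ARM mnemonics. The in-tree definition is not shown in this diff, so the following is only a rough, hypothetical sketch of how such a macro family can be generated; the real header also handles Thumb-2 builds and other details:

	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
	.macro	ret\c, reg		@ define ret, reteq, retne, ... taking one register
#if __LINUX_ARM_ARCH__ < 6
	mov\c	pc, \reg		@ older cores: keep the traditional mov to pc
#else
	.ifeqs	"\reg", "lr"
	bx\c	\reg			@ ARMv6+: return via bx lr
	.else
	mov\c	pc, \reg		@ non-lr targets still use mov pc, reg
	.endif
#endif
	.endm
	.endr

On ARMv6 and later, returning with bx lr interworks with Thumb callers and lets the core predict function returns, which is presumably the motivation for the mechanical mov pc, reg to ret reg conversion throughout these files.
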
diff --git a/arch/arm/mm/cache-v4wt.S b/arch/arm/mm/cache-v4wt.S
index 11e5e5838bc5..a0982ce49007 100644
--- a/arch/arm/mm/cache-v4wt.S
+++ b/arch/arm/mm/cache-v4wt.S
@@ -13,6 +13,7 @@
  */
 #include <linux/linkage.h>
 #include <linux/init.h>
+#include <asm/assembler.h>
 #include <asm/page.h>
 #include "proc-macros.S"
 
@@ -48,7 +49,7 @@
 ENTRY(v4wt_flush_icache_all)
 	mov r0, #0
 	mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
-	mov pc, lr
+	ret lr
 ENDPROC(v4wt_flush_icache_all)
 
 /*
@@ -71,7 +72,7 @@ __flush_whole_cache:
 	tst r2, #VM_EXEC
 	mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
 	mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
-	mov pc, lr
+	ret lr
 
 /*
  * flush_user_cache_range(start, end, flags)
@@ -94,7 +95,7 @@ ENTRY(v4wt_flush_user_cache_range)
 	add r0, r0, #CACHE_DLINESIZE
 	cmp r0, r1
 	blo 1b
-	mov pc, lr
+	ret lr
 
 /*
  * coherent_kern_range(start, end)
@@ -126,7 +127,7 @@ ENTRY(v4wt_coherent_user_range)
 	cmp r0, r1
 	blo 1b
 	mov r0, #0
-	mov pc, lr
+	ret lr
 
 /*
  * flush_kern_dcache_area(void *addr, size_t size)
@@ -160,7 +161,7 @@ v4wt_dma_inv_range:
 	add r0, r0, #CACHE_DLINESIZE
 	cmp r0, r1
 	blo 1b
-	mov pc, lr
+	ret lr
 
 /*
  * dma_flush_range(start, end)
@@ -192,7 +193,7 @@ ENTRY(v4wt_dma_unmap_area)
  * - dir - DMA direction
  */
 ENTRY(v4wt_dma_map_area)
-	mov pc, lr
+	ret lr
 ENDPROC(v4wt_dma_unmap_area)
 ENDPROC(v4wt_dma_map_area)
 
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index d8fd4d4bd3d4..24659952c278 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -51,7 +51,7 @@ ENTRY(v6_flush_icache_all)
 #else
 	mcr p15, 0, r0, c7, c5, 0 @ invalidate I-cache
 #endif
-	mov pc, lr
+	ret lr
 ENDPROC(v6_flush_icache_all)
 
 /*
@@ -73,7 +73,7 @@ ENTRY(v6_flush_kern_cache_all)
 #else
 	mcr p15, 0, r0, c7, c15, 0 @ Cache clean+invalidate
 #endif
-	mov pc, lr
+	ret lr
 
 /*
  * v6_flush_cache_all()
@@ -98,7 +98,7 @@ ENTRY(v6_flush_user_cache_all)
  * - we have a VIPT cache.
  */
 ENTRY(v6_flush_user_cache_range)
-	mov pc, lr
+	ret lr
 
 /*
  * v6_coherent_kern_range(start,end)
@@ -150,7 +150,7 @@ ENTRY(v6_coherent_user_range)
 #else
 	mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB
 #endif
-	mov pc, lr
+	ret lr
 
 /*
  * Fault handling for the cache operation above. If the virtual address in r0
@@ -158,7 +158,7 @@ ENTRY(v6_coherent_user_range)
  */
 9001:
 	mov r0, #-EFAULT
-	mov pc, lr
+	ret lr
  UNWIND(.fnend )
 ENDPROC(v6_coherent_user_range)
 ENDPROC(v6_coherent_kern_range)
@@ -188,7 +188,7 @@ ENTRY(v6_flush_kern_dcache_area)
 	mov r0, #0
 	mcr p15, 0, r0, c7, c10, 4
 #endif
-	mov pc, lr
+	ret lr
 
 
 /*
@@ -239,7 +239,7 @@ v6_dma_inv_range:
 	blo 1b
 	mov r0, #0
 	mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
-	mov pc, lr
+	ret lr
 
 /*
  * v6_dma_clean_range(start,end)
@@ -262,7 +262,7 @@ v6_dma_clean_range:
 	blo 1b
 	mov r0, #0
 	mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
-	mov pc, lr
+	ret lr
 
 /*
  * v6_dma_flush_range(start,end)
@@ -290,7 +290,7 @@ ENTRY(v6_dma_flush_range)
 	blo 1b
 	mov r0, #0
 	mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
-	mov pc, lr
+	ret lr
 
 /*
  * dma_map_area(start, size, dir)
@@ -323,7 +323,7 @@ ENTRY(v6_dma_unmap_area)
 	teq r2, #DMA_TO_DEVICE
 	bne v6_dma_inv_range
 #endif
-	mov pc, lr
+	ret lr
 ENDPROC(v6_dma_unmap_area)
 
 	.globl v6_flush_kern_cache_louis
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 615c99e38ba1..b966656d2c2d 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -61,7 +61,7 @@ ENTRY(v7_invalidate_l1)
 	bgt 1b
 	dsb st
 	isb
-	mov pc, lr
+	ret lr
 ENDPROC(v7_invalidate_l1)
 
 /*
@@ -76,7 +76,7 @@ ENTRY(v7_flush_icache_all)
 	mov r0, #0
 	ALT_SMP(mcr p15, 0, r0, c7, c1, 0) @ invalidate I-cache inner shareable
 	ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate
-	mov pc, lr
+	ret lr
 ENDPROC(v7_flush_icache_all)
 
  /*
@@ -94,7 +94,7 @@ ENTRY(v7_flush_dcache_louis)
 	ALT_UP(ands r3, r0, #(7 << 27)) @ extract LoUU from clidr
 #ifdef CONFIG_ARM_ERRATA_643719
 	ALT_SMP(mrceq p15, 0, r2, c0, c0, 0) @ read main ID register
-	ALT_UP(moveq pc, lr) @ LoUU is zero, so nothing to do
+	ALT_UP(reteq lr) @ LoUU is zero, so nothing to do
 	ldreq r1, =0x410fc090 @ ID of ARM Cortex A9 r0p?
 	biceq r2, r2, #0x0000000f @ clear minor revision number
 	teqeq r2, r1 @ test for errata affected core and if so...
@@ -102,7 +102,7 @@ ENTRY(v7_flush_dcache_louis)
 #endif
 	ALT_SMP(mov r3, r3, lsr #20) @ r3 = LoUIS * 2
 	ALT_UP(mov r3, r3, lsr #26) @ r3 = LoUU * 2
-	moveq pc, lr @ return if level == 0
+	reteq lr @ return if level == 0
 	mov r10, #0 @ r10 (starting level) = 0
 	b flush_levels @ start flushing cache levels
 ENDPROC(v7_flush_dcache_louis)
@@ -168,7 +168,7 @@ finished:
 	mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
 	dsb st
 	isb
-	mov pc, lr
+	ret lr
 ENDPROC(v7_flush_dcache_all)
 
 /*
@@ -191,7 +191,7 @@ ENTRY(v7_flush_kern_cache_all)
 	ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate
  ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} )
  THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} )
-	mov pc, lr
+	ret lr
 ENDPROC(v7_flush_kern_cache_all)
 
  /*
@@ -209,7 +209,7 @@ ENTRY(v7_flush_kern_cache_louis)
 	ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate
  ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} )
  THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} )
-	mov pc, lr
+	ret lr
 ENDPROC(v7_flush_kern_cache_louis)
 
 /*
@@ -235,7 +235,7 @@ ENTRY(v7_flush_user_cache_all)
  * - we have a VIPT cache.
  */
 ENTRY(v7_flush_user_cache_range)
-	mov pc, lr
+	ret lr
 ENDPROC(v7_flush_user_cache_all)
 ENDPROC(v7_flush_user_cache_range)
 
@@ -296,7 +296,7 @@ ENTRY(v7_coherent_user_range)
 	ALT_UP(mcr p15, 0, r0, c7, c5, 6) @ invalidate BTB
 	dsb ishst
 	isb
-	mov pc, lr
+	ret lr
 
 /*
  * Fault handling for the cache operation above. If the virtual address in r0
@@ -307,7 +307,7 @@ ENTRY(v7_coherent_user_range)
 	dsb
 #endif
 	mov r0, #-EFAULT
-	mov pc, lr
+	ret lr
  UNWIND(.fnend )
 ENDPROC(v7_coherent_kern_range)
 ENDPROC(v7_coherent_user_range)
@@ -336,7 +336,7 @@ ENTRY(v7_flush_kern_dcache_area)
 	cmp r0, r1
 	blo 1b
 	dsb st
-	mov pc, lr
+	ret lr
 ENDPROC(v7_flush_kern_dcache_area)
 
 /*
@@ -369,7 +369,7 @@ v7_dma_inv_range:
 	cmp r0, r1
 	blo 1b
 	dsb st
-	mov pc, lr
+	ret lr
 ENDPROC(v7_dma_inv_range)
 
 /*
@@ -391,7 +391,7 @@ v7_dma_clean_range:
 	cmp r0, r1
 	blo 1b
 	dsb st
-	mov pc, lr
+	ret lr
 ENDPROC(v7_dma_clean_range)
 
 /*
@@ -413,7 +413,7 @@ ENTRY(v7_dma_flush_range)
 	cmp r0, r1
 	blo 1b
 	dsb st
-	mov pc, lr
+	ret lr
 ENDPROC(v7_dma_flush_range)
 
 /*
@@ -439,7 +439,7 @@ ENTRY(v7_dma_unmap_area)
 	add r1, r1, r0
 	teq r2, #DMA_TO_DEVICE
 	bne v7_dma_inv_range
-	mov pc, lr
+	ret lr
 ENDPROC(v7_dma_unmap_area)
 
 	__INITDATA
diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c
index c508f41a43bc..59424937e52b 100644
--- a/arch/arm/mm/dump.c
+++ b/arch/arm/mm/dump.c
@@ -126,8 +126,8 @@ static const struct prot_bits section_bits[] = {
 		.val = PMD_SECT_USER,
 		.set = "USR",
 	}, {
-		.mask = PMD_SECT_RDONLY,
-		.val = PMD_SECT_RDONLY,
+		.mask = L_PMD_SECT_RDONLY,
+		.val = L_PMD_SECT_RDONLY,
 		.set = "ro",
 		.clear = "RW",
 #elif __LINUX_ARM_ARCH__ >= 6
diff --git a/arch/arm/mm/l2c-l2x0-resume.S b/arch/arm/mm/l2c-l2x0-resume.S
index 99b05f21a59a..fda415e4ca8f 100644
--- a/arch/arm/mm/l2c-l2x0-resume.S
+++ b/arch/arm/mm/l2c-l2x0-resume.S
@@ -6,6 +6,7 @@
  * This code can only be used to if you are running in the secure world.
  */
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 #include <asm/hardware/cache-l2x0.h>
 
 	.text
@@ -27,7 +28,7 @@ ENTRY(l2c310_early_resume)
 
 	@ Check that the address has been initialised
 	teq r1, #0
-	moveq pc, lr
+	reteq lr
 
 	@ The prefetch and power control registers are revision dependent
 	@ and can be written whether or not the L2 cache is enabled
@@ -41,7 +42,7 @@ ENTRY(l2c310_early_resume)
 	@ Don't setup the L2 cache if it is already enabled
 	ldr r0, [r1, #L2X0_CTRL]
 	tst r0, #L2X0_CTRL_EN
-	movne pc, lr
+	retne lr
 
 	str r3, [r1, #L310_TAG_LATENCY_CTRL]
 	str r4, [r1, #L310_DATA_LATENCY_CTRL]
@@ -51,7 +52,7 @@ ENTRY(l2c310_early_resume)
 	str r2, [r1, #L2X0_AUX_CTRL]
 	mov r9, #L2X0_CTRL_EN
 	str r9, [r1, #L2X0_CTRL]
-	mov pc, lr
+	ret lr
 ENDPROC(l2c310_early_resume)
 
 	.align
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 6e3ba8d112a2..8348ed6b2efe 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1434,23 +1434,64 @@ void __init early_paging_init(const struct machine_desc *mdesc,
 	dsb(ishst);
 	isb();
 
-	/* remap level 1 table */
+	/*
+	 * FIXME: This code is not architecturally compliant: we modify
+	 * the mappings in-place, indeed while they are in use by this
+	 * very same code. This may lead to unpredictable behaviour of
+	 * the CPU.
+	 *
+	 * Even modifying the mappings in a separate page table does
+	 * not resolve this.
+	 *
+	 * The architecture strongly recommends that when a mapping is
+	 * changed, that it is changed by first going via an invalid
+	 * mapping and back to the new mapping. This is to ensure that
+	 * no TLB conflicts (caused by the TLB having more than one TLB
+	 * entry match a translation) can occur. However, doing that
+	 * here will result in unmapping the code we are running.
+	 */
+	pr_warn("WARNING: unsafe modification of in-place page tables - tainting kernel\n");
+	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
+
+	/*
+	 * Remap level 1 table. This changes the physical addresses
+	 * used to refer to the level 2 page tables to the high
+	 * physical address alias, leaving everything else the same.
+	 */
 	for (i = 0; i < PTRS_PER_PGD; pud0++, i++) {
 		set_pud(pud0,
 			__pud(__pa(pmd0) | PMD_TYPE_TABLE | L_PGD_SWAPPER));
 		pmd0 += PTRS_PER_PMD;
 	}
 
-	/* remap pmds for kernel mapping */
+	/*
+	 * Remap the level 2 table, pointing the mappings at the high
+	 * physical address alias of these pages.
+	 */
 	phys = __pa(map_start);
 	do {
 		*pmdk++ = __pmd(phys | pmdprot);
 		phys += PMD_SIZE;
 	} while (phys < map_end);
 
+	/*
+	 * Ensure that the above updates are flushed out of the cache.
+	 * This is not strictly correct; on a system where the caches
+	 * are coherent with each other, but the MMU page table walks
+	 * may not be coherent, flush_cache_all() may be a no-op, and
+	 * this will fail.
+	 */
 	flush_cache_all();
+
+	/*
+	 * Re-write the TTBR values to point them at the high physical
+	 * alias of the page tables. We expect __va() will work on
+	 * cpu_get_pgd(), which returns the value of TTBR0.
+	 */
 	cpu_switch_mm(pgd0, &init_mm);
 	cpu_set_ttbr(1, __pa(pgd0) + TTBR1_OFFSET);
+
+	/* Finally flush any stale TLB values. */
 	local_flush_bp_all();
 	local_flush_tlb_all();
 }
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index d1a2d05971e0..86ee5d47ce3c 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -73,7 +73,7 @@
73 * cpu_arm1020_proc_init() 73 * cpu_arm1020_proc_init()
74 */ 74 */
75ENTRY(cpu_arm1020_proc_init) 75ENTRY(cpu_arm1020_proc_init)
76 mov pc, lr 76 ret lr
77 77
78/* 78/*
79 * cpu_arm1020_proc_fin() 79 * cpu_arm1020_proc_fin()
@@ -83,7 +83,7 @@ ENTRY(cpu_arm1020_proc_fin)
83 bic r0, r0, #0x1000 @ ...i............ 83 bic r0, r0, #0x1000 @ ...i............
84 bic r0, r0, #0x000e @ ............wca. 84 bic r0, r0, #0x000e @ ............wca.
85 mcr p15, 0, r0, c1, c0, 0 @ disable caches 85 mcr p15, 0, r0, c1, c0, 0 @ disable caches
86 mov pc, lr 86 ret lr
87 87
88/* 88/*
89 * cpu_arm1020_reset(loc) 89 * cpu_arm1020_reset(loc)
@@ -107,7 +107,7 @@ ENTRY(cpu_arm1020_reset)
107 bic ip, ip, #0x000f @ ............wcam 107 bic ip, ip, #0x000f @ ............wcam
108 bic ip, ip, #0x1100 @ ...i...s........ 108 bic ip, ip, #0x1100 @ ...i...s........
109 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 109 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
110 mov pc, r0 110 ret r0
111ENDPROC(cpu_arm1020_reset) 111ENDPROC(cpu_arm1020_reset)
112 .popsection 112 .popsection
113 113
@@ -117,7 +117,7 @@ ENDPROC(cpu_arm1020_reset)
117 .align 5 117 .align 5
118ENTRY(cpu_arm1020_do_idle) 118ENTRY(cpu_arm1020_do_idle)
119 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt 119 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
120 mov pc, lr 120 ret lr
121 121
122/* ================================= CACHE ================================ */ 122/* ================================= CACHE ================================ */
123 123
@@ -133,7 +133,7 @@ ENTRY(arm1020_flush_icache_all)
133 mov r0, #0 133 mov r0, #0
134 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 134 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
135#endif 135#endif
136 mov pc, lr 136 ret lr
137ENDPROC(arm1020_flush_icache_all) 137ENDPROC(arm1020_flush_icache_all)
138 138
139/* 139/*
@@ -169,7 +169,7 @@ __flush_whole_cache:
169 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 169 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
170#endif 170#endif
171 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 171 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
172 mov pc, lr 172 ret lr
173 173
174/* 174/*
175 * flush_user_cache_range(start, end, flags) 175 * flush_user_cache_range(start, end, flags)
@@ -200,7 +200,7 @@ ENTRY(arm1020_flush_user_cache_range)
200 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 200 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
201#endif 201#endif
202 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 202 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
203 mov pc, lr 203 ret lr
204 204
205/* 205/*
206 * coherent_kern_range(start, end) 206 * coherent_kern_range(start, end)
@@ -242,7 +242,7 @@ ENTRY(arm1020_coherent_user_range)
242 blo 1b 242 blo 1b
243 mcr p15, 0, ip, c7, c10, 4 @ drain WB 243 mcr p15, 0, ip, c7, c10, 4 @ drain WB
244 mov r0, #0 244 mov r0, #0
245 mov pc, lr 245 ret lr
246 246
247/* 247/*
248 * flush_kern_dcache_area(void *addr, size_t size) 248 * flush_kern_dcache_area(void *addr, size_t size)
@@ -264,7 +264,7 @@ ENTRY(arm1020_flush_kern_dcache_area)
264 blo 1b 264 blo 1b
265#endif 265#endif
266 mcr p15, 0, ip, c7, c10, 4 @ drain WB 266 mcr p15, 0, ip, c7, c10, 4 @ drain WB
267 mov pc, lr 267 ret lr
268 268
269/* 269/*
270 * dma_inv_range(start, end) 270 * dma_inv_range(start, end)
@@ -297,7 +297,7 @@ arm1020_dma_inv_range:
297 blo 1b 297 blo 1b
298#endif 298#endif
299 mcr p15, 0, ip, c7, c10, 4 @ drain WB 299 mcr p15, 0, ip, c7, c10, 4 @ drain WB
300 mov pc, lr 300 ret lr
301 301
302/* 302/*
303 * dma_clean_range(start, end) 303 * dma_clean_range(start, end)
@@ -320,7 +320,7 @@ arm1020_dma_clean_range:
320 blo 1b 320 blo 1b
321#endif 321#endif
322 mcr p15, 0, ip, c7, c10, 4 @ drain WB 322 mcr p15, 0, ip, c7, c10, 4 @ drain WB
323 mov pc, lr 323 ret lr
324 324
325/* 325/*
326 * dma_flush_range(start, end) 326 * dma_flush_range(start, end)
@@ -342,7 +342,7 @@ ENTRY(arm1020_dma_flush_range)
342 blo 1b 342 blo 1b
343#endif 343#endif
344 mcr p15, 0, ip, c7, c10, 4 @ drain WB 344 mcr p15, 0, ip, c7, c10, 4 @ drain WB
345 mov pc, lr 345 ret lr
346 346
347/* 347/*
348 * dma_map_area(start, size, dir) 348 * dma_map_area(start, size, dir)
@@ -365,7 +365,7 @@ ENDPROC(arm1020_dma_map_area)
365 * - dir - DMA direction 365 * - dir - DMA direction
366 */ 366 */
367ENTRY(arm1020_dma_unmap_area) 367ENTRY(arm1020_dma_unmap_area)
368 mov pc, lr 368 ret lr
369ENDPROC(arm1020_dma_unmap_area) 369ENDPROC(arm1020_dma_unmap_area)
370 370
371 .globl arm1020_flush_kern_cache_louis 371 .globl arm1020_flush_kern_cache_louis
@@ -384,7 +384,7 @@ ENTRY(cpu_arm1020_dcache_clean_area)
384 subs r1, r1, #CACHE_DLINESIZE 384 subs r1, r1, #CACHE_DLINESIZE
385 bhi 1b 385 bhi 1b
386#endif 386#endif
387 mov pc, lr 387 ret lr
388 388
389/* =============================== PageTable ============================== */ 389/* =============================== PageTable ============================== */
390 390
@@ -423,7 +423,7 @@ ENTRY(cpu_arm1020_switch_mm)
423 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer 423 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
424 mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs 424 mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs
425#endif /* CONFIG_MMU */ 425#endif /* CONFIG_MMU */
426 mov pc, lr 426 ret lr
427 427
428/* 428/*
429 * cpu_arm1020_set_pte(ptep, pte) 429 * cpu_arm1020_set_pte(ptep, pte)
@@ -441,7 +441,7 @@ ENTRY(cpu_arm1020_set_pte_ext)
441#endif 441#endif
442 mcr p15, 0, r0, c7, c10, 4 @ drain WB 442 mcr p15, 0, r0, c7, c10, 4 @ drain WB
443#endif /* CONFIG_MMU */ 443#endif /* CONFIG_MMU */
444 mov pc, lr 444 ret lr
445 445
446 .type __arm1020_setup, #function 446 .type __arm1020_setup, #function
447__arm1020_setup: 447__arm1020_setup:
@@ -460,7 +460,7 @@ __arm1020_setup:
460#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN 460#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
461 orr r0, r0, #0x4000 @ .R.. .... .... .... 461 orr r0, r0, #0x4000 @ .R.. .... .... ....
462#endif 462#endif
463 mov pc, lr 463 ret lr
464 .size __arm1020_setup, . - __arm1020_setup 464 .size __arm1020_setup, . - __arm1020_setup
465 465
466 /* 466 /*
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index 9d89405c3d03..a6331d78601f 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -73,7 +73,7 @@
73 * cpu_arm1020e_proc_init() 73 * cpu_arm1020e_proc_init()
74 */ 74 */
75ENTRY(cpu_arm1020e_proc_init) 75ENTRY(cpu_arm1020e_proc_init)
76 mov pc, lr 76 ret lr
77 77
78/* 78/*
79 * cpu_arm1020e_proc_fin() 79 * cpu_arm1020e_proc_fin()
@@ -83,7 +83,7 @@ ENTRY(cpu_arm1020e_proc_fin)
83 bic r0, r0, #0x1000 @ ...i............ 83 bic r0, r0, #0x1000 @ ...i............
84 bic r0, r0, #0x000e @ ............wca. 84 bic r0, r0, #0x000e @ ............wca.
85 mcr p15, 0, r0, c1, c0, 0 @ disable caches 85 mcr p15, 0, r0, c1, c0, 0 @ disable caches
86 mov pc, lr 86 ret lr
87 87
88/* 88/*
89 * cpu_arm1020e_reset(loc) 89 * cpu_arm1020e_reset(loc)
@@ -107,7 +107,7 @@ ENTRY(cpu_arm1020e_reset)
107 bic ip, ip, #0x000f @ ............wcam 107 bic ip, ip, #0x000f @ ............wcam
108 bic ip, ip, #0x1100 @ ...i...s........ 108 bic ip, ip, #0x1100 @ ...i...s........
109 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 109 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
110 mov pc, r0 110 ret r0
111ENDPROC(cpu_arm1020e_reset) 111ENDPROC(cpu_arm1020e_reset)
112 .popsection 112 .popsection
113 113
@@ -117,7 +117,7 @@ ENDPROC(cpu_arm1020e_reset)
117 .align 5 117 .align 5
118ENTRY(cpu_arm1020e_do_idle) 118ENTRY(cpu_arm1020e_do_idle)
119 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt 119 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
120 mov pc, lr 120 ret lr
121 121
122/* ================================= CACHE ================================ */ 122/* ================================= CACHE ================================ */
123 123
@@ -133,7 +133,7 @@ ENTRY(arm1020e_flush_icache_all)
133 mov r0, #0 133 mov r0, #0
134 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 134 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
135#endif 135#endif
136 mov pc, lr 136 ret lr
137ENDPROC(arm1020e_flush_icache_all) 137ENDPROC(arm1020e_flush_icache_all)
138 138
139/* 139/*
@@ -168,7 +168,7 @@ __flush_whole_cache:
168 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 168 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
169#endif 169#endif
170 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 170 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
171 mov pc, lr 171 ret lr
172 172
173/* 173/*
174 * flush_user_cache_range(start, end, flags) 174 * flush_user_cache_range(start, end, flags)
@@ -197,7 +197,7 @@ ENTRY(arm1020e_flush_user_cache_range)
197 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 197 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
198#endif 198#endif
199 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 199 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
200 mov pc, lr 200 ret lr
201 201
202/* 202/*
203 * coherent_kern_range(start, end) 203 * coherent_kern_range(start, end)
@@ -236,7 +236,7 @@ ENTRY(arm1020e_coherent_user_range)
236 blo 1b 236 blo 1b
237 mcr p15, 0, ip, c7, c10, 4 @ drain WB 237 mcr p15, 0, ip, c7, c10, 4 @ drain WB
238 mov r0, #0 238 mov r0, #0
239 mov pc, lr 239 ret lr
240 240
241/* 241/*
242 * flush_kern_dcache_area(void *addr, size_t size) 242 * flush_kern_dcache_area(void *addr, size_t size)
@@ -257,7 +257,7 @@ ENTRY(arm1020e_flush_kern_dcache_area)
257 blo 1b 257 blo 1b
258#endif 258#endif
259 mcr p15, 0, ip, c7, c10, 4 @ drain WB 259 mcr p15, 0, ip, c7, c10, 4 @ drain WB
260 mov pc, lr 260 ret lr
261 261
262/* 262/*
263 * dma_inv_range(start, end) 263 * dma_inv_range(start, end)
@@ -286,7 +286,7 @@ arm1020e_dma_inv_range:
286 blo 1b 286 blo 1b
287#endif 287#endif
288 mcr p15, 0, ip, c7, c10, 4 @ drain WB 288 mcr p15, 0, ip, c7, c10, 4 @ drain WB
289 mov pc, lr 289 ret lr
290 290
291/* 291/*
292 * dma_clean_range(start, end) 292 * dma_clean_range(start, end)
@@ -308,7 +308,7 @@ arm1020e_dma_clean_range:
308 blo 1b 308 blo 1b
309#endif 309#endif
310 mcr p15, 0, ip, c7, c10, 4 @ drain WB 310 mcr p15, 0, ip, c7, c10, 4 @ drain WB
311 mov pc, lr 311 ret lr
312 312
313/* 313/*
314 * dma_flush_range(start, end) 314 * dma_flush_range(start, end)
@@ -328,7 +328,7 @@ ENTRY(arm1020e_dma_flush_range)
328 blo 1b 328 blo 1b
329#endif 329#endif
330 mcr p15, 0, ip, c7, c10, 4 @ drain WB 330 mcr p15, 0, ip, c7, c10, 4 @ drain WB
331 mov pc, lr 331 ret lr
332 332
333/* 333/*
334 * dma_map_area(start, size, dir) 334 * dma_map_area(start, size, dir)
@@ -351,7 +351,7 @@ ENDPROC(arm1020e_dma_map_area)
351 * - dir - DMA direction 351 * - dir - DMA direction
352 */ 352 */
353ENTRY(arm1020e_dma_unmap_area) 353ENTRY(arm1020e_dma_unmap_area)
354 mov pc, lr 354 ret lr
355ENDPROC(arm1020e_dma_unmap_area) 355ENDPROC(arm1020e_dma_unmap_area)
356 356
357 .globl arm1020e_flush_kern_cache_louis 357 .globl arm1020e_flush_kern_cache_louis
@@ -369,7 +369,7 @@ ENTRY(cpu_arm1020e_dcache_clean_area)
369 subs r1, r1, #CACHE_DLINESIZE 369 subs r1, r1, #CACHE_DLINESIZE
370 bhi 1b 370 bhi 1b
371#endif 371#endif
372 mov pc, lr 372 ret lr
373 373
374/* =============================== PageTable ============================== */ 374/* =============================== PageTable ============================== */
375 375
@@ -407,7 +407,7 @@ ENTRY(cpu_arm1020e_switch_mm)
407 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer 407 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
408 mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs 408 mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs
409#endif 409#endif
410 mov pc, lr 410 ret lr
411 411
412/* 412/*
413 * cpu_arm1020e_set_pte(ptep, pte) 413 * cpu_arm1020e_set_pte(ptep, pte)
@@ -423,7 +423,7 @@ ENTRY(cpu_arm1020e_set_pte_ext)
423 mcr p15, 0, r0, c7, c10, 1 @ clean D entry 423 mcr p15, 0, r0, c7, c10, 1 @ clean D entry
424#endif 424#endif
425#endif /* CONFIG_MMU */ 425#endif /* CONFIG_MMU */
426 mov pc, lr 426 ret lr
427 427
428 .type __arm1020e_setup, #function 428 .type __arm1020e_setup, #function
429__arm1020e_setup: 429__arm1020e_setup:
@@ -441,7 +441,7 @@ __arm1020e_setup:
441#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN 441#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
442 orr r0, r0, #0x4000 @ .R.. .... .... .... 442 orr r0, r0, #0x4000 @ .R.. .... .... ....
443#endif 443#endif
444 mov pc, lr 444 ret lr
445 .size __arm1020e_setup, . - __arm1020e_setup 445 .size __arm1020e_setup, . - __arm1020e_setup
446 446
447 /* 447 /*
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S
index 6f01a0ae3b30..a126b7a59928 100644
--- a/arch/arm/mm/proc-arm1022.S
+++ b/arch/arm/mm/proc-arm1022.S
@@ -62,7 +62,7 @@
62 * cpu_arm1022_proc_init() 62 * cpu_arm1022_proc_init()
63 */ 63 */
64ENTRY(cpu_arm1022_proc_init) 64ENTRY(cpu_arm1022_proc_init)
65 mov pc, lr 65 ret lr
66 66
67/* 67/*
68 * cpu_arm1022_proc_fin() 68 * cpu_arm1022_proc_fin()
@@ -72,7 +72,7 @@ ENTRY(cpu_arm1022_proc_fin)
72 bic r0, r0, #0x1000 @ ...i............ 72 bic r0, r0, #0x1000 @ ...i............
73 bic r0, r0, #0x000e @ ............wca. 73 bic r0, r0, #0x000e @ ............wca.
74 mcr p15, 0, r0, c1, c0, 0 @ disable caches 74 mcr p15, 0, r0, c1, c0, 0 @ disable caches
75 mov pc, lr 75 ret lr
76 76
77/* 77/*
78 * cpu_arm1022_reset(loc) 78 * cpu_arm1022_reset(loc)
@@ -96,7 +96,7 @@ ENTRY(cpu_arm1022_reset)
96 bic ip, ip, #0x000f @ ............wcam 96 bic ip, ip, #0x000f @ ............wcam
97 bic ip, ip, #0x1100 @ ...i...s........ 97 bic ip, ip, #0x1100 @ ...i...s........
98 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 98 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
99 mov pc, r0 99 ret r0
100ENDPROC(cpu_arm1022_reset) 100ENDPROC(cpu_arm1022_reset)
101 .popsection 101 .popsection
102 102
@@ -106,7 +106,7 @@ ENDPROC(cpu_arm1022_reset)
106 .align 5 106 .align 5
107ENTRY(cpu_arm1022_do_idle) 107ENTRY(cpu_arm1022_do_idle)
108 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt 108 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
109 mov pc, lr 109 ret lr
110 110
111/* ================================= CACHE ================================ */ 111/* ================================= CACHE ================================ */
112 112
@@ -122,7 +122,7 @@ ENTRY(arm1022_flush_icache_all)
122 mov r0, #0 122 mov r0, #0
123 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 123 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
124#endif 124#endif
125 mov pc, lr 125 ret lr
126ENDPROC(arm1022_flush_icache_all) 126ENDPROC(arm1022_flush_icache_all)
127 127
128/* 128/*
@@ -156,7 +156,7 @@ __flush_whole_cache:
156 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 156 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
157#endif 157#endif
158 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 158 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
159 mov pc, lr 159 ret lr
160 160
161/* 161/*
162 * flush_user_cache_range(start, end, flags) 162 * flush_user_cache_range(start, end, flags)
@@ -185,7 +185,7 @@ ENTRY(arm1022_flush_user_cache_range)
185 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 185 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
186#endif 186#endif
187 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 187 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
188 mov pc, lr 188 ret lr
189 189
190/* 190/*
191 * coherent_kern_range(start, end) 191 * coherent_kern_range(start, end)
@@ -225,7 +225,7 @@ ENTRY(arm1022_coherent_user_range)
225 blo 1b 225 blo 1b
226 mcr p15, 0, ip, c7, c10, 4 @ drain WB 226 mcr p15, 0, ip, c7, c10, 4 @ drain WB
227 mov r0, #0 227 mov r0, #0
228 mov pc, lr 228 ret lr
229 229
230/* 230/*
231 * flush_kern_dcache_area(void *addr, size_t size) 231 * flush_kern_dcache_area(void *addr, size_t size)
@@ -246,7 +246,7 @@ ENTRY(arm1022_flush_kern_dcache_area)
246 blo 1b 246 blo 1b
247#endif 247#endif
248 mcr p15, 0, ip, c7, c10, 4 @ drain WB 248 mcr p15, 0, ip, c7, c10, 4 @ drain WB
249 mov pc, lr 249 ret lr
250 250
251/* 251/*
252 * dma_inv_range(start, end) 252 * dma_inv_range(start, end)
@@ -275,7 +275,7 @@ arm1022_dma_inv_range:
275 blo 1b 275 blo 1b
276#endif 276#endif
277 mcr p15, 0, ip, c7, c10, 4 @ drain WB 277 mcr p15, 0, ip, c7, c10, 4 @ drain WB
278 mov pc, lr 278 ret lr
279 279
280/* 280/*
281 * dma_clean_range(start, end) 281 * dma_clean_range(start, end)
@@ -297,7 +297,7 @@ arm1022_dma_clean_range:
297 blo 1b 297 blo 1b
298#endif 298#endif
299 mcr p15, 0, ip, c7, c10, 4 @ drain WB 299 mcr p15, 0, ip, c7, c10, 4 @ drain WB
300 mov pc, lr 300 ret lr
301 301
302/* 302/*
303 * dma_flush_range(start, end) 303 * dma_flush_range(start, end)
@@ -317,7 +317,7 @@ ENTRY(arm1022_dma_flush_range)
317 blo 1b 317 blo 1b
318#endif 318#endif
319 mcr p15, 0, ip, c7, c10, 4 @ drain WB 319 mcr p15, 0, ip, c7, c10, 4 @ drain WB
320 mov pc, lr 320 ret lr
321 321
322/* 322/*
323 * dma_map_area(start, size, dir) 323 * dma_map_area(start, size, dir)
@@ -340,7 +340,7 @@ ENDPROC(arm1022_dma_map_area)
340 * - dir - DMA direction 340 * - dir - DMA direction
341 */ 341 */
342ENTRY(arm1022_dma_unmap_area) 342ENTRY(arm1022_dma_unmap_area)
343 mov pc, lr 343 ret lr
344ENDPROC(arm1022_dma_unmap_area) 344ENDPROC(arm1022_dma_unmap_area)
345 345
346 .globl arm1022_flush_kern_cache_louis 346 .globl arm1022_flush_kern_cache_louis
@@ -358,7 +358,7 @@ ENTRY(cpu_arm1022_dcache_clean_area)
358 subs r1, r1, #CACHE_DLINESIZE 358 subs r1, r1, #CACHE_DLINESIZE
359 bhi 1b 359 bhi 1b
360#endif 360#endif
361 mov pc, lr 361 ret lr
362 362
363/* =============================== PageTable ============================== */ 363/* =============================== PageTable ============================== */
364 364
@@ -389,7 +389,7 @@ ENTRY(cpu_arm1022_switch_mm)
389 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer 389 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
390 mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs 390 mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs
391#endif 391#endif
392 mov pc, lr 392 ret lr
393 393
394/* 394/*
395 * cpu_arm1022_set_pte_ext(ptep, pte, ext) 395 * cpu_arm1022_set_pte_ext(ptep, pte, ext)
@@ -405,7 +405,7 @@ ENTRY(cpu_arm1022_set_pte_ext)
405 mcr p15, 0, r0, c7, c10, 1 @ clean D entry 405 mcr p15, 0, r0, c7, c10, 1 @ clean D entry
406#endif 406#endif
407#endif /* CONFIG_MMU */ 407#endif /* CONFIG_MMU */
408 mov pc, lr 408 ret lr
409 409
410 .type __arm1022_setup, #function 410 .type __arm1022_setup, #function
411__arm1022_setup: 411__arm1022_setup:
@@ -423,7 +423,7 @@ __arm1022_setup:
423#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN 423#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
424 orr r0, r0, #0x4000 @ .R.............. 424 orr r0, r0, #0x4000 @ .R..............
425#endif 425#endif
426 mov pc, lr 426 ret lr
427 .size __arm1022_setup, . - __arm1022_setup 427 .size __arm1022_setup, . - __arm1022_setup
428 428
429 /* 429 /*
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index 4799a24b43e6..fc294067e977 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -62,7 +62,7 @@
62 * cpu_arm1026_proc_init() 62 * cpu_arm1026_proc_init()
63 */ 63 */
64ENTRY(cpu_arm1026_proc_init) 64ENTRY(cpu_arm1026_proc_init)
65 mov pc, lr 65 ret lr
66 66
67/* 67/*
68 * cpu_arm1026_proc_fin() 68 * cpu_arm1026_proc_fin()
@@ -72,7 +72,7 @@ ENTRY(cpu_arm1026_proc_fin)
72 bic r0, r0, #0x1000 @ ...i............ 72 bic r0, r0, #0x1000 @ ...i............
73 bic r0, r0, #0x000e @ ............wca. 73 bic r0, r0, #0x000e @ ............wca.
74 mcr p15, 0, r0, c1, c0, 0 @ disable caches 74 mcr p15, 0, r0, c1, c0, 0 @ disable caches
75 mov pc, lr 75 ret lr
76 76
77/* 77/*
78 * cpu_arm1026_reset(loc) 78 * cpu_arm1026_reset(loc)
@@ -96,7 +96,7 @@ ENTRY(cpu_arm1026_reset)
96 bic ip, ip, #0x000f @ ............wcam 96 bic ip, ip, #0x000f @ ............wcam
97 bic ip, ip, #0x1100 @ ...i...s........ 97 bic ip, ip, #0x1100 @ ...i...s........
98 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 98 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
99 mov pc, r0 99 ret r0
100ENDPROC(cpu_arm1026_reset) 100ENDPROC(cpu_arm1026_reset)
101 .popsection 101 .popsection
102 102
@@ -106,7 +106,7 @@ ENDPROC(cpu_arm1026_reset)
106 .align 5 106 .align 5
107ENTRY(cpu_arm1026_do_idle) 107ENTRY(cpu_arm1026_do_idle)
108 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt 108 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
109 mov pc, lr 109 ret lr
110 110
111/* ================================= CACHE ================================ */ 111/* ================================= CACHE ================================ */
112 112
@@ -122,7 +122,7 @@ ENTRY(arm1026_flush_icache_all)
122 mov r0, #0 122 mov r0, #0
123 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 123 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
124#endif 124#endif
125 mov pc, lr 125 ret lr
126ENDPROC(arm1026_flush_icache_all) 126ENDPROC(arm1026_flush_icache_all)
127 127
128/* 128/*
@@ -151,7 +151,7 @@ __flush_whole_cache:
151 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 151 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
152#endif 152#endif
153 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 153 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
154 mov pc, lr 154 ret lr
155 155
156/* 156/*
157 * flush_user_cache_range(start, end, flags) 157 * flush_user_cache_range(start, end, flags)
@@ -180,7 +180,7 @@ ENTRY(arm1026_flush_user_cache_range)
180 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 180 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
181#endif 181#endif
182 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 182 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
183 mov pc, lr 183 ret lr
184 184
185/* 185/*
186 * coherent_kern_range(start, end) 186 * coherent_kern_range(start, end)
@@ -219,7 +219,7 @@ ENTRY(arm1026_coherent_user_range)
219 blo 1b 219 blo 1b
220 mcr p15, 0, ip, c7, c10, 4 @ drain WB 220 mcr p15, 0, ip, c7, c10, 4 @ drain WB
221 mov r0, #0 221 mov r0, #0
222 mov pc, lr 222 ret lr
223 223
224/* 224/*
225 * flush_kern_dcache_area(void *addr, size_t size) 225 * flush_kern_dcache_area(void *addr, size_t size)
@@ -240,7 +240,7 @@ ENTRY(arm1026_flush_kern_dcache_area)
240 blo 1b 240 blo 1b
241#endif 241#endif
242 mcr p15, 0, ip, c7, c10, 4 @ drain WB 242 mcr p15, 0, ip, c7, c10, 4 @ drain WB
243 mov pc, lr 243 ret lr
244 244
245/* 245/*
246 * dma_inv_range(start, end) 246 * dma_inv_range(start, end)
@@ -269,7 +269,7 @@ arm1026_dma_inv_range:
269 blo 1b 269 blo 1b
270#endif 270#endif
271 mcr p15, 0, ip, c7, c10, 4 @ drain WB 271 mcr p15, 0, ip, c7, c10, 4 @ drain WB
272 mov pc, lr 272 ret lr
273 273
274/* 274/*
275 * dma_clean_range(start, end) 275 * dma_clean_range(start, end)
@@ -291,7 +291,7 @@ arm1026_dma_clean_range:
291 blo 1b 291 blo 1b
292#endif 292#endif
293 mcr p15, 0, ip, c7, c10, 4 @ drain WB 293 mcr p15, 0, ip, c7, c10, 4 @ drain WB
294 mov pc, lr 294 ret lr
295 295
296/* 296/*
297 * dma_flush_range(start, end) 297 * dma_flush_range(start, end)
@@ -311,7 +311,7 @@ ENTRY(arm1026_dma_flush_range)
311 blo 1b 311 blo 1b
312#endif 312#endif
313 mcr p15, 0, ip, c7, c10, 4 @ drain WB 313 mcr p15, 0, ip, c7, c10, 4 @ drain WB
314 mov pc, lr 314 ret lr
315 315
316/* 316/*
317 * dma_map_area(start, size, dir) 317 * dma_map_area(start, size, dir)
@@ -334,7 +334,7 @@ ENDPROC(arm1026_dma_map_area)
334 * - dir - DMA direction 334 * - dir - DMA direction
335 */ 335 */
336ENTRY(arm1026_dma_unmap_area) 336ENTRY(arm1026_dma_unmap_area)
337 mov pc, lr 337 ret lr
338ENDPROC(arm1026_dma_unmap_area) 338ENDPROC(arm1026_dma_unmap_area)
339 339
340 .globl arm1026_flush_kern_cache_louis 340 .globl arm1026_flush_kern_cache_louis
@@ -352,7 +352,7 @@ ENTRY(cpu_arm1026_dcache_clean_area)
352 subs r1, r1, #CACHE_DLINESIZE 352 subs r1, r1, #CACHE_DLINESIZE
353 bhi 1b 353 bhi 1b
354#endif 354#endif
355 mov pc, lr 355 ret lr
356 356
357/* =============================== PageTable ============================== */ 357/* =============================== PageTable ============================== */
358 358
@@ -378,7 +378,7 @@ ENTRY(cpu_arm1026_switch_mm)
378 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer 378 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
379 mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs 379 mcr p15, 0, r1, c8, c7, 0 @ invalidate I & D TLBs
380#endif 380#endif
381 mov pc, lr 381 ret lr
382 382
383/* 383/*
384 * cpu_arm1026_set_pte_ext(ptep, pte, ext) 384 * cpu_arm1026_set_pte_ext(ptep, pte, ext)
@@ -394,7 +394,7 @@ ENTRY(cpu_arm1026_set_pte_ext)
394 mcr p15, 0, r0, c7, c10, 1 @ clean D entry 394 mcr p15, 0, r0, c7, c10, 1 @ clean D entry
395#endif 395#endif
396#endif /* CONFIG_MMU */ 396#endif /* CONFIG_MMU */
397 mov pc, lr 397 ret lr
398 398
399 .type __arm1026_setup, #function 399 .type __arm1026_setup, #function
400__arm1026_setup: 400__arm1026_setup:
@@ -417,7 +417,7 @@ __arm1026_setup:
417#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN 417#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
418 orr r0, r0, #0x4000 @ .R.. .... .... .... 418 orr r0, r0, #0x4000 @ .R.. .... .... ....
419#endif 419#endif
420 mov pc, lr 420 ret lr
421 .size __arm1026_setup, . - __arm1026_setup 421 .size __arm1026_setup, . - __arm1026_setup
422 422
423 /* 423 /*
diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S
index d42c37f9f5bc..2baa66b3ac9b 100644
--- a/arch/arm/mm/proc-arm720.S
+++ b/arch/arm/mm/proc-arm720.S
@@ -51,14 +51,14 @@
51 */ 51 */
52ENTRY(cpu_arm720_dcache_clean_area) 52ENTRY(cpu_arm720_dcache_clean_area)
53ENTRY(cpu_arm720_proc_init) 53ENTRY(cpu_arm720_proc_init)
54 mov pc, lr 54 ret lr
55 55
56ENTRY(cpu_arm720_proc_fin) 56ENTRY(cpu_arm720_proc_fin)
57 mrc p15, 0, r0, c1, c0, 0 57 mrc p15, 0, r0, c1, c0, 0
58 bic r0, r0, #0x1000 @ ...i............ 58 bic r0, r0, #0x1000 @ ...i............
59 bic r0, r0, #0x000e @ ............wca. 59 bic r0, r0, #0x000e @ ............wca.
60 mcr p15, 0, r0, c1, c0, 0 @ disable caches 60 mcr p15, 0, r0, c1, c0, 0 @ disable caches
61 mov pc, lr 61 ret lr
62 62
63/* 63/*
64 * Function: arm720_proc_do_idle(void) 64 * Function: arm720_proc_do_idle(void)
@@ -66,7 +66,7 @@ ENTRY(cpu_arm720_proc_fin)
66 * Purpose : put the processor in proper idle mode 66 * Purpose : put the processor in proper idle mode
67 */ 67 */
68ENTRY(cpu_arm720_do_idle) 68ENTRY(cpu_arm720_do_idle)
69 mov pc, lr 69 ret lr
70 70
71/* 71/*
72 * Function: arm720_switch_mm(unsigned long pgd_phys) 72 * Function: arm720_switch_mm(unsigned long pgd_phys)
@@ -81,7 +81,7 @@ ENTRY(cpu_arm720_switch_mm)
81 mcr p15, 0, r0, c2, c0, 0 @ update page table ptr 81 mcr p15, 0, r0, c2, c0, 0 @ update page table ptr
82 mcr p15, 0, r1, c8, c7, 0 @ flush TLB (v4) 82 mcr p15, 0, r1, c8, c7, 0 @ flush TLB (v4)
83#endif 83#endif
84 mov pc, lr 84 ret lr
85 85
86/* 86/*
87 * Function: arm720_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext) 87 * Function: arm720_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext)
@@ -94,7 +94,7 @@ ENTRY(cpu_arm720_set_pte_ext)
94#ifdef CONFIG_MMU 94#ifdef CONFIG_MMU
95 armv3_set_pte_ext wc_disable=0 95 armv3_set_pte_ext wc_disable=0
96#endif 96#endif
97 mov pc, lr 97 ret lr
98 98
99/* 99/*
100 * Function: arm720_reset 100 * Function: arm720_reset
@@ -112,7 +112,7 @@ ENTRY(cpu_arm720_reset)
112 bic ip, ip, #0x000f @ ............wcam 112 bic ip, ip, #0x000f @ ............wcam
113 bic ip, ip, #0x2100 @ ..v....s........ 113 bic ip, ip, #0x2100 @ ..v....s........
114 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 114 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
115 mov pc, r0 115 ret r0
116ENDPROC(cpu_arm720_reset) 116ENDPROC(cpu_arm720_reset)
117 .popsection 117 .popsection
118 118
@@ -128,7 +128,7 @@ __arm710_setup:
128 bic r0, r0, r5 128 bic r0, r0, r5
129 ldr r5, arm710_cr1_set 129 ldr r5, arm710_cr1_set
130 orr r0, r0, r5 130 orr r0, r0, r5
131 mov pc, lr @ __ret (head.S) 131 ret lr @ __ret (head.S)
132 .size __arm710_setup, . - __arm710_setup 132 .size __arm710_setup, . - __arm710_setup
133 133
134 /* 134 /*
@@ -156,7 +156,7 @@ __arm720_setup:
156 mrc p15, 0, r0, c1, c0 @ get control register 156 mrc p15, 0, r0, c1, c0 @ get control register
157 bic r0, r0, r5 157 bic r0, r0, r5
158 orr r0, r0, r6 158 orr r0, r0, r6
159 mov pc, lr @ __ret (head.S) 159 ret lr @ __ret (head.S)
160 .size __arm720_setup, . - __arm720_setup 160 .size __arm720_setup, . - __arm720_setup
161 161
162 /* 162 /*
diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S
index 9b0ae90cbf17..ac1ea6b3bce4 100644
--- a/arch/arm/mm/proc-arm740.S
+++ b/arch/arm/mm/proc-arm740.S
@@ -32,7 +32,7 @@ ENTRY(cpu_arm740_proc_init)
32ENTRY(cpu_arm740_do_idle) 32ENTRY(cpu_arm740_do_idle)
33ENTRY(cpu_arm740_dcache_clean_area) 33ENTRY(cpu_arm740_dcache_clean_area)
34ENTRY(cpu_arm740_switch_mm) 34ENTRY(cpu_arm740_switch_mm)
35 mov pc, lr 35 ret lr
36 36
37/* 37/*
38 * cpu_arm740_proc_fin() 38 * cpu_arm740_proc_fin()
@@ -42,7 +42,7 @@ ENTRY(cpu_arm740_proc_fin)
42 bic r0, r0, #0x3f000000 @ bank/f/lock/s 42 bic r0, r0, #0x3f000000 @ bank/f/lock/s
43 bic r0, r0, #0x0000000c @ w-buffer/cache 43 bic r0, r0, #0x0000000c @ w-buffer/cache
44 mcr p15, 0, r0, c1, c0, 0 @ disable caches 44 mcr p15, 0, r0, c1, c0, 0 @ disable caches
45 mov pc, lr 45 ret lr
46 46
47/* 47/*
48 * cpu_arm740_reset(loc) 48 * cpu_arm740_reset(loc)
@@ -56,7 +56,7 @@ ENTRY(cpu_arm740_reset)
56 mrc p15, 0, ip, c1, c0, 0 @ get ctrl register 56 mrc p15, 0, ip, c1, c0, 0 @ get ctrl register
57 bic ip, ip, #0x0000000c @ ............wc.. 57 bic ip, ip, #0x0000000c @ ............wc..
58 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 58 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
59 mov pc, r0 59 ret r0
60ENDPROC(cpu_arm740_reset) 60ENDPROC(cpu_arm740_reset)
61 .popsection 61 .popsection
62 62
@@ -115,7 +115,7 @@ __arm740_setup:
115 @ need some benchmark 115 @ need some benchmark
116 orr r0, r0, #0x0000000d @ MPU/Cache/WB 116 orr r0, r0, #0x0000000d @ MPU/Cache/WB
117 117
118 mov pc, lr 118 ret lr
119 119
120 .size __arm740_setup, . - __arm740_setup 120 .size __arm740_setup, . - __arm740_setup
121 121
diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S
index f6cc3f63ce39..bf6ba4bc30ff 100644
--- a/arch/arm/mm/proc-arm7tdmi.S
+++ b/arch/arm/mm/proc-arm7tdmi.S
@@ -32,13 +32,13 @@ ENTRY(cpu_arm7tdmi_proc_init)
32ENTRY(cpu_arm7tdmi_do_idle) 32ENTRY(cpu_arm7tdmi_do_idle)
33ENTRY(cpu_arm7tdmi_dcache_clean_area) 33ENTRY(cpu_arm7tdmi_dcache_clean_area)
34ENTRY(cpu_arm7tdmi_switch_mm) 34ENTRY(cpu_arm7tdmi_switch_mm)
35 mov pc, lr 35 ret lr
36 36
37/* 37/*
38 * cpu_arm7tdmi_proc_fin() 38 * cpu_arm7tdmi_proc_fin()
39 */ 39 */
40ENTRY(cpu_arm7tdmi_proc_fin) 40ENTRY(cpu_arm7tdmi_proc_fin)
41 mov pc, lr 41 ret lr
42 42
43/* 43/*
44 * Function: cpu_arm7tdmi_reset(loc) 44 * Function: cpu_arm7tdmi_reset(loc)
@@ -47,13 +47,13 @@ ENTRY(cpu_arm7tdmi_proc_fin)
47 */ 47 */
48 .pushsection .idmap.text, "ax" 48 .pushsection .idmap.text, "ax"
49ENTRY(cpu_arm7tdmi_reset) 49ENTRY(cpu_arm7tdmi_reset)
50 mov pc, r0 50 ret r0
51ENDPROC(cpu_arm7tdmi_reset) 51ENDPROC(cpu_arm7tdmi_reset)
52 .popsection 52 .popsection
53 53
54 .type __arm7tdmi_setup, #function 54 .type __arm7tdmi_setup, #function
55__arm7tdmi_setup: 55__arm7tdmi_setup:
56 mov pc, lr 56 ret lr
57 .size __arm7tdmi_setup, . - __arm7tdmi_setup 57 .size __arm7tdmi_setup, . - __arm7tdmi_setup
58 58
59 __INITDATA 59 __INITDATA
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index 549557df6d57..22bf8dde4f84 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -63,7 +63,7 @@
63 * cpu_arm920_proc_init() 63 * cpu_arm920_proc_init()
64 */ 64 */
65ENTRY(cpu_arm920_proc_init) 65ENTRY(cpu_arm920_proc_init)
66 mov pc, lr 66 ret lr
67 67
68/* 68/*
69 * cpu_arm920_proc_fin() 69 * cpu_arm920_proc_fin()
@@ -73,7 +73,7 @@ ENTRY(cpu_arm920_proc_fin)
73 bic r0, r0, #0x1000 @ ...i............ 73 bic r0, r0, #0x1000 @ ...i............
74 bic r0, r0, #0x000e @ ............wca. 74 bic r0, r0, #0x000e @ ............wca.
75 mcr p15, 0, r0, c1, c0, 0 @ disable caches 75 mcr p15, 0, r0, c1, c0, 0 @ disable caches
76 mov pc, lr 76 ret lr
77 77
78/* 78/*
79 * cpu_arm920_reset(loc) 79 * cpu_arm920_reset(loc)
@@ -97,7 +97,7 @@ ENTRY(cpu_arm920_reset)
97 bic ip, ip, #0x000f @ ............wcam 97 bic ip, ip, #0x000f @ ............wcam
98 bic ip, ip, #0x1100 @ ...i...s........ 98 bic ip, ip, #0x1100 @ ...i...s........
99 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 99 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
100 mov pc, r0 100 ret r0
101ENDPROC(cpu_arm920_reset) 101ENDPROC(cpu_arm920_reset)
102 .popsection 102 .popsection
103 103
@@ -107,7 +107,7 @@ ENDPROC(cpu_arm920_reset)
107 .align 5 107 .align 5
108ENTRY(cpu_arm920_do_idle) 108ENTRY(cpu_arm920_do_idle)
109 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt 109 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
110 mov pc, lr 110 ret lr
111 111
112 112
113#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH 113#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
@@ -120,7 +120,7 @@ ENTRY(cpu_arm920_do_idle)
120ENTRY(arm920_flush_icache_all) 120ENTRY(arm920_flush_icache_all)
121 mov r0, #0 121 mov r0, #0
122 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 122 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
123 mov pc, lr 123 ret lr
124ENDPROC(arm920_flush_icache_all) 124ENDPROC(arm920_flush_icache_all)
125 125
126/* 126/*
@@ -151,7 +151,7 @@ __flush_whole_cache:
151 tst r2, #VM_EXEC 151 tst r2, #VM_EXEC
152 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 152 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
153 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 153 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
154 mov pc, lr 154 ret lr
155 155
156/* 156/*
157 * flush_user_cache_range(start, end, flags) 157 * flush_user_cache_range(start, end, flags)
@@ -177,7 +177,7 @@ ENTRY(arm920_flush_user_cache_range)
177 blo 1b 177 blo 1b
178 tst r2, #VM_EXEC 178 tst r2, #VM_EXEC
179 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 179 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
180 mov pc, lr 180 ret lr
181 181
182/* 182/*
183 * coherent_kern_range(start, end) 183 * coherent_kern_range(start, end)
@@ -211,7 +211,7 @@ ENTRY(arm920_coherent_user_range)
211 blo 1b 211 blo 1b
212 mcr p15, 0, r0, c7, c10, 4 @ drain WB 212 mcr p15, 0, r0, c7, c10, 4 @ drain WB
213 mov r0, #0 213 mov r0, #0
214 mov pc, lr 214 ret lr
215 215
216/* 216/*
217 * flush_kern_dcache_area(void *addr, size_t size) 217 * flush_kern_dcache_area(void *addr, size_t size)
@@ -231,7 +231,7 @@ ENTRY(arm920_flush_kern_dcache_area)
231 mov r0, #0 231 mov r0, #0
232 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 232 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
233 mcr p15, 0, r0, c7, c10, 4 @ drain WB 233 mcr p15, 0, r0, c7, c10, 4 @ drain WB
234 mov pc, lr 234 ret lr
235 235
236/* 236/*
237 * dma_inv_range(start, end) 237 * dma_inv_range(start, end)
@@ -257,7 +257,7 @@ arm920_dma_inv_range:
257 cmp r0, r1 257 cmp r0, r1
258 blo 1b 258 blo 1b
259 mcr p15, 0, r0, c7, c10, 4 @ drain WB 259 mcr p15, 0, r0, c7, c10, 4 @ drain WB
260 mov pc, lr 260 ret lr
261 261
262/* 262/*
263 * dma_clean_range(start, end) 263 * dma_clean_range(start, end)
@@ -276,7 +276,7 @@ arm920_dma_clean_range:
276 cmp r0, r1 276 cmp r0, r1
277 blo 1b 277 blo 1b
278 mcr p15, 0, r0, c7, c10, 4 @ drain WB 278 mcr p15, 0, r0, c7, c10, 4 @ drain WB
279 mov pc, lr 279 ret lr
280 280
281/* 281/*
282 * dma_flush_range(start, end) 282 * dma_flush_range(start, end)
@@ -293,7 +293,7 @@ ENTRY(arm920_dma_flush_range)
293 cmp r0, r1 293 cmp r0, r1
294 blo 1b 294 blo 1b
295 mcr p15, 0, r0, c7, c10, 4 @ drain WB 295 mcr p15, 0, r0, c7, c10, 4 @ drain WB
296 mov pc, lr 296 ret lr
297 297
298/* 298/*
299 * dma_map_area(start, size, dir) 299 * dma_map_area(start, size, dir)
@@ -316,7 +316,7 @@ ENDPROC(arm920_dma_map_area)
316 * - dir - DMA direction 316 * - dir - DMA direction
317 */ 317 */
318ENTRY(arm920_dma_unmap_area) 318ENTRY(arm920_dma_unmap_area)
319 mov pc, lr 319 ret lr
320ENDPROC(arm920_dma_unmap_area) 320ENDPROC(arm920_dma_unmap_area)
321 321
322 .globl arm920_flush_kern_cache_louis 322 .globl arm920_flush_kern_cache_louis
@@ -332,7 +332,7 @@ ENTRY(cpu_arm920_dcache_clean_area)
332 add r0, r0, #CACHE_DLINESIZE 332 add r0, r0, #CACHE_DLINESIZE
333 subs r1, r1, #CACHE_DLINESIZE 333 subs r1, r1, #CACHE_DLINESIZE
334 bhi 1b 334 bhi 1b
335 mov pc, lr 335 ret lr
336 336
337/* =============================== PageTable ============================== */ 337/* =============================== PageTable ============================== */
338 338
@@ -367,7 +367,7 @@ ENTRY(cpu_arm920_switch_mm)
367 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer 367 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
368 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs 368 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
369#endif 369#endif
370 mov pc, lr 370 ret lr
371 371
372/* 372/*
373 * cpu_arm920_set_pte(ptep, pte, ext) 373 * cpu_arm920_set_pte(ptep, pte, ext)
@@ -382,7 +382,7 @@ ENTRY(cpu_arm920_set_pte_ext)
382 mcr p15, 0, r0, c7, c10, 1 @ clean D entry 382 mcr p15, 0, r0, c7, c10, 1 @ clean D entry
383 mcr p15, 0, r0, c7, c10, 4 @ drain WB 383 mcr p15, 0, r0, c7, c10, 4 @ drain WB
384#endif 384#endif
385 mov pc, lr 385 ret lr
386 386
387/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ 387/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
388.globl cpu_arm920_suspend_size 388.globl cpu_arm920_suspend_size
@@ -423,7 +423,7 @@ __arm920_setup:
423 mrc p15, 0, r0, c1, c0 @ get control register v4 423 mrc p15, 0, r0, c1, c0 @ get control register v4
424 bic r0, r0, r5 424 bic r0, r0, r5
425 orr r0, r0, r6 425 orr r0, r0, r6
426 mov pc, lr 426 ret lr
427 .size __arm920_setup, . - __arm920_setup 427 .size __arm920_setup, . - __arm920_setup
428 428
429 /* 429 /*
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S
index 2a758b06c6f6..0c6d5ac5a6d4 100644
--- a/arch/arm/mm/proc-arm922.S
+++ b/arch/arm/mm/proc-arm922.S
@@ -65,7 +65,7 @@
65 * cpu_arm922_proc_init() 65 * cpu_arm922_proc_init()
66 */ 66 */
67ENTRY(cpu_arm922_proc_init) 67ENTRY(cpu_arm922_proc_init)
68 mov pc, lr 68 ret lr
69 69
70/* 70/*
71 * cpu_arm922_proc_fin() 71 * cpu_arm922_proc_fin()
@@ -75,7 +75,7 @@ ENTRY(cpu_arm922_proc_fin)
75 bic r0, r0, #0x1000 @ ...i............ 75 bic r0, r0, #0x1000 @ ...i............
76 bic r0, r0, #0x000e @ ............wca. 76 bic r0, r0, #0x000e @ ............wca.
77 mcr p15, 0, r0, c1, c0, 0 @ disable caches 77 mcr p15, 0, r0, c1, c0, 0 @ disable caches
78 mov pc, lr 78 ret lr
79 79
80/* 80/*
81 * cpu_arm922_reset(loc) 81 * cpu_arm922_reset(loc)
@@ -99,7 +99,7 @@ ENTRY(cpu_arm922_reset)
99 bic ip, ip, #0x000f @ ............wcam 99 bic ip, ip, #0x000f @ ............wcam
100 bic ip, ip, #0x1100 @ ...i...s........ 100 bic ip, ip, #0x1100 @ ...i...s........
101 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 101 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
102 mov pc, r0 102 ret r0
103ENDPROC(cpu_arm922_reset) 103ENDPROC(cpu_arm922_reset)
104 .popsection 104 .popsection
105 105
@@ -109,7 +109,7 @@ ENDPROC(cpu_arm922_reset)
109 .align 5 109 .align 5
110ENTRY(cpu_arm922_do_idle) 110ENTRY(cpu_arm922_do_idle)
111 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt 111 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
112 mov pc, lr 112 ret lr
113 113
114 114
115#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH 115#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
@@ -122,7 +122,7 @@ ENTRY(cpu_arm922_do_idle)
122ENTRY(arm922_flush_icache_all) 122ENTRY(arm922_flush_icache_all)
123 mov r0, #0 123 mov r0, #0
124 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 124 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
125 mov pc, lr 125 ret lr
126ENDPROC(arm922_flush_icache_all) 126ENDPROC(arm922_flush_icache_all)
127 127
128/* 128/*
@@ -153,7 +153,7 @@ __flush_whole_cache:
153 tst r2, #VM_EXEC 153 tst r2, #VM_EXEC
154 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 154 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
155 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 155 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
156 mov pc, lr 156 ret lr
157 157
158/* 158/*
159 * flush_user_cache_range(start, end, flags) 159 * flush_user_cache_range(start, end, flags)
@@ -179,7 +179,7 @@ ENTRY(arm922_flush_user_cache_range)
179 blo 1b 179 blo 1b
180 tst r2, #VM_EXEC 180 tst r2, #VM_EXEC
181 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 181 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
182 mov pc, lr 182 ret lr
183 183
184/* 184/*
185 * coherent_kern_range(start, end) 185 * coherent_kern_range(start, end)
@@ -213,7 +213,7 @@ ENTRY(arm922_coherent_user_range)
213 blo 1b 213 blo 1b
214 mcr p15, 0, r0, c7, c10, 4 @ drain WB 214 mcr p15, 0, r0, c7, c10, 4 @ drain WB
215 mov r0, #0 215 mov r0, #0
216 mov pc, lr 216 ret lr
217 217
218/* 218/*
219 * flush_kern_dcache_area(void *addr, size_t size) 219 * flush_kern_dcache_area(void *addr, size_t size)
@@ -233,7 +233,7 @@ ENTRY(arm922_flush_kern_dcache_area)
233 mov r0, #0 233 mov r0, #0
234 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 234 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
235 mcr p15, 0, r0, c7, c10, 4 @ drain WB 235 mcr p15, 0, r0, c7, c10, 4 @ drain WB
236 mov pc, lr 236 ret lr
237 237
238/* 238/*
239 * dma_inv_range(start, end) 239 * dma_inv_range(start, end)
@@ -259,7 +259,7 @@ arm922_dma_inv_range:
259 cmp r0, r1 259 cmp r0, r1
260 blo 1b 260 blo 1b
261 mcr p15, 0, r0, c7, c10, 4 @ drain WB 261 mcr p15, 0, r0, c7, c10, 4 @ drain WB
262 mov pc, lr 262 ret lr
263 263
264/* 264/*
265 * dma_clean_range(start, end) 265 * dma_clean_range(start, end)
@@ -278,7 +278,7 @@ arm922_dma_clean_range:
278 cmp r0, r1 278 cmp r0, r1
279 blo 1b 279 blo 1b
280 mcr p15, 0, r0, c7, c10, 4 @ drain WB 280 mcr p15, 0, r0, c7, c10, 4 @ drain WB
281 mov pc, lr 281 ret lr
282 282
283/* 283/*
284 * dma_flush_range(start, end) 284 * dma_flush_range(start, end)
@@ -295,7 +295,7 @@ ENTRY(arm922_dma_flush_range)
295 cmp r0, r1 295 cmp r0, r1
296 blo 1b 296 blo 1b
297 mcr p15, 0, r0, c7, c10, 4 @ drain WB 297 mcr p15, 0, r0, c7, c10, 4 @ drain WB
298 mov pc, lr 298 ret lr
299 299
300/* 300/*
301 * dma_map_area(start, size, dir) 301 * dma_map_area(start, size, dir)
@@ -318,7 +318,7 @@ ENDPROC(arm922_dma_map_area)
318 * - dir - DMA direction 318 * - dir - DMA direction
319 */ 319 */
320ENTRY(arm922_dma_unmap_area) 320ENTRY(arm922_dma_unmap_area)
321 mov pc, lr 321 ret lr
322ENDPROC(arm922_dma_unmap_area) 322ENDPROC(arm922_dma_unmap_area)
323 323
324 .globl arm922_flush_kern_cache_louis 324 .globl arm922_flush_kern_cache_louis
@@ -336,7 +336,7 @@ ENTRY(cpu_arm922_dcache_clean_area)
336 subs r1, r1, #CACHE_DLINESIZE 336 subs r1, r1, #CACHE_DLINESIZE
337 bhi 1b 337 bhi 1b
338#endif 338#endif
339 mov pc, lr 339 ret lr
340 340
341/* =============================== PageTable ============================== */ 341/* =============================== PageTable ============================== */
342 342
@@ -371,7 +371,7 @@ ENTRY(cpu_arm922_switch_mm)
371 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer 371 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
372 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs 372 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
373#endif 373#endif
374 mov pc, lr 374 ret lr
375 375
376/* 376/*
377 * cpu_arm922_set_pte_ext(ptep, pte, ext) 377 * cpu_arm922_set_pte_ext(ptep, pte, ext)
@@ -386,7 +386,7 @@ ENTRY(cpu_arm922_set_pte_ext)
386 mcr p15, 0, r0, c7, c10, 1 @ clean D entry 386 mcr p15, 0, r0, c7, c10, 1 @ clean D entry
387 mcr p15, 0, r0, c7, c10, 4 @ drain WB 387 mcr p15, 0, r0, c7, c10, 4 @ drain WB
388#endif /* CONFIG_MMU */ 388#endif /* CONFIG_MMU */
389 mov pc, lr 389 ret lr
390 390
391 .type __arm922_setup, #function 391 .type __arm922_setup, #function
392__arm922_setup: 392__arm922_setup:
@@ -401,7 +401,7 @@ __arm922_setup:
401 mrc p15, 0, r0, c1, c0 @ get control register v4 401 mrc p15, 0, r0, c1, c0 @ get control register v4
402 bic r0, r0, r5 402 bic r0, r0, r5
403 orr r0, r0, r6 403 orr r0, r0, r6
404 mov pc, lr 404 ret lr
405 .size __arm922_setup, . - __arm922_setup 405 .size __arm922_setup, . - __arm922_setup
406 406
407 /* 407 /*
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index ba0d58e1a2a2..c32d073282ea 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -86,7 +86,7 @@
86 * cpu_arm925_proc_init() 86 * cpu_arm925_proc_init()
87 */ 87 */
88ENTRY(cpu_arm925_proc_init) 88ENTRY(cpu_arm925_proc_init)
89 mov pc, lr 89 ret lr
90 90
91/* 91/*
92 * cpu_arm925_proc_fin() 92 * cpu_arm925_proc_fin()
@@ -96,7 +96,7 @@ ENTRY(cpu_arm925_proc_fin)
96 bic r0, r0, #0x1000 @ ...i............ 96 bic r0, r0, #0x1000 @ ...i............
97 bic r0, r0, #0x000e @ ............wca. 97 bic r0, r0, #0x000e @ ............wca.
98 mcr p15, 0, r0, c1, c0, 0 @ disable caches 98 mcr p15, 0, r0, c1, c0, 0 @ disable caches
99 mov pc, lr 99 ret lr
100 100
101/* 101/*
102 * cpu_arm925_reset(loc) 102 * cpu_arm925_reset(loc)
@@ -129,7 +129,7 @@ ENDPROC(cpu_arm925_reset)
129 bic ip, ip, #0x000f @ ............wcam 129 bic ip, ip, #0x000f @ ............wcam
130 bic ip, ip, #0x1100 @ ...i...s........ 130 bic ip, ip, #0x1100 @ ...i...s........
131 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 131 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
132 mov pc, r0 132 ret r0
133 133
134/* 134/*
135 * cpu_arm925_do_idle() 135 * cpu_arm925_do_idle()
@@ -145,7 +145,7 @@ ENTRY(cpu_arm925_do_idle)
145 mcr p15, 0, r2, c1, c0, 0 @ Disable I cache 145 mcr p15, 0, r2, c1, c0, 0 @ Disable I cache
146 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt 146 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
147 mcr p15, 0, r1, c1, c0, 0 @ Restore ICache enable 147 mcr p15, 0, r1, c1, c0, 0 @ Restore ICache enable
148 mov pc, lr 148 ret lr
149 149
150/* 150/*
151 * flush_icache_all() 151 * flush_icache_all()
@@ -155,7 +155,7 @@ ENTRY(cpu_arm925_do_idle)
155ENTRY(arm925_flush_icache_all) 155ENTRY(arm925_flush_icache_all)
156 mov r0, #0 156 mov r0, #0
157 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 157 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
158 mov pc, lr 158 ret lr
159ENDPROC(arm925_flush_icache_all) 159ENDPROC(arm925_flush_icache_all)
160 160
161/* 161/*
@@ -188,7 +188,7 @@ __flush_whole_cache:
188 tst r2, #VM_EXEC 188 tst r2, #VM_EXEC
189 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 189 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
190 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 190 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
191 mov pc, lr 191 ret lr
192 192
193/* 193/*
194 * flush_user_cache_range(start, end, flags) 194 * flush_user_cache_range(start, end, flags)
@@ -225,7 +225,7 @@ ENTRY(arm925_flush_user_cache_range)
225 blo 1b 225 blo 1b
226 tst r2, #VM_EXEC 226 tst r2, #VM_EXEC
227 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 227 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
228 mov pc, lr 228 ret lr
229 229
230/* 230/*
231 * coherent_kern_range(start, end) 231 * coherent_kern_range(start, end)
@@ -259,7 +259,7 @@ ENTRY(arm925_coherent_user_range)
259 blo 1b 259 blo 1b
260 mcr p15, 0, r0, c7, c10, 4 @ drain WB 260 mcr p15, 0, r0, c7, c10, 4 @ drain WB
261 mov r0, #0 261 mov r0, #0
262 mov pc, lr 262 ret lr
263 263
264/* 264/*
265 * flush_kern_dcache_area(void *addr, size_t size) 265 * flush_kern_dcache_area(void *addr, size_t size)
@@ -279,7 +279,7 @@ ENTRY(arm925_flush_kern_dcache_area)
279 mov r0, #0 279 mov r0, #0
280 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 280 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
281 mcr p15, 0, r0, c7, c10, 4 @ drain WB 281 mcr p15, 0, r0, c7, c10, 4 @ drain WB
282 mov pc, lr 282 ret lr
283 283
284/* 284/*
285 * dma_inv_range(start, end) 285 * dma_inv_range(start, end)
@@ -307,7 +307,7 @@ arm925_dma_inv_range:
307 cmp r0, r1 307 cmp r0, r1
308 blo 1b 308 blo 1b
309 mcr p15, 0, r0, c7, c10, 4 @ drain WB 309 mcr p15, 0, r0, c7, c10, 4 @ drain WB
310 mov pc, lr 310 ret lr
311 311
312/* 312/*
313 * dma_clean_range(start, end) 313 * dma_clean_range(start, end)
@@ -328,7 +328,7 @@ arm925_dma_clean_range:
328 blo 1b 328 blo 1b
329#endif 329#endif
330 mcr p15, 0, r0, c7, c10, 4 @ drain WB 330 mcr p15, 0, r0, c7, c10, 4 @ drain WB
331 mov pc, lr 331 ret lr
332 332
333/* 333/*
334 * dma_flush_range(start, end) 334 * dma_flush_range(start, end)
@@ -350,7 +350,7 @@ ENTRY(arm925_dma_flush_range)
350 cmp r0, r1 350 cmp r0, r1
351 blo 1b 351 blo 1b
352 mcr p15, 0, r0, c7, c10, 4 @ drain WB 352 mcr p15, 0, r0, c7, c10, 4 @ drain WB
353 mov pc, lr 353 ret lr
354 354
355/* 355/*
356 * dma_map_area(start, size, dir) 356 * dma_map_area(start, size, dir)
@@ -373,7 +373,7 @@ ENDPROC(arm925_dma_map_area)
373 * - dir - DMA direction 373 * - dir - DMA direction
374 */ 374 */
375ENTRY(arm925_dma_unmap_area) 375ENTRY(arm925_dma_unmap_area)
376 mov pc, lr 376 ret lr
377ENDPROC(arm925_dma_unmap_area) 377ENDPROC(arm925_dma_unmap_area)
378 378
379 .globl arm925_flush_kern_cache_louis 379 .globl arm925_flush_kern_cache_louis
@@ -390,7 +390,7 @@ ENTRY(cpu_arm925_dcache_clean_area)
390 bhi 1b 390 bhi 1b
391#endif 391#endif
392 mcr p15, 0, r0, c7, c10, 4 @ drain WB 392 mcr p15, 0, r0, c7, c10, 4 @ drain WB
393 mov pc, lr 393 ret lr
394 394
395/* =============================== PageTable ============================== */ 395/* =============================== PageTable ============================== */
396 396
@@ -419,7 +419,7 @@ ENTRY(cpu_arm925_switch_mm)
419 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer 419 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
420 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs 420 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
421#endif 421#endif
422 mov pc, lr 422 ret lr
423 423
424/* 424/*
425 * cpu_arm925_set_pte_ext(ptep, pte, ext) 425 * cpu_arm925_set_pte_ext(ptep, pte, ext)
@@ -436,7 +436,7 @@ ENTRY(cpu_arm925_set_pte_ext)
436#endif 436#endif
437 mcr p15, 0, r0, c7, c10, 4 @ drain WB 437 mcr p15, 0, r0, c7, c10, 4 @ drain WB
438#endif /* CONFIG_MMU */ 438#endif /* CONFIG_MMU */
439 mov pc, lr 439 ret lr
440 440
441 .type __arm925_setup, #function 441 .type __arm925_setup, #function
442__arm925_setup: 442__arm925_setup:
@@ -469,7 +469,7 @@ __arm925_setup:
469#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN 469#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
470 orr r0, r0, #0x4000 @ .1.. .... .... .... 470 orr r0, r0, #0x4000 @ .1.. .... .... ....
471#endif 471#endif
472 mov pc, lr 472 ret lr
473 .size __arm925_setup, . - __arm925_setup 473 .size __arm925_setup, . - __arm925_setup
474 474
475 /* 475 /*
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 0f098f407c9f..252b2503038d 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -55,7 +55,7 @@
55 * cpu_arm926_proc_init() 55 * cpu_arm926_proc_init()
56 */ 56 */
57ENTRY(cpu_arm926_proc_init) 57ENTRY(cpu_arm926_proc_init)
58 mov pc, lr 58 ret lr
59 59
60/* 60/*
61 * cpu_arm926_proc_fin() 61 * cpu_arm926_proc_fin()
@@ -65,7 +65,7 @@ ENTRY(cpu_arm926_proc_fin)
65 bic r0, r0, #0x1000 @ ...i............ 65 bic r0, r0, #0x1000 @ ...i............
66 bic r0, r0, #0x000e @ ............wca. 66 bic r0, r0, #0x000e @ ............wca.
67 mcr p15, 0, r0, c1, c0, 0 @ disable caches 67 mcr p15, 0, r0, c1, c0, 0 @ disable caches
68 mov pc, lr 68 ret lr
69 69
70/* 70/*
71 * cpu_arm926_reset(loc) 71 * cpu_arm926_reset(loc)
@@ -89,7 +89,7 @@ ENTRY(cpu_arm926_reset)
89 bic ip, ip, #0x000f @ ............wcam 89 bic ip, ip, #0x000f @ ............wcam
90 bic ip, ip, #0x1100 @ ...i...s........ 90 bic ip, ip, #0x1100 @ ...i...s........
91 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 91 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
92 mov pc, r0 92 ret r0
93ENDPROC(cpu_arm926_reset) 93ENDPROC(cpu_arm926_reset)
94 .popsection 94 .popsection
95 95
@@ -111,7 +111,7 @@ ENTRY(cpu_arm926_do_idle)
111 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt 111 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
112 mcr p15, 0, r1, c1, c0, 0 @ Restore ICache enable 112 mcr p15, 0, r1, c1, c0, 0 @ Restore ICache enable
113 msr cpsr_c, r3 @ Restore FIQ state 113 msr cpsr_c, r3 @ Restore FIQ state
114 mov pc, lr 114 ret lr
115 115
116/* 116/*
117 * flush_icache_all() 117 * flush_icache_all()
@@ -121,7 +121,7 @@ ENTRY(cpu_arm926_do_idle)
121ENTRY(arm926_flush_icache_all) 121ENTRY(arm926_flush_icache_all)
122 mov r0, #0 122 mov r0, #0
123 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 123 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
124 mov pc, lr 124 ret lr
125ENDPROC(arm926_flush_icache_all) 125ENDPROC(arm926_flush_icache_all)
126 126
127/* 127/*
@@ -151,7 +151,7 @@ __flush_whole_cache:
151 tst r2, #VM_EXEC 151 tst r2, #VM_EXEC
152 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 152 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
153 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 153 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
154 mov pc, lr 154 ret lr
155 155
156/* 156/*
157 * flush_user_cache_range(start, end, flags) 157 * flush_user_cache_range(start, end, flags)
@@ -188,7 +188,7 @@ ENTRY(arm926_flush_user_cache_range)
188 blo 1b 188 blo 1b
189 tst r2, #VM_EXEC 189 tst r2, #VM_EXEC
190 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 190 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
191 mov pc, lr 191 ret lr
192 192
193/* 193/*
194 * coherent_kern_range(start, end) 194 * coherent_kern_range(start, end)
@@ -222,7 +222,7 @@ ENTRY(arm926_coherent_user_range)
222 blo 1b 222 blo 1b
223 mcr p15, 0, r0, c7, c10, 4 @ drain WB 223 mcr p15, 0, r0, c7, c10, 4 @ drain WB
224 mov r0, #0 224 mov r0, #0
225 mov pc, lr 225 ret lr
226 226
227/* 227/*
228 * flush_kern_dcache_area(void *addr, size_t size) 228 * flush_kern_dcache_area(void *addr, size_t size)
@@ -242,7 +242,7 @@ ENTRY(arm926_flush_kern_dcache_area)
242 mov r0, #0 242 mov r0, #0
243 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 243 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
244 mcr p15, 0, r0, c7, c10, 4 @ drain WB 244 mcr p15, 0, r0, c7, c10, 4 @ drain WB
245 mov pc, lr 245 ret lr
246 246
247/* 247/*
248 * dma_inv_range(start, end) 248 * dma_inv_range(start, end)
@@ -270,7 +270,7 @@ arm926_dma_inv_range:
270 cmp r0, r1 270 cmp r0, r1
271 blo 1b 271 blo 1b
272 mcr p15, 0, r0, c7, c10, 4 @ drain WB 272 mcr p15, 0, r0, c7, c10, 4 @ drain WB
273 mov pc, lr 273 ret lr
274 274
275/* 275/*
276 * dma_clean_range(start, end) 276 * dma_clean_range(start, end)
@@ -291,7 +291,7 @@ arm926_dma_clean_range:
291 blo 1b 291 blo 1b
292#endif 292#endif
293 mcr p15, 0, r0, c7, c10, 4 @ drain WB 293 mcr p15, 0, r0, c7, c10, 4 @ drain WB
294 mov pc, lr 294 ret lr
295 295
296/* 296/*
297 * dma_flush_range(start, end) 297 * dma_flush_range(start, end)
@@ -313,7 +313,7 @@ ENTRY(arm926_dma_flush_range)
313 cmp r0, r1 313 cmp r0, r1
314 blo 1b 314 blo 1b
315 mcr p15, 0, r0, c7, c10, 4 @ drain WB 315 mcr p15, 0, r0, c7, c10, 4 @ drain WB
316 mov pc, lr 316 ret lr
317 317
318/* 318/*
319 * dma_map_area(start, size, dir) 319 * dma_map_area(start, size, dir)
@@ -336,7 +336,7 @@ ENDPROC(arm926_dma_map_area)
336 * - dir - DMA direction 336 * - dir - DMA direction
337 */ 337 */
338ENTRY(arm926_dma_unmap_area) 338ENTRY(arm926_dma_unmap_area)
339 mov pc, lr 339 ret lr
340ENDPROC(arm926_dma_unmap_area) 340ENDPROC(arm926_dma_unmap_area)
341 341
342 .globl arm926_flush_kern_cache_louis 342 .globl arm926_flush_kern_cache_louis
@@ -353,7 +353,7 @@ ENTRY(cpu_arm926_dcache_clean_area)
353 bhi 1b 353 bhi 1b
354#endif 354#endif
355 mcr p15, 0, r0, c7, c10, 4 @ drain WB 355 mcr p15, 0, r0, c7, c10, 4 @ drain WB
356 mov pc, lr 356 ret lr
357 357
358/* =============================== PageTable ============================== */ 358/* =============================== PageTable ============================== */
359 359
@@ -380,7 +380,7 @@ ENTRY(cpu_arm926_switch_mm)
380 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer 380 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
381 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs 381 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
382#endif 382#endif
383 mov pc, lr 383 ret lr
384 384
385/* 385/*
386 * cpu_arm926_set_pte_ext(ptep, pte, ext) 386 * cpu_arm926_set_pte_ext(ptep, pte, ext)
@@ -397,7 +397,7 @@ ENTRY(cpu_arm926_set_pte_ext)
397#endif 397#endif
398 mcr p15, 0, r0, c7, c10, 4 @ drain WB 398 mcr p15, 0, r0, c7, c10, 4 @ drain WB
399#endif 399#endif
400 mov pc, lr 400 ret lr
401 401
402/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ 402/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
403.globl cpu_arm926_suspend_size 403.globl cpu_arm926_suspend_size
@@ -448,7 +448,7 @@ __arm926_setup:
448#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN 448#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
449 orr r0, r0, #0x4000 @ .1.. .... .... .... 449 orr r0, r0, #0x4000 @ .1.. .... .... ....
450#endif 450#endif
451 mov pc, lr 451 ret lr
452 .size __arm926_setup, . - __arm926_setup 452 .size __arm926_setup, . - __arm926_setup
453 453
454 /* 454 /*
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S
index 1c39a704ff6e..e5212d489377 100644
--- a/arch/arm/mm/proc-arm940.S
+++ b/arch/arm/mm/proc-arm940.S
@@ -31,7 +31,7 @@
31 */ 31 */
32ENTRY(cpu_arm940_proc_init) 32ENTRY(cpu_arm940_proc_init)
33ENTRY(cpu_arm940_switch_mm) 33ENTRY(cpu_arm940_switch_mm)
34 mov pc, lr 34 ret lr
35 35
36/* 36/*
37 * cpu_arm940_proc_fin() 37 * cpu_arm940_proc_fin()
@@ -41,7 +41,7 @@ ENTRY(cpu_arm940_proc_fin)
41 bic r0, r0, #0x00001000 @ i-cache 41 bic r0, r0, #0x00001000 @ i-cache
42 bic r0, r0, #0x00000004 @ d-cache 42 bic r0, r0, #0x00000004 @ d-cache
43 mcr p15, 0, r0, c1, c0, 0 @ disable caches 43 mcr p15, 0, r0, c1, c0, 0 @ disable caches
44 mov pc, lr 44 ret lr
45 45
46/* 46/*
47 * cpu_arm940_reset(loc) 47 * cpu_arm940_reset(loc)
@@ -58,7 +58,7 @@ ENTRY(cpu_arm940_reset)
58 bic ip, ip, #0x00000005 @ .............c.p 58 bic ip, ip, #0x00000005 @ .............c.p
59 bic ip, ip, #0x00001000 @ i-cache 59 bic ip, ip, #0x00001000 @ i-cache
60 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 60 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
61 mov pc, r0 61 ret r0
62ENDPROC(cpu_arm940_reset) 62ENDPROC(cpu_arm940_reset)
63 .popsection 63 .popsection
64 64
@@ -68,7 +68,7 @@ ENDPROC(cpu_arm940_reset)
68 .align 5 68 .align 5
69ENTRY(cpu_arm940_do_idle) 69ENTRY(cpu_arm940_do_idle)
70 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt 70 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
71 mov pc, lr 71 ret lr
72 72
73/* 73/*
74 * flush_icache_all() 74 * flush_icache_all()
@@ -78,7 +78,7 @@ ENTRY(cpu_arm940_do_idle)
78ENTRY(arm940_flush_icache_all) 78ENTRY(arm940_flush_icache_all)
79 mov r0, #0 79 mov r0, #0
80 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 80 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
81 mov pc, lr 81 ret lr
82ENDPROC(arm940_flush_icache_all) 82ENDPROC(arm940_flush_icache_all)
83 83
84/* 84/*
@@ -122,7 +122,7 @@ ENTRY(arm940_flush_user_cache_range)
122 tst r2, #VM_EXEC 122 tst r2, #VM_EXEC
123 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 123 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
124 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 124 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
125 mov pc, lr 125 ret lr
126 126
127/* 127/*
128 * coherent_kern_range(start, end) 128 * coherent_kern_range(start, end)
@@ -170,7 +170,7 @@ ENTRY(arm940_flush_kern_dcache_area)
170 bcs 1b @ segments 7 to 0 170 bcs 1b @ segments 7 to 0
171 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 171 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
172 mcr p15, 0, r0, c7, c10, 4 @ drain WB 172 mcr p15, 0, r0, c7, c10, 4 @ drain WB
173 mov pc, lr 173 ret lr
174 174
175/* 175/*
176 * dma_inv_range(start, end) 176 * dma_inv_range(start, end)
@@ -191,7 +191,7 @@ arm940_dma_inv_range:
191 subs r1, r1, #1 << 4 191 subs r1, r1, #1 << 4
192 bcs 1b @ segments 7 to 0 192 bcs 1b @ segments 7 to 0
193 mcr p15, 0, ip, c7, c10, 4 @ drain WB 193 mcr p15, 0, ip, c7, c10, 4 @ drain WB
194 mov pc, lr 194 ret lr
195 195
196/* 196/*
197 * dma_clean_range(start, end) 197 * dma_clean_range(start, end)
@@ -215,7 +215,7 @@ ENTRY(cpu_arm940_dcache_clean_area)
215 bcs 1b @ segments 7 to 0 215 bcs 1b @ segments 7 to 0
216#endif 216#endif
217 mcr p15, 0, ip, c7, c10, 4 @ drain WB 217 mcr p15, 0, ip, c7, c10, 4 @ drain WB
218 mov pc, lr 218 ret lr
219 219
220/* 220/*
221 * dma_flush_range(start, end) 221 * dma_flush_range(start, end)
@@ -241,7 +241,7 @@ ENTRY(arm940_dma_flush_range)
241 subs r1, r1, #1 << 4 241 subs r1, r1, #1 << 4
242 bcs 1b @ segments 7 to 0 242 bcs 1b @ segments 7 to 0
243 mcr p15, 0, ip, c7, c10, 4 @ drain WB 243 mcr p15, 0, ip, c7, c10, 4 @ drain WB
244 mov pc, lr 244 ret lr
245 245
246/* 246/*
247 * dma_map_area(start, size, dir) 247 * dma_map_area(start, size, dir)
@@ -264,7 +264,7 @@ ENDPROC(arm940_dma_map_area)
264 * - dir - DMA direction 264 * - dir - DMA direction
265 */ 265 */
266ENTRY(arm940_dma_unmap_area) 266ENTRY(arm940_dma_unmap_area)
267 mov pc, lr 267 ret lr
268ENDPROC(arm940_dma_unmap_area) 268ENDPROC(arm940_dma_unmap_area)
269 269
270 .globl arm940_flush_kern_cache_louis 270 .globl arm940_flush_kern_cache_louis
@@ -337,7 +337,7 @@ __arm940_setup:
337 orr r0, r0, #0x00001000 @ I-cache 337 orr r0, r0, #0x00001000 @ I-cache
338 orr r0, r0, #0x00000005 @ MPU/D-cache 338 orr r0, r0, #0x00000005 @ MPU/D-cache
339 339
340 mov pc, lr 340 ret lr
341 341
342 .size __arm940_setup, . - __arm940_setup 342 .size __arm940_setup, . - __arm940_setup
343 343
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
index 0289cd905e73..b3dd9b2d0b8e 100644
--- a/arch/arm/mm/proc-arm946.S
+++ b/arch/arm/mm/proc-arm946.S
@@ -38,7 +38,7 @@
38 */ 38 */
39ENTRY(cpu_arm946_proc_init) 39ENTRY(cpu_arm946_proc_init)
40ENTRY(cpu_arm946_switch_mm) 40ENTRY(cpu_arm946_switch_mm)
41 mov pc, lr 41 ret lr
42 42
43/* 43/*
44 * cpu_arm946_proc_fin() 44 * cpu_arm946_proc_fin()
@@ -48,7 +48,7 @@ ENTRY(cpu_arm946_proc_fin)
48 bic r0, r0, #0x00001000 @ i-cache 48 bic r0, r0, #0x00001000 @ i-cache
49 bic r0, r0, #0x00000004 @ d-cache 49 bic r0, r0, #0x00000004 @ d-cache
50 mcr p15, 0, r0, c1, c0, 0 @ disable caches 50 mcr p15, 0, r0, c1, c0, 0 @ disable caches
51 mov pc, lr 51 ret lr
52 52
53/* 53/*
54 * cpu_arm946_reset(loc) 54 * cpu_arm946_reset(loc)
@@ -65,7 +65,7 @@ ENTRY(cpu_arm946_reset)
65 bic ip, ip, #0x00000005 @ .............c.p 65 bic ip, ip, #0x00000005 @ .............c.p
66 bic ip, ip, #0x00001000 @ i-cache 66 bic ip, ip, #0x00001000 @ i-cache
67 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 67 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
68 mov pc, r0 68 ret r0
69ENDPROC(cpu_arm946_reset) 69ENDPROC(cpu_arm946_reset)
70 .popsection 70 .popsection
71 71
@@ -75,7 +75,7 @@ ENDPROC(cpu_arm946_reset)
75 .align 5 75 .align 5
76ENTRY(cpu_arm946_do_idle) 76ENTRY(cpu_arm946_do_idle)
77 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt 77 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
78 mov pc, lr 78 ret lr
79 79
80/* 80/*
81 * flush_icache_all() 81 * flush_icache_all()
@@ -85,7 +85,7 @@ ENTRY(cpu_arm946_do_idle)
85ENTRY(arm946_flush_icache_all) 85ENTRY(arm946_flush_icache_all)
86 mov r0, #0 86 mov r0, #0
87 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 87 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
88 mov pc, lr 88 ret lr
89ENDPROC(arm946_flush_icache_all) 89ENDPROC(arm946_flush_icache_all)
90 90
91/* 91/*
@@ -117,7 +117,7 @@ __flush_whole_cache:
117 tst r2, #VM_EXEC 117 tst r2, #VM_EXEC
118 mcrne p15, 0, ip, c7, c5, 0 @ flush I cache 118 mcrne p15, 0, ip, c7, c5, 0 @ flush I cache
119 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 119 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
120 mov pc, lr 120 ret lr
121 121
122/* 122/*
123 * flush_user_cache_range(start, end, flags) 123 * flush_user_cache_range(start, end, flags)
@@ -156,7 +156,7 @@ ENTRY(arm946_flush_user_cache_range)
156 blo 1b 156 blo 1b
157 tst r2, #VM_EXEC 157 tst r2, #VM_EXEC
158 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 158 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
159 mov pc, lr 159 ret lr
160 160
161/* 161/*
162 * coherent_kern_range(start, end) 162 * coherent_kern_range(start, end)
@@ -191,7 +191,7 @@ ENTRY(arm946_coherent_user_range)
191 blo 1b 191 blo 1b
192 mcr p15, 0, r0, c7, c10, 4 @ drain WB 192 mcr p15, 0, r0, c7, c10, 4 @ drain WB
193 mov r0, #0 193 mov r0, #0
194 mov pc, lr 194 ret lr
195 195
196/* 196/*
197 * flush_kern_dcache_area(void *addr, size_t size) 197 * flush_kern_dcache_area(void *addr, size_t size)
@@ -212,7 +212,7 @@ ENTRY(arm946_flush_kern_dcache_area)
212 mov r0, #0 212 mov r0, #0
213 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 213 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
214 mcr p15, 0, r0, c7, c10, 4 @ drain WB 214 mcr p15, 0, r0, c7, c10, 4 @ drain WB
215 mov pc, lr 215 ret lr
216 216
217/* 217/*
218 * dma_inv_range(start, end) 218 * dma_inv_range(start, end)
@@ -239,7 +239,7 @@ arm946_dma_inv_range:
239 cmp r0, r1 239 cmp r0, r1
240 blo 1b 240 blo 1b
241 mcr p15, 0, r0, c7, c10, 4 @ drain WB 241 mcr p15, 0, r0, c7, c10, 4 @ drain WB
242 mov pc, lr 242 ret lr
243 243
244/* 244/*
245 * dma_clean_range(start, end) 245 * dma_clean_range(start, end)
@@ -260,7 +260,7 @@ arm946_dma_clean_range:
260 blo 1b 260 blo 1b
261#endif 261#endif
262 mcr p15, 0, r0, c7, c10, 4 @ drain WB 262 mcr p15, 0, r0, c7, c10, 4 @ drain WB
263 mov pc, lr 263 ret lr
264 264
265/* 265/*
266 * dma_flush_range(start, end) 266 * dma_flush_range(start, end)
@@ -284,7 +284,7 @@ ENTRY(arm946_dma_flush_range)
284 cmp r0, r1 284 cmp r0, r1
285 blo 1b 285 blo 1b
286 mcr p15, 0, r0, c7, c10, 4 @ drain WB 286 mcr p15, 0, r0, c7, c10, 4 @ drain WB
287 mov pc, lr 287 ret lr
288 288
289/* 289/*
290 * dma_map_area(start, size, dir) 290 * dma_map_area(start, size, dir)
@@ -307,7 +307,7 @@ ENDPROC(arm946_dma_map_area)
307 * - dir - DMA direction 307 * - dir - DMA direction
308 */ 308 */
309ENTRY(arm946_dma_unmap_area) 309ENTRY(arm946_dma_unmap_area)
310 mov pc, lr 310 ret lr
311ENDPROC(arm946_dma_unmap_area) 311ENDPROC(arm946_dma_unmap_area)
312 312
313 .globl arm946_flush_kern_cache_louis 313 .globl arm946_flush_kern_cache_louis
@@ -324,7 +324,7 @@ ENTRY(cpu_arm946_dcache_clean_area)
324 bhi 1b 324 bhi 1b
325#endif 325#endif
326 mcr p15, 0, r0, c7, c10, 4 @ drain WB 326 mcr p15, 0, r0, c7, c10, 4 @ drain WB
327 mov pc, lr 327 ret lr
328 328
329 .type __arm946_setup, #function 329 .type __arm946_setup, #function
330__arm946_setup: 330__arm946_setup:
@@ -392,7 +392,7 @@ __arm946_setup:
392#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN 392#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
393 orr r0, r0, #0x00004000 @ .1.. .... .... .... 393 orr r0, r0, #0x00004000 @ .1.. .... .... ....
394#endif 394#endif
395 mov pc, lr 395 ret lr
396 396
397 .size __arm946_setup, . - __arm946_setup 397 .size __arm946_setup, . - __arm946_setup
398 398
diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S
index f51197ba754a..8227322bbb8f 100644
--- a/arch/arm/mm/proc-arm9tdmi.S
+++ b/arch/arm/mm/proc-arm9tdmi.S
@@ -32,13 +32,13 @@ ENTRY(cpu_arm9tdmi_proc_init)
32ENTRY(cpu_arm9tdmi_do_idle) 32ENTRY(cpu_arm9tdmi_do_idle)
33ENTRY(cpu_arm9tdmi_dcache_clean_area) 33ENTRY(cpu_arm9tdmi_dcache_clean_area)
34ENTRY(cpu_arm9tdmi_switch_mm) 34ENTRY(cpu_arm9tdmi_switch_mm)
35 mov pc, lr 35 ret lr
36 36
37/* 37/*
38 * cpu_arm9tdmi_proc_fin() 38 * cpu_arm9tdmi_proc_fin()
39 */ 39 */
40ENTRY(cpu_arm9tdmi_proc_fin) 40ENTRY(cpu_arm9tdmi_proc_fin)
41 mov pc, lr 41 ret lr
42 42
43/* 43/*
44 * Function: cpu_arm9tdmi_reset(loc) 44 * Function: cpu_arm9tdmi_reset(loc)
@@ -47,13 +47,13 @@ ENTRY(cpu_arm9tdmi_proc_fin)
47 */ 47 */
48 .pushsection .idmap.text, "ax" 48 .pushsection .idmap.text, "ax"
49ENTRY(cpu_arm9tdmi_reset) 49ENTRY(cpu_arm9tdmi_reset)
50 mov pc, r0 50 ret r0
51ENDPROC(cpu_arm9tdmi_reset) 51ENDPROC(cpu_arm9tdmi_reset)
52 .popsection 52 .popsection
53 53
54 .type __arm9tdmi_setup, #function 54 .type __arm9tdmi_setup, #function
55__arm9tdmi_setup: 55__arm9tdmi_setup:
56 mov pc, lr 56 ret lr
57 .size __arm9tdmi_setup, . - __arm9tdmi_setup 57 .size __arm9tdmi_setup, . - __arm9tdmi_setup
58 58
59 __INITDATA 59 __INITDATA
diff --git a/arch/arm/mm/proc-fa526.S b/arch/arm/mm/proc-fa526.S
index 2dfc0f1d3bfd..c494886892ba 100644
--- a/arch/arm/mm/proc-fa526.S
+++ b/arch/arm/mm/proc-fa526.S
@@ -32,7 +32,7 @@
32 * cpu_fa526_proc_init() 32 * cpu_fa526_proc_init()
33 */ 33 */
34ENTRY(cpu_fa526_proc_init) 34ENTRY(cpu_fa526_proc_init)
35 mov pc, lr 35 ret lr
36 36
37/* 37/*
38 * cpu_fa526_proc_fin() 38 * cpu_fa526_proc_fin()
@@ -44,7 +44,7 @@ ENTRY(cpu_fa526_proc_fin)
44 mcr p15, 0, r0, c1, c0, 0 @ disable caches 44 mcr p15, 0, r0, c1, c0, 0 @ disable caches
45 nop 45 nop
46 nop 46 nop
47 mov pc, lr 47 ret lr
48 48
49/* 49/*
50 * cpu_fa526_reset(loc) 50 * cpu_fa526_reset(loc)
@@ -72,7 +72,7 @@ ENTRY(cpu_fa526_reset)
72 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 72 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
73 nop 73 nop
74 nop 74 nop
75 mov pc, r0 75 ret r0
76ENDPROC(cpu_fa526_reset) 76ENDPROC(cpu_fa526_reset)
77 .popsection 77 .popsection
78 78
@@ -81,7 +81,7 @@ ENDPROC(cpu_fa526_reset)
81 */ 81 */
82 .align 4 82 .align 4
83ENTRY(cpu_fa526_do_idle) 83ENTRY(cpu_fa526_do_idle)
84 mov pc, lr 84 ret lr
85 85
86 86
87ENTRY(cpu_fa526_dcache_clean_area) 87ENTRY(cpu_fa526_dcache_clean_area)
@@ -90,7 +90,7 @@ ENTRY(cpu_fa526_dcache_clean_area)
90 subs r1, r1, #CACHE_DLINESIZE 90 subs r1, r1, #CACHE_DLINESIZE
91 bhi 1b 91 bhi 1b
92 mcr p15, 0, r0, c7, c10, 4 @ drain WB 92 mcr p15, 0, r0, c7, c10, 4 @ drain WB
93 mov pc, lr 93 ret lr
94 94
95/* =============================== PageTable ============================== */ 95/* =============================== PageTable ============================== */
96 96
@@ -117,7 +117,7 @@ ENTRY(cpu_fa526_switch_mm)
117 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer 117 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
118 mcr p15, 0, ip, c8, c7, 0 @ invalidate UTLB 118 mcr p15, 0, ip, c8, c7, 0 @ invalidate UTLB
119#endif 119#endif
120 mov pc, lr 120 ret lr
121 121
122/* 122/*
123 * cpu_fa526_set_pte_ext(ptep, pte, ext) 123 * cpu_fa526_set_pte_ext(ptep, pte, ext)
@@ -133,7 +133,7 @@ ENTRY(cpu_fa526_set_pte_ext)
133 mov r0, #0 133 mov r0, #0
134 mcr p15, 0, r0, c7, c10, 4 @ drain WB 134 mcr p15, 0, r0, c7, c10, 4 @ drain WB
135#endif 135#endif
136 mov pc, lr 136 ret lr
137 137
138 .type __fa526_setup, #function 138 .type __fa526_setup, #function
139__fa526_setup: 139__fa526_setup:
@@ -162,7 +162,7 @@ __fa526_setup:
162 bic r0, r0, r5 162 bic r0, r0, r5
163 ldr r5, fa526_cr1_set 163 ldr r5, fa526_cr1_set
164 orr r0, r0, r5 164 orr r0, r0, r5
165 mov pc, lr 165 ret lr
166 .size __fa526_setup, . - __fa526_setup 166 .size __fa526_setup, . - __fa526_setup
167 167
168 /* 168 /*
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index db79b62c92fb..03a1b75f2e16 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -69,7 +69,7 @@ ENTRY(cpu_feroceon_proc_init)
69 movne r2, r2, lsr #2 @ turned into # of sets 69 movne r2, r2, lsr #2 @ turned into # of sets
70 sub r2, r2, #(1 << 5) 70 sub r2, r2, #(1 << 5)
71 stmia r1, {r2, r3} 71 stmia r1, {r2, r3}
72 mov pc, lr 72 ret lr
73 73
74/* 74/*
75 * cpu_feroceon_proc_fin() 75 * cpu_feroceon_proc_fin()
@@ -86,7 +86,7 @@ ENTRY(cpu_feroceon_proc_fin)
86 bic r0, r0, #0x1000 @ ...i............ 86 bic r0, r0, #0x1000 @ ...i............
87 bic r0, r0, #0x000e @ ............wca. 87 bic r0, r0, #0x000e @ ............wca.
88 mcr p15, 0, r0, c1, c0, 0 @ disable caches 88 mcr p15, 0, r0, c1, c0, 0 @ disable caches
89 mov pc, lr 89 ret lr
90 90
91/* 91/*
92 * cpu_feroceon_reset(loc) 92 * cpu_feroceon_reset(loc)
@@ -110,7 +110,7 @@ ENTRY(cpu_feroceon_reset)
110 bic ip, ip, #0x000f @ ............wcam 110 bic ip, ip, #0x000f @ ............wcam
111 bic ip, ip, #0x1100 @ ...i...s........ 111 bic ip, ip, #0x1100 @ ...i...s........
112 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 112 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
113 mov pc, r0 113 ret r0
114ENDPROC(cpu_feroceon_reset) 114ENDPROC(cpu_feroceon_reset)
115 .popsection 115 .popsection
116 116
@@ -124,7 +124,7 @@ ENTRY(cpu_feroceon_do_idle)
124 mov r0, #0 124 mov r0, #0
125 mcr p15, 0, r0, c7, c10, 4 @ Drain write buffer 125 mcr p15, 0, r0, c7, c10, 4 @ Drain write buffer
126 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt 126 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
127 mov pc, lr 127 ret lr
128 128
129/* 129/*
130 * flush_icache_all() 130 * flush_icache_all()
@@ -134,7 +134,7 @@ ENTRY(cpu_feroceon_do_idle)
134ENTRY(feroceon_flush_icache_all) 134ENTRY(feroceon_flush_icache_all)
135 mov r0, #0 135 mov r0, #0
136 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 136 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
137 mov pc, lr 137 ret lr
138ENDPROC(feroceon_flush_icache_all) 138ENDPROC(feroceon_flush_icache_all)
139 139
140/* 140/*
@@ -169,7 +169,7 @@ __flush_whole_cache:
169 mov ip, #0 169 mov ip, #0
170 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 170 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
171 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 171 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
172 mov pc, lr 172 ret lr
173 173
174/* 174/*
175 * flush_user_cache_range(start, end, flags) 175 * flush_user_cache_range(start, end, flags)
@@ -198,7 +198,7 @@ ENTRY(feroceon_flush_user_cache_range)
198 tst r2, #VM_EXEC 198 tst r2, #VM_EXEC
199 mov ip, #0 199 mov ip, #0
200 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 200 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
201 mov pc, lr 201 ret lr
202 202
203/* 203/*
204 * coherent_kern_range(start, end) 204 * coherent_kern_range(start, end)
@@ -233,7 +233,7 @@ ENTRY(feroceon_coherent_user_range)
233 blo 1b 233 blo 1b
234 mcr p15, 0, r0, c7, c10, 4 @ drain WB 234 mcr p15, 0, r0, c7, c10, 4 @ drain WB
235 mov r0, #0 235 mov r0, #0
236 mov pc, lr 236 ret lr
237 237
238/* 238/*
239 * flush_kern_dcache_area(void *addr, size_t size) 239 * flush_kern_dcache_area(void *addr, size_t size)
@@ -254,7 +254,7 @@ ENTRY(feroceon_flush_kern_dcache_area)
254 mov r0, #0 254 mov r0, #0
255 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 255 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
256 mcr p15, 0, r0, c7, c10, 4 @ drain WB 256 mcr p15, 0, r0, c7, c10, 4 @ drain WB
257 mov pc, lr 257 ret lr
258 258
259 .align 5 259 .align 5
260ENTRY(feroceon_range_flush_kern_dcache_area) 260ENTRY(feroceon_range_flush_kern_dcache_area)
@@ -268,7 +268,7 @@ ENTRY(feroceon_range_flush_kern_dcache_area)
268 mov r0, #0 268 mov r0, #0
269 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 269 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
270 mcr p15, 0, r0, c7, c10, 4 @ drain WB 270 mcr p15, 0, r0, c7, c10, 4 @ drain WB
271 mov pc, lr 271 ret lr
272 272
273/* 273/*
274 * dma_inv_range(start, end) 274 * dma_inv_range(start, end)
@@ -295,7 +295,7 @@ feroceon_dma_inv_range:
295 cmp r0, r1 295 cmp r0, r1
296 blo 1b 296 blo 1b
297 mcr p15, 0, r0, c7, c10, 4 @ drain WB 297 mcr p15, 0, r0, c7, c10, 4 @ drain WB
298 mov pc, lr 298 ret lr
299 299
300 .align 5 300 .align 5
301feroceon_range_dma_inv_range: 301feroceon_range_dma_inv_range:
@@ -311,7 +311,7 @@ feroceon_range_dma_inv_range:
311 mcr p15, 5, r0, c15, c14, 0 @ D inv range start 311 mcr p15, 5, r0, c15, c14, 0 @ D inv range start
312 mcr p15, 5, r1, c15, c14, 1 @ D inv range top 312 mcr p15, 5, r1, c15, c14, 1 @ D inv range top
313 msr cpsr_c, r2 @ restore interrupts 313 msr cpsr_c, r2 @ restore interrupts
314 mov pc, lr 314 ret lr
315 315
316/* 316/*
317 * dma_clean_range(start, end) 317 * dma_clean_range(start, end)
@@ -331,7 +331,7 @@ feroceon_dma_clean_range:
331 cmp r0, r1 331 cmp r0, r1
332 blo 1b 332 blo 1b
333 mcr p15, 0, r0, c7, c10, 4 @ drain WB 333 mcr p15, 0, r0, c7, c10, 4 @ drain WB
334 mov pc, lr 334 ret lr
335 335
336 .align 5 336 .align 5
337feroceon_range_dma_clean_range: 337feroceon_range_dma_clean_range:
@@ -344,7 +344,7 @@ feroceon_range_dma_clean_range:
344 mcr p15, 5, r1, c15, c13, 1 @ D clean range top 344 mcr p15, 5, r1, c15, c13, 1 @ D clean range top
345 msr cpsr_c, r2 @ restore interrupts 345 msr cpsr_c, r2 @ restore interrupts
346 mcr p15, 0, r0, c7, c10, 4 @ drain WB 346 mcr p15, 0, r0, c7, c10, 4 @ drain WB
347 mov pc, lr 347 ret lr
348 348
349/* 349/*
350 * dma_flush_range(start, end) 350 * dma_flush_range(start, end)
@@ -362,7 +362,7 @@ ENTRY(feroceon_dma_flush_range)
362 cmp r0, r1 362 cmp r0, r1
363 blo 1b 363 blo 1b
364 mcr p15, 0, r0, c7, c10, 4 @ drain WB 364 mcr p15, 0, r0, c7, c10, 4 @ drain WB
365 mov pc, lr 365 ret lr
366 366
367 .align 5 367 .align 5
368ENTRY(feroceon_range_dma_flush_range) 368ENTRY(feroceon_range_dma_flush_range)
@@ -375,7 +375,7 @@ ENTRY(feroceon_range_dma_flush_range)
375 mcr p15, 5, r1, c15, c15, 1 @ D clean/inv range top 375 mcr p15, 5, r1, c15, c15, 1 @ D clean/inv range top
376 msr cpsr_c, r2 @ restore interrupts 376 msr cpsr_c, r2 @ restore interrupts
377 mcr p15, 0, r0, c7, c10, 4 @ drain WB 377 mcr p15, 0, r0, c7, c10, 4 @ drain WB
378 mov pc, lr 378 ret lr
379 379
380/* 380/*
381 * dma_map_area(start, size, dir) 381 * dma_map_area(start, size, dir)
@@ -412,7 +412,7 @@ ENDPROC(feroceon_range_dma_map_area)
412 * - dir - DMA direction 412 * - dir - DMA direction
413 */ 413 */
414ENTRY(feroceon_dma_unmap_area) 414ENTRY(feroceon_dma_unmap_area)
415 mov pc, lr 415 ret lr
416ENDPROC(feroceon_dma_unmap_area) 416ENDPROC(feroceon_dma_unmap_area)
417 417
418 .globl feroceon_flush_kern_cache_louis 418 .globl feroceon_flush_kern_cache_louis
@@ -461,7 +461,7 @@ ENTRY(cpu_feroceon_dcache_clean_area)
461 bhi 1b 461 bhi 1b
462#endif 462#endif
463 mcr p15, 0, r0, c7, c10, 4 @ drain WB 463 mcr p15, 0, r0, c7, c10, 4 @ drain WB
464 mov pc, lr 464 ret lr
465 465
466/* =============================== PageTable ============================== */ 466/* =============================== PageTable ============================== */
467 467
@@ -490,9 +490,9 @@ ENTRY(cpu_feroceon_switch_mm)
490 490
491 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer 491 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
492 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs 492 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
493 mov pc, r2 493 ret r2
494#else 494#else
495 mov pc, lr 495 ret lr
496#endif 496#endif
497 497
498/* 498/*
@@ -512,7 +512,7 @@ ENTRY(cpu_feroceon_set_pte_ext)
512#endif 512#endif
513 mcr p15, 0, r0, c7, c10, 4 @ drain WB 513 mcr p15, 0, r0, c7, c10, 4 @ drain WB
514#endif 514#endif
515 mov pc, lr 515 ret lr
516 516
517/* Suspend/resume support: taken from arch/arm/mm/proc-arm926.S */ 517/* Suspend/resume support: taken from arch/arm/mm/proc-arm926.S */
518.globl cpu_feroceon_suspend_size 518.globl cpu_feroceon_suspend_size
@@ -554,7 +554,7 @@ __feroceon_setup:
554 mrc p15, 0, r0, c1, c0 @ get control register v4 554 mrc p15, 0, r0, c1, c0 @ get control register v4
555 bic r0, r0, r5 555 bic r0, r0, r5
556 orr r0, r0, r6 556 orr r0, r0, r6
557 mov pc, lr 557 ret lr
558 .size __feroceon_setup, . - __feroceon_setup 558 .size __feroceon_setup, . - __feroceon_setup
559 559
560 /* 560 /*
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S
index 40acba595731..53d393455f13 100644
--- a/arch/arm/mm/proc-mohawk.S
+++ b/arch/arm/mm/proc-mohawk.S
@@ -45,7 +45,7 @@
45 * cpu_mohawk_proc_init() 45 * cpu_mohawk_proc_init()
46 */ 46 */
47ENTRY(cpu_mohawk_proc_init) 47ENTRY(cpu_mohawk_proc_init)
48 mov pc, lr 48 ret lr
49 49
50/* 50/*
51 * cpu_mohawk_proc_fin() 51 * cpu_mohawk_proc_fin()
@@ -55,7 +55,7 @@ ENTRY(cpu_mohawk_proc_fin)
55 bic r0, r0, #0x1800 @ ...iz........... 55 bic r0, r0, #0x1800 @ ...iz...........
56 bic r0, r0, #0x0006 @ .............ca. 56 bic r0, r0, #0x0006 @ .............ca.
57 mcr p15, 0, r0, c1, c0, 0 @ disable caches 57 mcr p15, 0, r0, c1, c0, 0 @ disable caches
58 mov pc, lr 58 ret lr
59 59
60/* 60/*
61 * cpu_mohawk_reset(loc) 61 * cpu_mohawk_reset(loc)
@@ -79,7 +79,7 @@ ENTRY(cpu_mohawk_reset)
79 bic ip, ip, #0x0007 @ .............cam 79 bic ip, ip, #0x0007 @ .............cam
80 bic ip, ip, #0x1100 @ ...i...s........ 80 bic ip, ip, #0x1100 @ ...i...s........
81 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 81 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
82 mov pc, r0 82 ret r0
83ENDPROC(cpu_mohawk_reset) 83ENDPROC(cpu_mohawk_reset)
84 .popsection 84 .popsection
85 85
@@ -93,7 +93,7 @@ ENTRY(cpu_mohawk_do_idle)
93 mov r0, #0 93 mov r0, #0
94 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer 94 mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
95 mcr p15, 0, r0, c7, c0, 4 @ wait for interrupt 95 mcr p15, 0, r0, c7, c0, 4 @ wait for interrupt
96 mov pc, lr 96 ret lr
97 97
98/* 98/*
99 * flush_icache_all() 99 * flush_icache_all()
@@ -103,7 +103,7 @@ ENTRY(cpu_mohawk_do_idle)
103ENTRY(mohawk_flush_icache_all) 103ENTRY(mohawk_flush_icache_all)
104 mov r0, #0 104 mov r0, #0
105 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 105 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
106 mov pc, lr 106 ret lr
107ENDPROC(mohawk_flush_icache_all) 107ENDPROC(mohawk_flush_icache_all)
108 108
109/* 109/*
@@ -128,7 +128,7 @@ __flush_whole_cache:
128 tst r2, #VM_EXEC 128 tst r2, #VM_EXEC
129 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 129 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
130 mcrne p15, 0, ip, c7, c10, 0 @ drain write buffer 130 mcrne p15, 0, ip, c7, c10, 0 @ drain write buffer
131 mov pc, lr 131 ret lr
132 132
133/* 133/*
134 * flush_user_cache_range(start, end, flags) 134 * flush_user_cache_range(start, end, flags)
@@ -158,7 +158,7 @@ ENTRY(mohawk_flush_user_cache_range)
158 blo 1b 158 blo 1b
159 tst r2, #VM_EXEC 159 tst r2, #VM_EXEC
160 mcrne p15, 0, ip, c7, c10, 4 @ drain WB 160 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
161 mov pc, lr 161 ret lr
162 162
163/* 163/*
164 * coherent_kern_range(start, end) 164 * coherent_kern_range(start, end)
@@ -194,7 +194,7 @@ ENTRY(mohawk_coherent_user_range)
194 blo 1b 194 blo 1b
195 mcr p15, 0, r0, c7, c10, 4 @ drain WB 195 mcr p15, 0, r0, c7, c10, 4 @ drain WB
196 mov r0, #0 196 mov r0, #0
197 mov pc, lr 197 ret lr
198 198
199/* 199/*
200 * flush_kern_dcache_area(void *addr, size_t size) 200 * flush_kern_dcache_area(void *addr, size_t size)
@@ -214,7 +214,7 @@ ENTRY(mohawk_flush_kern_dcache_area)
214 mov r0, #0 214 mov r0, #0
215 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 215 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
216 mcr p15, 0, r0, c7, c10, 4 @ drain WB 216 mcr p15, 0, r0, c7, c10, 4 @ drain WB
217 mov pc, lr 217 ret lr
218 218
219/* 219/*
220 * dma_inv_range(start, end) 220 * dma_inv_range(start, end)
@@ -240,7 +240,7 @@ mohawk_dma_inv_range:
240 cmp r0, r1 240 cmp r0, r1
241 blo 1b 241 blo 1b
242 mcr p15, 0, r0, c7, c10, 4 @ drain WB 242 mcr p15, 0, r0, c7, c10, 4 @ drain WB
243 mov pc, lr 243 ret lr
244 244
245/* 245/*
246 * dma_clean_range(start, end) 246 * dma_clean_range(start, end)
@@ -259,7 +259,7 @@ mohawk_dma_clean_range:
259 cmp r0, r1 259 cmp r0, r1
260 blo 1b 260 blo 1b
261 mcr p15, 0, r0, c7, c10, 4 @ drain WB 261 mcr p15, 0, r0, c7, c10, 4 @ drain WB
262 mov pc, lr 262 ret lr
263 263
264/* 264/*
265 * dma_flush_range(start, end) 265 * dma_flush_range(start, end)
@@ -277,7 +277,7 @@ ENTRY(mohawk_dma_flush_range)
277 cmp r0, r1 277 cmp r0, r1
278 blo 1b 278 blo 1b
279 mcr p15, 0, r0, c7, c10, 4 @ drain WB 279 mcr p15, 0, r0, c7, c10, 4 @ drain WB
280 mov pc, lr 280 ret lr
281 281
282/* 282/*
283 * dma_map_area(start, size, dir) 283 * dma_map_area(start, size, dir)
@@ -300,7 +300,7 @@ ENDPROC(mohawk_dma_map_area)
300 * - dir - DMA direction 300 * - dir - DMA direction
301 */ 301 */
302ENTRY(mohawk_dma_unmap_area) 302ENTRY(mohawk_dma_unmap_area)
303 mov pc, lr 303 ret lr
304ENDPROC(mohawk_dma_unmap_area) 304ENDPROC(mohawk_dma_unmap_area)
305 305
306 .globl mohawk_flush_kern_cache_louis 306 .globl mohawk_flush_kern_cache_louis
@@ -315,7 +315,7 @@ ENTRY(cpu_mohawk_dcache_clean_area)
315 subs r1, r1, #CACHE_DLINESIZE 315 subs r1, r1, #CACHE_DLINESIZE
316 bhi 1b 316 bhi 1b
317 mcr p15, 0, r0, c7, c10, 4 @ drain WB 317 mcr p15, 0, r0, c7, c10, 4 @ drain WB
318 mov pc, lr 318 ret lr
319 319
320/* 320/*
321 * cpu_mohawk_switch_mm(pgd) 321 * cpu_mohawk_switch_mm(pgd)
@@ -333,7 +333,7 @@ ENTRY(cpu_mohawk_switch_mm)
333 orr r0, r0, #0x18 @ cache the page table in L2 333 orr r0, r0, #0x18 @ cache the page table in L2
334 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer 334 mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
335 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs 335 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
336 mov pc, lr 336 ret lr
337 337
338/* 338/*
339 * cpu_mohawk_set_pte_ext(ptep, pte, ext) 339 * cpu_mohawk_set_pte_ext(ptep, pte, ext)
@@ -346,7 +346,7 @@ ENTRY(cpu_mohawk_set_pte_ext)
346 mov r0, r0 346 mov r0, r0
347 mcr p15, 0, r0, c7, c10, 1 @ clean D entry 347 mcr p15, 0, r0, c7, c10, 1 @ clean D entry
348 mcr p15, 0, r0, c7, c10, 4 @ drain WB 348 mcr p15, 0, r0, c7, c10, 4 @ drain WB
349 mov pc, lr 349 ret lr
350 350
351.globl cpu_mohawk_suspend_size 351.globl cpu_mohawk_suspend_size
352.equ cpu_mohawk_suspend_size, 4 * 6 352.equ cpu_mohawk_suspend_size, 4 * 6
@@ -400,7 +400,7 @@ __mohawk_setup:
400 mrc p15, 0, r0, c1, c0 @ get control register 400 mrc p15, 0, r0, c1, c0 @ get control register
401 bic r0, r0, r5 401 bic r0, r0, r5
402 orr r0, r0, r6 402 orr r0, r0, r6
403 mov pc, lr 403 ret lr
404 404
405 .size __mohawk_setup, . - __mohawk_setup 405 .size __mohawk_setup, . - __mohawk_setup
406 406
diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S
index c45319c8f1d9..8008a0461cf5 100644
--- a/arch/arm/mm/proc-sa110.S
+++ b/arch/arm/mm/proc-sa110.S
@@ -38,7 +38,7 @@
38ENTRY(cpu_sa110_proc_init) 38ENTRY(cpu_sa110_proc_init)
39 mov r0, #0 39 mov r0, #0
40 mcr p15, 0, r0, c15, c1, 2 @ Enable clock switching 40 mcr p15, 0, r0, c15, c1, 2 @ Enable clock switching
41 mov pc, lr 41 ret lr
42 42
43/* 43/*
44 * cpu_sa110_proc_fin() 44 * cpu_sa110_proc_fin()
@@ -50,7 +50,7 @@ ENTRY(cpu_sa110_proc_fin)
50 bic r0, r0, #0x1000 @ ...i............ 50 bic r0, r0, #0x1000 @ ...i............
51 bic r0, r0, #0x000e @ ............wca. 51 bic r0, r0, #0x000e @ ............wca.
52 mcr p15, 0, r0, c1, c0, 0 @ disable caches 52 mcr p15, 0, r0, c1, c0, 0 @ disable caches
53 mov pc, lr 53 ret lr
54 54
55/* 55/*
56 * cpu_sa110_reset(loc) 56 * cpu_sa110_reset(loc)
@@ -74,7 +74,7 @@ ENTRY(cpu_sa110_reset)
74 bic ip, ip, #0x000f @ ............wcam 74 bic ip, ip, #0x000f @ ............wcam
75 bic ip, ip, #0x1100 @ ...i...s........ 75 bic ip, ip, #0x1100 @ ...i...s........
76 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 76 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
77 mov pc, r0 77 ret r0
78ENDPROC(cpu_sa110_reset) 78ENDPROC(cpu_sa110_reset)
79 .popsection 79 .popsection
80 80
@@ -103,7 +103,7 @@ ENTRY(cpu_sa110_do_idle)
103 mov r0, r0 @ safety 103 mov r0, r0 @ safety
104 mov r0, r0 @ safety 104 mov r0, r0 @ safety
105 mcr p15, 0, r0, c15, c1, 2 @ enable clock switching 105 mcr p15, 0, r0, c15, c1, 2 @ enable clock switching
106 mov pc, lr 106 ret lr
107 107
108/* ================================= CACHE ================================ */ 108/* ================================= CACHE ================================ */
109 109
@@ -121,7 +121,7 @@ ENTRY(cpu_sa110_dcache_clean_area)
121 add r0, r0, #DCACHELINESIZE 121 add r0, r0, #DCACHELINESIZE
122 subs r1, r1, #DCACHELINESIZE 122 subs r1, r1, #DCACHELINESIZE
123 bhi 1b 123 bhi 1b
124 mov pc, lr 124 ret lr
125 125
126/* =============================== PageTable ============================== */ 126/* =============================== PageTable ============================== */
127 127
@@ -141,7 +141,7 @@ ENTRY(cpu_sa110_switch_mm)
141 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs 141 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
142 ldr pc, [sp], #4 142 ldr pc, [sp], #4
143#else 143#else
144 mov pc, lr 144 ret lr
145#endif 145#endif
146 146
147/* 147/*
@@ -157,7 +157,7 @@ ENTRY(cpu_sa110_set_pte_ext)
157 mcr p15, 0, r0, c7, c10, 1 @ clean D entry 157 mcr p15, 0, r0, c7, c10, 1 @ clean D entry
158 mcr p15, 0, r0, c7, c10, 4 @ drain WB 158 mcr p15, 0, r0, c7, c10, 4 @ drain WB
159#endif 159#endif
160 mov pc, lr 160 ret lr
161 161
162 .type __sa110_setup, #function 162 .type __sa110_setup, #function
163__sa110_setup: 163__sa110_setup:
@@ -173,7 +173,7 @@ __sa110_setup:
173 mrc p15, 0, r0, c1, c0 @ get control register v4 173 mrc p15, 0, r0, c1, c0 @ get control register v4
174 bic r0, r0, r5 174 bic r0, r0, r5
175 orr r0, r0, r6 175 orr r0, r0, r6
176 mov pc, lr 176 ret lr
177 .size __sa110_setup, . - __sa110_setup 177 .size __sa110_setup, . - __sa110_setup
178 178
179 /* 179 /*
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index 09d241ae2dbe..89f97ac648a9 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -43,7 +43,7 @@ ENTRY(cpu_sa1100_proc_init)
43 mov r0, #0 43 mov r0, #0
44 mcr p15, 0, r0, c15, c1, 2 @ Enable clock switching 44 mcr p15, 0, r0, c15, c1, 2 @ Enable clock switching
45 mcr p15, 0, r0, c9, c0, 5 @ Allow read-buffer operations from userland 45 mcr p15, 0, r0, c9, c0, 5 @ Allow read-buffer operations from userland
46 mov pc, lr 46 ret lr
47 47
48/* 48/*
49 * cpu_sa1100_proc_fin() 49 * cpu_sa1100_proc_fin()
@@ -58,7 +58,7 @@ ENTRY(cpu_sa1100_proc_fin)
58 bic r0, r0, #0x1000 @ ...i............ 58 bic r0, r0, #0x1000 @ ...i............
59 bic r0, r0, #0x000e @ ............wca. 59 bic r0, r0, #0x000e @ ............wca.
60 mcr p15, 0, r0, c1, c0, 0 @ disable caches 60 mcr p15, 0, r0, c1, c0, 0 @ disable caches
61 mov pc, lr 61 ret lr
62 62
63/* 63/*
64 * cpu_sa1100_reset(loc) 64 * cpu_sa1100_reset(loc)
@@ -82,7 +82,7 @@ ENTRY(cpu_sa1100_reset)
82 bic ip, ip, #0x000f @ ............wcam 82 bic ip, ip, #0x000f @ ............wcam
83 bic ip, ip, #0x1100 @ ...i...s........ 83 bic ip, ip, #0x1100 @ ...i...s........
84 mcr p15, 0, ip, c1, c0, 0 @ ctrl register 84 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
85 mov pc, r0 85 ret r0
86ENDPROC(cpu_sa1100_reset) 86ENDPROC(cpu_sa1100_reset)
87 .popsection 87 .popsection
88 88
@@ -113,7 +113,7 @@ ENTRY(cpu_sa1100_do_idle)
113 mcr p15, 0, r0, c15, c8, 2 @ wait for interrupt 113 mcr p15, 0, r0, c15, c8, 2 @ wait for interrupt
114 mov r0, r0 @ safety 114 mov r0, r0 @ safety
115 mcr p15, 0, r0, c15, c1, 2 @ enable clock switching 115 mcr p15, 0, r0, c15, c1, 2 @ enable clock switching
116 mov pc, lr 116 ret lr
117 117
118/* ================================= CACHE ================================ */ 118/* ================================= CACHE ================================ */
119 119
@@ -131,7 +131,7 @@ ENTRY(cpu_sa1100_dcache_clean_area)
131 add r0, r0, #DCACHELINESIZE 131 add r0, r0, #DCACHELINESIZE
132 subs r1, r1, #DCACHELINESIZE 132 subs r1, r1, #DCACHELINESIZE
133 bhi 1b 133 bhi 1b
134 mov pc, lr 134 ret lr
135 135
136/* =============================== PageTable ============================== */ 136/* =============================== PageTable ============================== */
137 137
@@ -152,7 +152,7 @@ ENTRY(cpu_sa1100_switch_mm)
152 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs 152 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
153 ldr pc, [sp], #4 153 ldr pc, [sp], #4
154#else 154#else
155 mov pc, lr 155 ret lr
156#endif 156#endif
157 157
158/* 158/*
@@ -168,7 +168,7 @@ ENTRY(cpu_sa1100_set_pte_ext)
168 mcr p15, 0, r0, c7, c10, 1 @ clean D entry 168 mcr p15, 0, r0, c7, c10, 1 @ clean D entry
169 mcr p15, 0, r0, c7, c10, 4 @ drain WB 169 mcr p15, 0, r0, c7, c10, 4 @ drain WB
170#endif 170#endif
171 mov pc, lr 171 ret lr
172 172
173.globl cpu_sa1100_suspend_size 173.globl cpu_sa1100_suspend_size
174.equ cpu_sa1100_suspend_size, 4 * 3 174.equ cpu_sa1100_suspend_size, 4 * 3
@@ -211,7 +211,7 @@ __sa1100_setup:
211 mrc p15, 0, r0, c1, c0 @ get control register v4 211 mrc p15, 0, r0, c1, c0 @ get control register v4
212 bic r0, r0, r5 212 bic r0, r0, r5
213 orr r0, r0, r6 213 orr r0, r0, r6
214 mov pc, lr 214 ret lr
215 .size __sa1100_setup, . - __sa1100_setup 215 .size __sa1100_setup, . - __sa1100_setup
216 216
217 /* 217 /*
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 32b3558321c4..d0390f4b3f18 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -36,14 +36,14 @@
36#define PMD_FLAGS_SMP PMD_SECT_WBWA|PMD_SECT_S 36#define PMD_FLAGS_SMP PMD_SECT_WBWA|PMD_SECT_S
37 37
38ENTRY(cpu_v6_proc_init) 38ENTRY(cpu_v6_proc_init)
39 mov pc, lr 39 ret lr
40 40
41ENTRY(cpu_v6_proc_fin) 41ENTRY(cpu_v6_proc_fin)
42 mrc p15, 0, r0, c1, c0, 0 @ ctrl register 42 mrc p15, 0, r0, c1, c0, 0 @ ctrl register
43 bic r0, r0, #0x1000 @ ...i............ 43 bic r0, r0, #0x1000 @ ...i............
44 bic r0, r0, #0x0006 @ .............ca. 44 bic r0, r0, #0x0006 @ .............ca.
45 mcr p15, 0, r0, c1, c0, 0 @ disable caches 45 mcr p15, 0, r0, c1, c0, 0 @ disable caches
46 mov pc, lr 46 ret lr
47 47
48/* 48/*
49 * cpu_v6_reset(loc) 49 * cpu_v6_reset(loc)
@@ -62,7 +62,7 @@ ENTRY(cpu_v6_reset)
62 mcr p15, 0, r1, c1, c0, 0 @ disable MMU 62 mcr p15, 0, r1, c1, c0, 0 @ disable MMU
63 mov r1, #0 63 mov r1, #0
64 mcr p15, 0, r1, c7, c5, 4 @ ISB 64 mcr p15, 0, r1, c7, c5, 4 @ ISB
65 mov pc, r0 65 ret r0
66ENDPROC(cpu_v6_reset) 66ENDPROC(cpu_v6_reset)
67 .popsection 67 .popsection
68 68
@@ -77,14 +77,14 @@ ENTRY(cpu_v6_do_idle)
77 mov r1, #0 77 mov r1, #0
78 mcr p15, 0, r1, c7, c10, 4 @ DWB - WFI may enter a low-power mode 78 mcr p15, 0, r1, c7, c10, 4 @ DWB - WFI may enter a low-power mode
79 mcr p15, 0, r1, c7, c0, 4 @ wait for interrupt 79 mcr p15, 0, r1, c7, c0, 4 @ wait for interrupt
80 mov pc, lr 80 ret lr
81 81
82ENTRY(cpu_v6_dcache_clean_area) 82ENTRY(cpu_v6_dcache_clean_area)
831: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 831: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
84 add r0, r0, #D_CACHE_LINE_SIZE 84 add r0, r0, #D_CACHE_LINE_SIZE
85 subs r1, r1, #D_CACHE_LINE_SIZE 85 subs r1, r1, #D_CACHE_LINE_SIZE
86 bhi 1b 86 bhi 1b
87 mov pc, lr 87 ret lr
88 88
89/* 89/*
90 * cpu_v6_switch_mm(pgd_phys, tsk) 90 * cpu_v6_switch_mm(pgd_phys, tsk)
@@ -113,7 +113,7 @@ ENTRY(cpu_v6_switch_mm)
113#endif 113#endif
114 mcr p15, 0, r1, c13, c0, 1 @ set context ID 114 mcr p15, 0, r1, c13, c0, 1 @ set context ID
115#endif 115#endif
116 mov pc, lr 116 ret lr
117 117
118/* 118/*
119 * cpu_v6_set_pte_ext(ptep, pte, ext) 119 * cpu_v6_set_pte_ext(ptep, pte, ext)
@@ -131,7 +131,7 @@ ENTRY(cpu_v6_set_pte_ext)
131#ifdef CONFIG_MMU 131#ifdef CONFIG_MMU
132 armv6_set_pte_ext cpu_v6 132 armv6_set_pte_ext cpu_v6
133#endif 133#endif
134 mov pc, lr 134 ret lr
135 135
136/* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */ 136/* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */
137.globl cpu_v6_suspend_size 137.globl cpu_v6_suspend_size
@@ -241,7 +241,7 @@ __v6_setup:
241 mcreq p15, 0, r5, c1, c0, 1 @ write aux control reg 241 mcreq p15, 0, r5, c1, c0, 1 @ write aux control reg
242 orreq r0, r0, #(1 << 21) @ low interrupt latency configuration 242 orreq r0, r0, #(1 << 21) @ low interrupt latency configuration
243#endif 243#endif
244 mov pc, lr @ return to head.S:__ret 244 ret lr @ return to head.S:__ret
245 245
246 /* 246 /*
247 * V X F I D LR 247 * V X F I D LR
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index 1f52915f2b28..ed448d8a596b 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -59,7 +59,7 @@ ENTRY(cpu_v7_switch_mm)
59 mcr p15, 0, r0, c2, c0, 0 @ set TTB 0 59 mcr p15, 0, r0, c2, c0, 0 @ set TTB 0
60 isb 60 isb
61#endif 61#endif
62 mov pc, lr 62 bx lr
63ENDPROC(cpu_v7_switch_mm) 63ENDPROC(cpu_v7_switch_mm)
64 64
65/* 65/*
@@ -106,7 +106,7 @@ ENTRY(cpu_v7_set_pte_ext)
106 ALT_SMP(W(nop)) 106 ALT_SMP(W(nop))
107 ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte 107 ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte
108#endif 108#endif
109 mov pc, lr 109 bx lr
110ENDPROC(cpu_v7_set_pte_ext) 110ENDPROC(cpu_v7_set_pte_ext)
111 111
112 /* 112 /*
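One asymmetry worth noting: in proc-v7-2level.S the two returns above are written as a literal "bx lr" rather than "ret lr". The net effect should be the same, presumably because this file is only ever built for ARMv7, where the macro's lr case would expand to bx anyway. A sketch of the equivalence, assuming the macro shape given earlier:

	@ Macro form, as used elsewhere in this series:
	ret	lr			@ with __LINUX_ARM_ARCH__ >= 6 and reg == lr,
					@ this is expected to emit ...
	@ Open-coded form, as used in proc-v7-2level.S:
	bx	lr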
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
index 22e3ad63500c..e4c8acfc1323 100644
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -19,6 +19,7 @@
19 * along with this program; if not, write to the Free Software 19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */ 21 */
22#include <asm/assembler.h>
22 23
23#define TTB_IRGN_NC (0 << 8) 24#define TTB_IRGN_NC (0 << 8)
24#define TTB_IRGN_WBWA (1 << 8) 25#define TTB_IRGN_WBWA (1 << 8)
@@ -61,7 +62,7 @@ ENTRY(cpu_v7_switch_mm)
61 mcrr p15, 0, rpgdl, rpgdh, c2 @ set TTB 0 62 mcrr p15, 0, rpgdl, rpgdh, c2 @ set TTB 0
62 isb 63 isb
63#endif 64#endif
64 mov pc, lr 65 ret lr
65ENDPROC(cpu_v7_switch_mm) 66ENDPROC(cpu_v7_switch_mm)
66 67
67#ifdef __ARMEB__ 68#ifdef __ARMEB__
@@ -86,13 +87,18 @@ ENTRY(cpu_v7_set_pte_ext)
86 tst rh, #1 << (57 - 32) @ L_PTE_NONE 87 tst rh, #1 << (57 - 32) @ L_PTE_NONE
87 bicne rl, #L_PTE_VALID 88 bicne rl, #L_PTE_VALID
88 bne 1f 89 bne 1f
89 tst rh, #1 << (55 - 32) @ L_PTE_DIRTY 90
90 orreq rl, #L_PTE_RDONLY 91 eor ip, rh, #1 << (55 - 32) @ toggle L_PTE_DIRTY in temp reg to
92 @ test for !L_PTE_DIRTY || L_PTE_RDONLY
93 tst ip, #1 << (55 - 32) | 1 << (58 - 32)
94 orrne rl, #PTE_AP2
95 biceq rl, #PTE_AP2
96
911: strd r2, r3, [r0] 971: strd r2, r3, [r0]
92 ALT_SMP(W(nop)) 98 ALT_SMP(W(nop))
93 ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte 99 ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte
94#endif 100#endif
95 mov pc, lr 101 ret lr
96ENDPROC(cpu_v7_set_pte_ext) 102ENDPROC(cpu_v7_set_pte_ext)
97 103
98 /* 104 /*
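The reworked cpu_v7_set_pte_ext hunk above replaces the plain L_PTE_DIRTY test with an EOR/TST pair so one test covers "not dirty OR read-only": the hardware descriptor is made read-only (AP[2] set) unless the page is both dirty and writable, letting the first write to a clean page fault and mark it dirty in software. A hedged C rendering of that decision, with bit positions taken from the assembly (the real macro values live in the LPAE pgtable headers and may differ):

	#include <stdint.h>

	#define L_PTE_DIRTY	(UINT64_C(1) << 55)	/* software dirty bit      */
	#define L_PTE_RDONLY	(UINT64_C(1) << 58)	/* software read-only bit  */
	#define PTE_AP2		(UINT64_C(1) << 7)	/* AP[2]: 1 = not writable */

	/* Equivalent of: eor ip, rh, #DIRTY; tst ip, #DIRTY | #RDONLY;
	 * orrne rl, #PTE_AP2; biceq rl, #PTE_AP2 */
	static uint64_t v7_lpae_apply_ap2(uint64_t pte)
	{
		if (!(pte & L_PTE_DIRTY) || (pte & L_PTE_RDONLY))
			pte |= PTE_AP2;		/* read-only: fault on first write */
		else
			pte &= ~PTE_AP2;	/* dirty and writable              */
		return pte;
	}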
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 3db2c2f04a30..b5d67db20897 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -26,7 +26,7 @@
26#endif 26#endif
27 27
28ENTRY(cpu_v7_proc_init) 28ENTRY(cpu_v7_proc_init)
29 mov pc, lr 29 ret lr
30ENDPROC(cpu_v7_proc_init) 30ENDPROC(cpu_v7_proc_init)
31 31
32ENTRY(cpu_v7_proc_fin) 32ENTRY(cpu_v7_proc_fin)
@@ -34,7 +34,7 @@ ENTRY(cpu_v7_proc_fin)
34 bic r0, r0, #0x1000 @ ...i............ 34 bic r0, r0, #0x1000 @ ...i............
35 bic r0, r0, #0x0006 @ .............ca. 35 bic r0, r0, #0x0006 @ .............ca.
36 mcr p15, 0, r0, c1, c0, 0 @ disable caches 36 mcr p15, 0, r0, c1, c0, 0 @ disable caches
37 mov pc, lr 37 ret lr
38ENDPROC(cpu_v7_proc_fin) 38ENDPROC(cpu_v7_proc_fin)
39 39
40/* 40/*
@@ -71,20 +71,20 @@ ENDPROC(cpu_v7_reset)
71ENTRY(cpu_v7_do_idle) 71ENTRY(cpu_v7_do_idle)
72 dsb @ WFI may enter a low-power mode 72 dsb @ WFI may enter a low-power mode
73 wfi 73 wfi
74 mov pc, lr 74 ret lr
75ENDPROC(cpu_v7_do_idle) 75ENDPROC(cpu_v7_do_idle)
76 76
77ENTRY(cpu_v7_dcache_clean_area) 77ENTRY(cpu_v7_dcache_clean_area)
78 ALT_SMP(W(nop)) @ MP extensions imply L1 PTW 78 ALT_SMP(W(nop)) @ MP extensions imply L1 PTW
79 ALT_UP_B(1f) 79 ALT_UP_B(1f)
80 mov pc, lr 80 ret lr
811: dcache_line_size r2, r3 811: dcache_line_size r2, r3
822: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 822: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
83 add r0, r0, r2 83 add r0, r0, r2
84 subs r1, r1, r2 84 subs r1, r1, r2
85 bhi 2b 85 bhi 2b
86 dsb ishst 86 dsb ishst
87 mov pc, lr 87 ret lr
88ENDPROC(cpu_v7_dcache_clean_area) 88ENDPROC(cpu_v7_dcache_clean_area)
89 89
90 string cpu_v7_name, "ARMv7 Processor" 90 string cpu_v7_name, "ARMv7 Processor"
@@ -152,6 +152,40 @@ ENTRY(cpu_v7_do_resume)
152ENDPROC(cpu_v7_do_resume) 152ENDPROC(cpu_v7_do_resume)
153#endif 153#endif
154 154
155/*
156 * Cortex-A9 processor functions
157 */
158 globl_equ cpu_ca9mp_proc_init, cpu_v7_proc_init
159 globl_equ cpu_ca9mp_proc_fin, cpu_v7_proc_fin
160 globl_equ cpu_ca9mp_reset, cpu_v7_reset
161 globl_equ cpu_ca9mp_do_idle, cpu_v7_do_idle
162 globl_equ cpu_ca9mp_dcache_clean_area, cpu_v7_dcache_clean_area
163 globl_equ cpu_ca9mp_switch_mm, cpu_v7_switch_mm
164 globl_equ cpu_ca9mp_set_pte_ext, cpu_v7_set_pte_ext
165.globl cpu_ca9mp_suspend_size
166.equ cpu_ca9mp_suspend_size, cpu_v7_suspend_size + 4 * 2
167#ifdef CONFIG_ARM_CPU_SUSPEND
168ENTRY(cpu_ca9mp_do_suspend)
169 stmfd sp!, {r4 - r5}
170 mrc p15, 0, r4, c15, c0, 1 @ Diagnostic register
171 mrc p15, 0, r5, c15, c0, 0 @ Power register
172 stmia r0!, {r4 - r5}
173 ldmfd sp!, {r4 - r5}
174 b cpu_v7_do_suspend
175ENDPROC(cpu_ca9mp_do_suspend)
176
177ENTRY(cpu_ca9mp_do_resume)
178 ldmia r0!, {r4 - r5}
179 mrc p15, 0, r10, c15, c0, 1 @ Read Diagnostic register
180 teq r4, r10 @ Already restored?
181 mcrne p15, 0, r4, c15, c0, 1 @ No, so restore it
182 mrc p15, 0, r10, c15, c0, 0 @ Read Power register
183 teq r5, r10 @ Already restored?
184 mcrne p15, 0, r5, c15, c0, 0 @ No, so restore it
185 b cpu_v7_do_resume
186ENDPROC(cpu_ca9mp_do_resume)
187#endif
188
155#ifdef CONFIG_CPU_PJ4B 189#ifdef CONFIG_CPU_PJ4B
156 globl_equ cpu_pj4b_switch_mm, cpu_v7_switch_mm 190 globl_equ cpu_pj4b_switch_mm, cpu_v7_switch_mm
157 globl_equ cpu_pj4b_set_pte_ext, cpu_v7_set_pte_ext 191 globl_equ cpu_pj4b_set_pte_ext, cpu_v7_set_pte_ext
@@ -163,7 +197,7 @@ ENTRY(cpu_pj4b_do_idle)
163 dsb @ WFI may enter a low-power mode 197 dsb @ WFI may enter a low-power mode
164 wfi 198 wfi
165 dsb @barrier 199 dsb @barrier
166 mov pc, lr 200 ret lr
167ENDPROC(cpu_pj4b_do_idle) 201ENDPROC(cpu_pj4b_do_idle)
168#else 202#else
169 globl_equ cpu_pj4b_do_idle, cpu_v7_do_idle 203 globl_equ cpu_pj4b_do_idle, cpu_v7_do_idle
@@ -184,16 +218,16 @@ ENDPROC(cpu_pj4b_do_suspend)
184 218
185ENTRY(cpu_pj4b_do_resume) 219ENTRY(cpu_pj4b_do_resume)
186 ldmia r0!, {r6 - r10} 220 ldmia r0!, {r6 - r10}
187 mcr p15, 1, r6, c15, c1, 0 @ save CP15 - extra features 221 mcr p15, 1, r6, c15, c1, 0 @ restore CP15 - extra features
188 mcr p15, 1, r7, c15, c2, 0 @ save CP15 - Aux Func Modes Ctrl 0 222 mcr p15, 1, r7, c15, c2, 0 @ restore CP15 - Aux Func Modes Ctrl 0
189 mcr p15, 1, r8, c15, c1, 2 @ save CP15 - Aux Debug Modes Ctrl 2 223 mcr p15, 1, r8, c15, c1, 2 @ restore CP15 - Aux Debug Modes Ctrl 2
190 mcr p15, 1, r9, c15, c1, 1 @ save CP15 - Aux Debug Modes Ctrl 1 224 mcr p15, 1, r9, c15, c1, 1 @ restore CP15 - Aux Debug Modes Ctrl 1
191 mcr p15, 0, r10, c9, c14, 0 @ save CP15 - PMC 225 mcr p15, 0, r10, c9, c14, 0 @ restore CP15 - PMC
192 b cpu_v7_do_resume 226 b cpu_v7_do_resume
193ENDPROC(cpu_pj4b_do_resume) 227ENDPROC(cpu_pj4b_do_resume)
194#endif 228#endif
195.globl cpu_pj4b_suspend_size 229.globl cpu_pj4b_suspend_size
196.equ cpu_pj4b_suspend_size, 4 * 14 230.equ cpu_pj4b_suspend_size, cpu_v7_suspend_size + 4 * 5
197 231
198#endif 232#endif
199 233
@@ -216,6 +250,7 @@ __v7_cr7mp_setup:
216__v7_ca7mp_setup: 250__v7_ca7mp_setup:
217__v7_ca12mp_setup: 251__v7_ca12mp_setup:
218__v7_ca15mp_setup: 252__v7_ca15mp_setup:
253__v7_b15mp_setup:
219__v7_ca17mp_setup: 254__v7_ca17mp_setup:
220 mov r10, #0 255 mov r10, #0
2211: 2561:
@@ -407,7 +442,7 @@ __v7_setup:
407 bic r0, r0, r5 @ clear bits them 442 bic r0, r0, r5 @ clear bits them
408 orr r0, r0, r6 @ set them 443 orr r0, r0, r6 @ set them
409 THUMB( orr r0, r0, #1 << 30 ) @ Thumb exceptions 444 THUMB( orr r0, r0, #1 << 30 ) @ Thumb exceptions
410 mov pc, lr @ return to head.S:__ret 445 ret lr @ return to head.S:__ret
411ENDPROC(__v7_setup) 446ENDPROC(__v7_setup)
412 447
413 .align 2 448 .align 2
@@ -418,6 +453,7 @@ __v7_setup_stack:
418 453
419 @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) 454 @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
420 define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1 455 define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
456 define_processor_functions ca9mp, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
421#ifdef CONFIG_CPU_PJ4B 457#ifdef CONFIG_CPU_PJ4B
422 define_processor_functions pj4b, dabort=v7_early_abort, pabort=v7_pabort, suspend=1 458 define_processor_functions pj4b, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
423#endif 459#endif
@@ -470,7 +506,7 @@ __v7_ca5mp_proc_info:
470__v7_ca9mp_proc_info: 506__v7_ca9mp_proc_info:
471 .long 0x410fc090 507 .long 0x410fc090
472 .long 0xff0ffff0 508 .long 0xff0ffff0
473 __v7_proc __v7_ca9mp_setup 509 __v7_proc __v7_ca9mp_setup, proc_fns = ca9mp_processor_functions
474 .size __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info 510 .size __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info
475 511
476#endif /* CONFIG_ARM_LPAE */ 512#endif /* CONFIG_ARM_LPAE */
@@ -528,6 +564,16 @@ __v7_ca15mp_proc_info:
528 .size __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info 564 .size __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info
529 565
530 /* 566 /*
567 * Broadcom Corporation Brahma-B15 processor.
568 */
569 .type __v7_b15mp_proc_info, #object
570__v7_b15mp_proc_info:
571 .long 0x420f00f0
572 .long 0xff0ffff0
573 __v7_proc __v7_b15mp_setup, hwcaps = HWCAP_IDIV
574 .size __v7_b15mp_proc_info, . - __v7_b15mp_proc_info
575
576 /*
531 * ARM Ltd. Cortex A17 processor. 577 * ARM Ltd. Cortex A17 processor.
532 */ 578 */
533 .type __v7_ca17mp_proc_info, #object 579 .type __v7_ca17mp_proc_info, #object
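The new ca9mp and b15mp entries above follow the usual __v7_proc_info pattern: each record carries an expected MIDR value and a significance mask, and the first entry whose masked MIDR matches supplies the setup routine and, for the Cortex-A9, its dedicated processor_functions table. A rough C model of that match, assuming the standard (midr & mask) == value test performed by the processor-type lookup (field names here are invented; the real record is built by the __v7_proc macro):

	struct v7_proc_info {
		unsigned int cpu_val;	/* e.g. 0x410fc090 (Cortex-A9), 0x420f00f0 (Brahma-B15) */
		unsigned int cpu_mask;	/* e.g. 0xff0ffff0: ignore revision/patch fields        */
	};

	static int proc_info_matches(const struct v7_proc_info *info, unsigned int midr)
	{
		return (midr & info->cpu_mask) == info->cpu_val;
	}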
diff --git a/arch/arm/mm/proc-v7m.S b/arch/arm/mm/proc-v7m.S
index 1ca37c72f12f..d1e68b553d3b 100644
--- a/arch/arm/mm/proc-v7m.S
+++ b/arch/arm/mm/proc-v7m.S
@@ -16,11 +16,11 @@
16#include "proc-macros.S" 16#include "proc-macros.S"
17 17
18ENTRY(cpu_v7m_proc_init) 18ENTRY(cpu_v7m_proc_init)
19 mov pc, lr 19 ret lr
20ENDPROC(cpu_v7m_proc_init) 20ENDPROC(cpu_v7m_proc_init)
21 21
22ENTRY(cpu_v7m_proc_fin) 22ENTRY(cpu_v7m_proc_fin)
23 mov pc, lr 23 ret lr
24ENDPROC(cpu_v7m_proc_fin) 24ENDPROC(cpu_v7m_proc_fin)
25 25
26/* 26/*
@@ -34,7 +34,7 @@ ENDPROC(cpu_v7m_proc_fin)
34 */ 34 */
35 .align 5 35 .align 5
36ENTRY(cpu_v7m_reset) 36ENTRY(cpu_v7m_reset)
37 mov pc, r0 37 ret r0
38ENDPROC(cpu_v7m_reset) 38ENDPROC(cpu_v7m_reset)
39 39
40/* 40/*
@@ -46,18 +46,18 @@ ENDPROC(cpu_v7m_reset)
46 */ 46 */
47ENTRY(cpu_v7m_do_idle) 47ENTRY(cpu_v7m_do_idle)
48 wfi 48 wfi
49 mov pc, lr 49 ret lr
50ENDPROC(cpu_v7m_do_idle) 50ENDPROC(cpu_v7m_do_idle)
51 51
52ENTRY(cpu_v7m_dcache_clean_area) 52ENTRY(cpu_v7m_dcache_clean_area)
53 mov pc, lr 53 ret lr
54ENDPROC(cpu_v7m_dcache_clean_area) 54ENDPROC(cpu_v7m_dcache_clean_area)
55 55
56/* 56/*
57 * There is no MMU, so here is nothing to do. 57 * There is no MMU, so here is nothing to do.
58 */ 58 */
59ENTRY(cpu_v7m_switch_mm) 59ENTRY(cpu_v7m_switch_mm)
60 mov pc, lr 60 ret lr
61ENDPROC(cpu_v7m_switch_mm) 61ENDPROC(cpu_v7m_switch_mm)
62 62
63.globl cpu_v7m_suspend_size 63.globl cpu_v7m_suspend_size
@@ -65,11 +65,11 @@ ENDPROC(cpu_v7m_switch_mm)
65 65
66#ifdef CONFIG_ARM_CPU_SUSPEND 66#ifdef CONFIG_ARM_CPU_SUSPEND
67ENTRY(cpu_v7m_do_suspend) 67ENTRY(cpu_v7m_do_suspend)
68 mov pc, lr 68 ret lr
69ENDPROC(cpu_v7m_do_suspend) 69ENDPROC(cpu_v7m_do_suspend)
70 70
71ENTRY(cpu_v7m_do_resume) 71ENTRY(cpu_v7m_do_resume)
72 mov pc, lr 72 ret lr
73ENDPROC(cpu_v7m_do_resume) 73ENDPROC(cpu_v7m_do_resume)
74#endif 74#endif
75 75
@@ -120,7 +120,7 @@ __v7m_setup:
120 ldr r12, [r0, V7M_SCB_CCR] @ system control register 120 ldr r12, [r0, V7M_SCB_CCR] @ system control register
121 orr r12, #V7M_SCB_CCR_STKALIGN 121 orr r12, #V7M_SCB_CCR_STKALIGN
122 str r12, [r0, V7M_SCB_CCR] 122 str r12, [r0, V7M_SCB_CCR]
123 mov pc, lr 123 ret lr
124ENDPROC(__v7m_setup) 124ENDPROC(__v7m_setup)
125 125
126 .align 2 126 .align 2
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index dc1645890042..f8acdfece036 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -83,7 +83,7 @@
83 * Nothing too exciting at the moment 83 * Nothing too exciting at the moment
84 */ 84 */
85ENTRY(cpu_xsc3_proc_init) 85ENTRY(cpu_xsc3_proc_init)
86 mov pc, lr 86 ret lr
87 87
88/* 88/*
89 * cpu_xsc3_proc_fin() 89 * cpu_xsc3_proc_fin()
@@ -93,7 +93,7 @@ ENTRY(cpu_xsc3_proc_fin)
93 bic r0, r0, #0x1800 @ ...IZ........... 93 bic r0, r0, #0x1800 @ ...IZ...........
94 bic r0, r0, #0x0006 @ .............CA. 94 bic r0, r0, #0x0006 @ .............CA.
95 mcr p15, 0, r0, c1, c0, 0 @ disable caches 95 mcr p15, 0, r0, c1, c0, 0 @ disable caches
96 mov pc, lr 96 ret lr
97 97
98/* 98/*
99 * cpu_xsc3_reset(loc) 99 * cpu_xsc3_reset(loc)
@@ -119,7 +119,7 @@ ENTRY(cpu_xsc3_reset)
119 @ CAUTION: MMU turned off from this point. We count on the pipeline 119 @ CAUTION: MMU turned off from this point. We count on the pipeline
120 @ already containing those two last instructions to survive. 120 @ already containing those two last instructions to survive.
121 mcr p15, 0, ip, c8, c7, 0 @ invalidate I and D TLBs 121 mcr p15, 0, ip, c8, c7, 0 @ invalidate I and D TLBs
122 mov pc, r0 122 ret r0
123ENDPROC(cpu_xsc3_reset) 123ENDPROC(cpu_xsc3_reset)
124 .popsection 124 .popsection
125 125
@@ -138,7 +138,7 @@ ENDPROC(cpu_xsc3_reset)
138ENTRY(cpu_xsc3_do_idle) 138ENTRY(cpu_xsc3_do_idle)
139 mov r0, #1 139 mov r0, #1
140 mcr p14, 0, r0, c7, c0, 0 @ go to idle 140 mcr p14, 0, r0, c7, c0, 0 @ go to idle
141 mov pc, lr 141 ret lr
142 142
143/* ================================= CACHE ================================ */ 143/* ================================= CACHE ================================ */
144 144
@@ -150,7 +150,7 @@ ENTRY(cpu_xsc3_do_idle)
150ENTRY(xsc3_flush_icache_all) 150ENTRY(xsc3_flush_icache_all)
151 mov r0, #0 151 mov r0, #0
152 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 152 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
153 mov pc, lr 153 ret lr
154ENDPROC(xsc3_flush_icache_all) 154ENDPROC(xsc3_flush_icache_all)
155 155
156/* 156/*
@@ -176,7 +176,7 @@ __flush_whole_cache:
176 mcrne p15, 0, ip, c7, c5, 0 @ invalidate L1 I cache and BTB 176 mcrne p15, 0, ip, c7, c5, 0 @ invalidate L1 I cache and BTB
177 mcrne p15, 0, ip, c7, c10, 4 @ data write barrier 177 mcrne p15, 0, ip, c7, c10, 4 @ data write barrier
178 mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush 178 mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush
179 mov pc, lr 179 ret lr
180 180
181/* 181/*
182 * flush_user_cache_range(start, end, vm_flags) 182 * flush_user_cache_range(start, end, vm_flags)
@@ -205,7 +205,7 @@ ENTRY(xsc3_flush_user_cache_range)
205 mcrne p15, 0, ip, c7, c5, 6 @ invalidate BTB 205 mcrne p15, 0, ip, c7, c5, 6 @ invalidate BTB
206 mcrne p15, 0, ip, c7, c10, 4 @ data write barrier 206 mcrne p15, 0, ip, c7, c10, 4 @ data write barrier
207 mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush 207 mcrne p15, 0, ip, c7, c5, 4 @ prefetch flush
208 mov pc, lr 208 ret lr
209 209
210/* 210/*
211 * coherent_kern_range(start, end) 211 * coherent_kern_range(start, end)
@@ -232,7 +232,7 @@ ENTRY(xsc3_coherent_user_range)
232 mcr p15, 0, r0, c7, c5, 0 @ invalidate L1 I cache and BTB 232 mcr p15, 0, r0, c7, c5, 0 @ invalidate L1 I cache and BTB
233 mcr p15, 0, r0, c7, c10, 4 @ data write barrier 233 mcr p15, 0, r0, c7, c10, 4 @ data write barrier
234 mcr p15, 0, r0, c7, c5, 4 @ prefetch flush 234 mcr p15, 0, r0, c7, c5, 4 @ prefetch flush
235 mov pc, lr 235 ret lr
236 236
237/* 237/*
238 * flush_kern_dcache_area(void *addr, size_t size) 238 * flush_kern_dcache_area(void *addr, size_t size)
@@ -253,7 +253,7 @@ ENTRY(xsc3_flush_kern_dcache_area)
253 mcr p15, 0, r0, c7, c5, 0 @ invalidate L1 I cache and BTB 253 mcr p15, 0, r0, c7, c5, 0 @ invalidate L1 I cache and BTB
254 mcr p15, 0, r0, c7, c10, 4 @ data write barrier 254 mcr p15, 0, r0, c7, c10, 4 @ data write barrier
255 mcr p15, 0, r0, c7, c5, 4 @ prefetch flush 255 mcr p15, 0, r0, c7, c5, 4 @ prefetch flush
256 mov pc, lr 256 ret lr
257 257
258/* 258/*
259 * dma_inv_range(start, end) 259 * dma_inv_range(start, end)
@@ -277,7 +277,7 @@ xsc3_dma_inv_range:
277 cmp r0, r1 277 cmp r0, r1
278 blo 1b 278 blo 1b
279 mcr p15, 0, r0, c7, c10, 4 @ data write barrier 279 mcr p15, 0, r0, c7, c10, 4 @ data write barrier
280 mov pc, lr 280 ret lr
281 281
282/* 282/*
283 * dma_clean_range(start, end) 283 * dma_clean_range(start, end)
@@ -294,7 +294,7 @@ xsc3_dma_clean_range:
294 cmp r0, r1 294 cmp r0, r1
295 blo 1b 295 blo 1b
296 mcr p15, 0, r0, c7, c10, 4 @ data write barrier 296 mcr p15, 0, r0, c7, c10, 4 @ data write barrier
297 mov pc, lr 297 ret lr
298 298
299/* 299/*
300 * dma_flush_range(start, end) 300 * dma_flush_range(start, end)
@@ -311,7 +311,7 @@ ENTRY(xsc3_dma_flush_range)
311 cmp r0, r1 311 cmp r0, r1
312 blo 1b 312 blo 1b
313 mcr p15, 0, r0, c7, c10, 4 @ data write barrier 313 mcr p15, 0, r0, c7, c10, 4 @ data write barrier
314 mov pc, lr 314 ret lr
315 315
316/* 316/*
317 * dma_map_area(start, size, dir) 317 * dma_map_area(start, size, dir)
@@ -334,7 +334,7 @@ ENDPROC(xsc3_dma_map_area)
334 * - dir - DMA direction 334 * - dir - DMA direction
335 */ 335 */
336ENTRY(xsc3_dma_unmap_area) 336ENTRY(xsc3_dma_unmap_area)
337 mov pc, lr 337 ret lr
338ENDPROC(xsc3_dma_unmap_area) 338ENDPROC(xsc3_dma_unmap_area)
339 339
340 .globl xsc3_flush_kern_cache_louis 340 .globl xsc3_flush_kern_cache_louis
@@ -348,7 +348,7 @@ ENTRY(cpu_xsc3_dcache_clean_area)
348 add r0, r0, #CACHELINESIZE 348 add r0, r0, #CACHELINESIZE
349 subs r1, r1, #CACHELINESIZE 349 subs r1, r1, #CACHELINESIZE
350 bhi 1b 350 bhi 1b
351 mov pc, lr 351 ret lr
352 352
353/* =============================== PageTable ============================== */ 353/* =============================== PageTable ============================== */
354 354
@@ -406,7 +406,7 @@ ENTRY(cpu_xsc3_set_pte_ext)
406 orr r2, r2, ip 406 orr r2, r2, ip
407 407
408 xscale_set_pte_ext_epilogue 408 xscale_set_pte_ext_epilogue
409 mov pc, lr 409 ret lr
410 410
411 .ltorg 411 .ltorg
412 .align 412 .align
@@ -478,7 +478,7 @@ __xsc3_setup:
478 bic r0, r0, r5 @ ..V. ..R. .... ..A. 478 bic r0, r0, r5 @ ..V. ..R. .... ..A.
479 orr r0, r0, r6 @ ..VI Z..S .... .C.M (mmu) 479 orr r0, r0, r6 @ ..VI Z..S .... .C.M (mmu)
480 @ ...I Z..S .... .... (uc) 480 @ ...I Z..S .... .... (uc)
481 mov pc, lr 481 ret lr
482 482
483 .size __xsc3_setup, . - __xsc3_setup 483 .size __xsc3_setup, . - __xsc3_setup
484 484
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index d19b1cfcad91..23259f104c66 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -118,7 +118,7 @@ ENTRY(cpu_xscale_proc_init)
118 mrc p15, 0, r1, c1, c0, 1 118 mrc p15, 0, r1, c1, c0, 1
119 bic r1, r1, #1 119 bic r1, r1, #1
120 mcr p15, 0, r1, c1, c0, 1 120 mcr p15, 0, r1, c1, c0, 1
121 mov pc, lr 121 ret lr
122 122
123/* 123/*
124 * cpu_xscale_proc_fin() 124 * cpu_xscale_proc_fin()
@@ -128,7 +128,7 @@ ENTRY(cpu_xscale_proc_fin)
128 bic r0, r0, #0x1800 @ ...IZ........... 128 bic r0, r0, #0x1800 @ ...IZ...........
129 bic r0, r0, #0x0006 @ .............CA. 129 bic r0, r0, #0x0006 @ .............CA.
130 mcr p15, 0, r0, c1, c0, 0 @ disable caches 130 mcr p15, 0, r0, c1, c0, 0 @ disable caches
131 mov pc, lr 131 ret lr
132 132
133/* 133/*
134 * cpu_xscale_reset(loc) 134 * cpu_xscale_reset(loc)
@@ -160,7 +160,7 @@ ENTRY(cpu_xscale_reset)
160 @ CAUTION: MMU turned off from this point. We count on the pipeline 160 @ CAUTION: MMU turned off from this point. We count on the pipeline
161 @ already containing those two last instructions to survive. 161 @ already containing those two last instructions to survive.
162 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs 162 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
163 mov pc, r0 163 ret r0
164ENDPROC(cpu_xscale_reset) 164ENDPROC(cpu_xscale_reset)
165 .popsection 165 .popsection
166 166
@@ -179,7 +179,7 @@ ENDPROC(cpu_xscale_reset)
179ENTRY(cpu_xscale_do_idle) 179ENTRY(cpu_xscale_do_idle)
180 mov r0, #1 180 mov r0, #1
181 mcr p14, 0, r0, c7, c0, 0 @ Go to IDLE 181 mcr p14, 0, r0, c7, c0, 0 @ Go to IDLE
182 mov pc, lr 182 ret lr
183 183
184/* ================================= CACHE ================================ */ 184/* ================================= CACHE ================================ */
185 185
@@ -191,7 +191,7 @@ ENTRY(cpu_xscale_do_idle)
191ENTRY(xscale_flush_icache_all) 191ENTRY(xscale_flush_icache_all)
192 mov r0, #0 192 mov r0, #0
193 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 193 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
194 mov pc, lr 194 ret lr
195ENDPROC(xscale_flush_icache_all) 195ENDPROC(xscale_flush_icache_all)
196 196
197/* 197/*
@@ -216,7 +216,7 @@ __flush_whole_cache:
216 tst r2, #VM_EXEC 216 tst r2, #VM_EXEC
217 mcrne p15, 0, ip, c7, c5, 0 @ Invalidate I cache & BTB 217 mcrne p15, 0, ip, c7, c5, 0 @ Invalidate I cache & BTB
218 mcrne p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer 218 mcrne p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer
219 mov pc, lr 219 ret lr
220 220
221/* 221/*
222 * flush_user_cache_range(start, end, vm_flags) 222 * flush_user_cache_range(start, end, vm_flags)
@@ -245,7 +245,7 @@ ENTRY(xscale_flush_user_cache_range)
245 tst r2, #VM_EXEC 245 tst r2, #VM_EXEC
246 mcrne p15, 0, ip, c7, c5, 6 @ Invalidate BTB 246 mcrne p15, 0, ip, c7, c5, 6 @ Invalidate BTB
247 mcrne p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer 247 mcrne p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer
248 mov pc, lr 248 ret lr
249 249
250/* 250/*
251 * coherent_kern_range(start, end) 251 * coherent_kern_range(start, end)
@@ -269,7 +269,7 @@ ENTRY(xscale_coherent_kern_range)
269 mov r0, #0 269 mov r0, #0
270 mcr p15, 0, r0, c7, c5, 0 @ Invalidate I cache & BTB 270 mcr p15, 0, r0, c7, c5, 0 @ Invalidate I cache & BTB
271 mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer 271 mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer
272 mov pc, lr 272 ret lr
273 273
274/* 274/*
275 * coherent_user_range(start, end) 275 * coherent_user_range(start, end)
@@ -291,7 +291,7 @@ ENTRY(xscale_coherent_user_range)
291 mov r0, #0 291 mov r0, #0
292 mcr p15, 0, r0, c7, c5, 6 @ Invalidate BTB 292 mcr p15, 0, r0, c7, c5, 6 @ Invalidate BTB
293 mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer 293 mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer
294 mov pc, lr 294 ret lr
295 295
296/* 296/*
297 * flush_kern_dcache_area(void *addr, size_t size) 297 * flush_kern_dcache_area(void *addr, size_t size)
@@ -312,7 +312,7 @@ ENTRY(xscale_flush_kern_dcache_area)
312 mov r0, #0 312 mov r0, #0
313 mcr p15, 0, r0, c7, c5, 0 @ Invalidate I cache & BTB 313 mcr p15, 0, r0, c7, c5, 0 @ Invalidate I cache & BTB
314 mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer 314 mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer
315 mov pc, lr 315 ret lr
316 316
317/* 317/*
318 * dma_inv_range(start, end) 318 * dma_inv_range(start, end)
@@ -336,7 +336,7 @@ xscale_dma_inv_range:
336 cmp r0, r1 336 cmp r0, r1
337 blo 1b 337 blo 1b
338 mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer 338 mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer
339 mov pc, lr 339 ret lr
340 340
341/* 341/*
342 * dma_clean_range(start, end) 342 * dma_clean_range(start, end)
@@ -353,7 +353,7 @@ xscale_dma_clean_range:
353 cmp r0, r1 353 cmp r0, r1
354 blo 1b 354 blo 1b
355 mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer 355 mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer
356 mov pc, lr 356 ret lr
357 357
358/* 358/*
359 * dma_flush_range(start, end) 359 * dma_flush_range(start, end)
@@ -371,7 +371,7 @@ ENTRY(xscale_dma_flush_range)
371 cmp r0, r1 371 cmp r0, r1
372 blo 1b 372 blo 1b
373 mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer 373 mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer
374 mov pc, lr 374 ret lr
375 375
376/* 376/*
377 * dma_map_area(start, size, dir) 377 * dma_map_area(start, size, dir)
@@ -407,7 +407,7 @@ ENDPROC(xscale_80200_A0_A1_dma_map_area)
407 * - dir - DMA direction 407 * - dir - DMA direction
408 */ 408 */
409ENTRY(xscale_dma_unmap_area) 409ENTRY(xscale_dma_unmap_area)
410 mov pc, lr 410 ret lr
411ENDPROC(xscale_dma_unmap_area) 411ENDPROC(xscale_dma_unmap_area)
412 412
413 .globl xscale_flush_kern_cache_louis 413 .globl xscale_flush_kern_cache_louis
@@ -458,7 +458,7 @@ ENTRY(cpu_xscale_dcache_clean_area)
458 add r0, r0, #CACHELINESIZE 458 add r0, r0, #CACHELINESIZE
459 subs r1, r1, #CACHELINESIZE 459 subs r1, r1, #CACHELINESIZE
460 bhi 1b 460 bhi 1b
461 mov pc, lr 461 ret lr
462 462
463/* =============================== PageTable ============================== */ 463/* =============================== PageTable ============================== */
464 464
@@ -521,7 +521,7 @@ ENTRY(cpu_xscale_set_pte_ext)
521 orr r2, r2, ip 521 orr r2, r2, ip
522 522
523 xscale_set_pte_ext_epilogue 523 xscale_set_pte_ext_epilogue
524 mov pc, lr 524 ret lr
525 525
526 .ltorg 526 .ltorg
527 .align 527 .align
@@ -572,7 +572,7 @@ __xscale_setup:
572 mrc p15, 0, r0, c1, c0, 0 @ get control register 572 mrc p15, 0, r0, c1, c0, 0 @ get control register
573 bic r0, r0, r5 573 bic r0, r0, r5
574 orr r0, r0, r6 574 orr r0, r0, r6
575 mov pc, lr 575 ret lr
576 .size __xscale_setup, . - __xscale_setup 576 .size __xscale_setup, . - __xscale_setup
577 577
578 /* 578 /*
diff --git a/arch/arm/mm/tlb-fa.S b/arch/arm/mm/tlb-fa.S
index d3ddcf9a76ca..d2d9ecbe0aac 100644
--- a/arch/arm/mm/tlb-fa.S
+++ b/arch/arm/mm/tlb-fa.S
@@ -18,6 +18,7 @@
18 */ 18 */
19#include <linux/linkage.h> 19#include <linux/linkage.h>
20#include <linux/init.h> 20#include <linux/init.h>
21#include <asm/assembler.h>
21#include <asm/asm-offsets.h> 22#include <asm/asm-offsets.h>
22#include <asm/tlbflush.h> 23#include <asm/tlbflush.h>
23#include "proc-macros.S" 24#include "proc-macros.S"
@@ -37,7 +38,7 @@ ENTRY(fa_flush_user_tlb_range)
37 vma_vm_mm ip, r2 38 vma_vm_mm ip, r2
38 act_mm r3 @ get current->active_mm 39 act_mm r3 @ get current->active_mm
39 eors r3, ip, r3 @ == mm ? 40 eors r3, ip, r3 @ == mm ?
40 movne pc, lr @ no, we dont do anything 41 retne lr @ no, we dont do anything
41 mov r3, #0 42 mov r3, #0
42 mcr p15, 0, r3, c7, c10, 4 @ drain WB 43 mcr p15, 0, r3, c7, c10, 4 @ drain WB
43 bic r0, r0, #0x0ff 44 bic r0, r0, #0x0ff
@@ -47,7 +48,7 @@ ENTRY(fa_flush_user_tlb_range)
47 cmp r0, r1 48 cmp r0, r1
48 blo 1b 49 blo 1b
49 mcr p15, 0, r3, c7, c10, 4 @ data write barrier 50 mcr p15, 0, r3, c7, c10, 4 @ data write barrier
50 mov pc, lr 51 ret lr
51 52
52 53
53ENTRY(fa_flush_kern_tlb_range) 54ENTRY(fa_flush_kern_tlb_range)
@@ -61,7 +62,7 @@ ENTRY(fa_flush_kern_tlb_range)
61 blo 1b 62 blo 1b
62 mcr p15, 0, r3, c7, c10, 4 @ data write barrier 63 mcr p15, 0, r3, c7, c10, 4 @ data write barrier
63 mcr p15, 0, r3, c7, c5, 4 @ prefetch flush (isb) 64 mcr p15, 0, r3, c7, c5, 4 @ prefetch flush (isb)
64 mov pc, lr 65 ret lr
65 66
66 __INITDATA 67 __INITDATA
67 68
diff --git a/arch/arm/mm/tlb-v4.S b/arch/arm/mm/tlb-v4.S
index 17a025ade573..a2b5dca42048 100644
--- a/arch/arm/mm/tlb-v4.S
+++ b/arch/arm/mm/tlb-v4.S
@@ -14,6 +14,7 @@
14 */ 14 */
15#include <linux/linkage.h> 15#include <linux/linkage.h>
16#include <linux/init.h> 16#include <linux/init.h>
17#include <asm/assembler.h>
17#include <asm/asm-offsets.h> 18#include <asm/asm-offsets.h>
18#include <asm/tlbflush.h> 19#include <asm/tlbflush.h>
19#include "proc-macros.S" 20#include "proc-macros.S"
@@ -33,7 +34,7 @@ ENTRY(v4_flush_user_tlb_range)
33 vma_vm_mm ip, r2 34 vma_vm_mm ip, r2
34 act_mm r3 @ get current->active_mm 35 act_mm r3 @ get current->active_mm
35 eors r3, ip, r3 @ == mm ? 36 eors r3, ip, r3 @ == mm ?
36 movne pc, lr @ no, we dont do anything 37 retne lr @ no, we dont do anything
37.v4_flush_kern_tlb_range: 38.v4_flush_kern_tlb_range:
38 bic r0, r0, #0x0ff 39 bic r0, r0, #0x0ff
39 bic r0, r0, #0xf00 40 bic r0, r0, #0xf00
@@ -41,7 +42,7 @@ ENTRY(v4_flush_user_tlb_range)
41 add r0, r0, #PAGE_SZ 42 add r0, r0, #PAGE_SZ
42 cmp r0, r1 43 cmp r0, r1
43 blo 1b 44 blo 1b
44 mov pc, lr 45 ret lr
45 46
46/* 47/*
47 * v4_flush_kern_tlb_range(start, end) 48 * v4_flush_kern_tlb_range(start, end)
diff --git a/arch/arm/mm/tlb-v4wb.S b/arch/arm/mm/tlb-v4wb.S
index c04598fa4d4a..5a093b458dbc 100644
--- a/arch/arm/mm/tlb-v4wb.S
+++ b/arch/arm/mm/tlb-v4wb.S
@@ -14,6 +14,7 @@
14 */ 14 */
15#include <linux/linkage.h> 15#include <linux/linkage.h>
16#include <linux/init.h> 16#include <linux/init.h>
17#include <asm/assembler.h>
17#include <asm/asm-offsets.h> 18#include <asm/asm-offsets.h>
18#include <asm/tlbflush.h> 19#include <asm/tlbflush.h>
19#include "proc-macros.S" 20#include "proc-macros.S"
@@ -33,7 +34,7 @@ ENTRY(v4wb_flush_user_tlb_range)
33 vma_vm_mm ip, r2 34 vma_vm_mm ip, r2
34 act_mm r3 @ get current->active_mm 35 act_mm r3 @ get current->active_mm
35 eors r3, ip, r3 @ == mm ? 36 eors r3, ip, r3 @ == mm ?
36 movne pc, lr @ no, we dont do anything 37 retne lr @ no, we dont do anything
37 vma_vm_flags r2, r2 38 vma_vm_flags r2, r2
38 mcr p15, 0, r3, c7, c10, 4 @ drain WB 39 mcr p15, 0, r3, c7, c10, 4 @ drain WB
39 tst r2, #VM_EXEC 40 tst r2, #VM_EXEC
@@ -44,7 +45,7 @@ ENTRY(v4wb_flush_user_tlb_range)
44 add r0, r0, #PAGE_SZ 45 add r0, r0, #PAGE_SZ
45 cmp r0, r1 46 cmp r0, r1
46 blo 1b 47 blo 1b
47 mov pc, lr 48 ret lr
48 49
49/* 50/*
50 * v4_flush_kern_tlb_range(start, end) 51 * v4_flush_kern_tlb_range(start, end)
@@ -65,7 +66,7 @@ ENTRY(v4wb_flush_kern_tlb_range)
65 add r0, r0, #PAGE_SZ 66 add r0, r0, #PAGE_SZ
66 cmp r0, r1 67 cmp r0, r1
67 blo 1b 68 blo 1b
68 mov pc, lr 69 ret lr
69 70
70 __INITDATA 71 __INITDATA
71 72
diff --git a/arch/arm/mm/tlb-v4wbi.S b/arch/arm/mm/tlb-v4wbi.S
index 1f6062b6c1c1..058861548f68 100644
--- a/arch/arm/mm/tlb-v4wbi.S
+++ b/arch/arm/mm/tlb-v4wbi.S
@@ -14,6 +14,7 @@
14 */ 14 */
15#include <linux/linkage.h> 15#include <linux/linkage.h>
16#include <linux/init.h> 16#include <linux/init.h>
17#include <asm/assembler.h>
17#include <asm/asm-offsets.h> 18#include <asm/asm-offsets.h>
18#include <asm/tlbflush.h> 19#include <asm/tlbflush.h>
19#include "proc-macros.S" 20#include "proc-macros.S"
@@ -32,7 +33,7 @@ ENTRY(v4wbi_flush_user_tlb_range)
32 vma_vm_mm ip, r2 33 vma_vm_mm ip, r2
33 act_mm r3 @ get current->active_mm 34 act_mm r3 @ get current->active_mm
34 eors r3, ip, r3 @ == mm ? 35 eors r3, ip, r3 @ == mm ?
35 movne pc, lr @ no, we dont do anything 36 retne lr @ no, we dont do anything
36 mov r3, #0 37 mov r3, #0
37 mcr p15, 0, r3, c7, c10, 4 @ drain WB 38 mcr p15, 0, r3, c7, c10, 4 @ drain WB
38 vma_vm_flags r2, r2 39 vma_vm_flags r2, r2
@@ -44,7 +45,7 @@ ENTRY(v4wbi_flush_user_tlb_range)
44 add r0, r0, #PAGE_SZ 45 add r0, r0, #PAGE_SZ
45 cmp r0, r1 46 cmp r0, r1
46 blo 1b 47 blo 1b
47 mov pc, lr 48 ret lr
48 49
49ENTRY(v4wbi_flush_kern_tlb_range) 50ENTRY(v4wbi_flush_kern_tlb_range)
50 mov r3, #0 51 mov r3, #0
@@ -56,7 +57,7 @@ ENTRY(v4wbi_flush_kern_tlb_range)
56 add r0, r0, #PAGE_SZ 57 add r0, r0, #PAGE_SZ
57 cmp r0, r1 58 cmp r0, r1
58 blo 1b 59 blo 1b
59 mov pc, lr 60 ret lr
60 61
61 __INITDATA 62 __INITDATA
62 63
diff --git a/arch/arm/mm/tlb-v6.S b/arch/arm/mm/tlb-v6.S
index eca07f550a0b..6f689be638bd 100644
--- a/arch/arm/mm/tlb-v6.S
+++ b/arch/arm/mm/tlb-v6.S
@@ -13,6 +13,7 @@
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/linkage.h> 14#include <linux/linkage.h>
15#include <asm/asm-offsets.h> 15#include <asm/asm-offsets.h>
16#include <asm/assembler.h>
16#include <asm/page.h> 17#include <asm/page.h>
17#include <asm/tlbflush.h> 18#include <asm/tlbflush.h>
18#include "proc-macros.S" 19#include "proc-macros.S"
@@ -55,7 +56,7 @@ ENTRY(v6wbi_flush_user_tlb_range)
55 cmp r0, r1 56 cmp r0, r1
56 blo 1b 57 blo 1b
57 mcr p15, 0, ip, c7, c10, 4 @ data synchronization barrier 58 mcr p15, 0, ip, c7, c10, 4 @ data synchronization barrier
58 mov pc, lr 59 ret lr
59 60
60/* 61/*
61 * v6wbi_flush_kern_tlb_range(start,end) 62 * v6wbi_flush_kern_tlb_range(start,end)
@@ -84,7 +85,7 @@ ENTRY(v6wbi_flush_kern_tlb_range)
84 blo 1b 85 blo 1b
85 mcr p15, 0, r2, c7, c10, 4 @ data synchronization barrier 86 mcr p15, 0, r2, c7, c10, 4 @ data synchronization barrier
86 mcr p15, 0, r2, c7, c5, 4 @ prefetch flush (isb) 87 mcr p15, 0, r2, c7, c5, 4 @ prefetch flush (isb)
87 mov pc, lr 88 ret lr
88 89
89 __INIT 90 __INIT
90 91
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
index 355308767bae..e5101a3bc57c 100644
--- a/arch/arm/mm/tlb-v7.S
+++ b/arch/arm/mm/tlb-v7.S
@@ -57,7 +57,7 @@ ENTRY(v7wbi_flush_user_tlb_range)
57 cmp r0, r1 57 cmp r0, r1
58 blo 1b 58 blo 1b
59 dsb ish 59 dsb ish
60 mov pc, lr 60 ret lr
61ENDPROC(v7wbi_flush_user_tlb_range) 61ENDPROC(v7wbi_flush_user_tlb_range)
62 62
63/* 63/*
@@ -86,7 +86,7 @@ ENTRY(v7wbi_flush_kern_tlb_range)
86 blo 1b 86 blo 1b
87 dsb ish 87 dsb ish
88 isb 88 isb
89 mov pc, lr 89 ret lr
90ENDPROC(v7wbi_flush_kern_tlb_range) 90ENDPROC(v7wbi_flush_kern_tlb_range)
91 91
92 __INIT 92 __INIT
diff --git a/arch/arm/nwfpe/entry.S b/arch/arm/nwfpe/entry.S
index d18dde95b8aa..5d65be1f1e8a 100644
--- a/arch/arm/nwfpe/entry.S
+++ b/arch/arm/nwfpe/entry.S
@@ -19,7 +19,7 @@
19 along with this program; if not, write to the Free Software 19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 20 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21*/ 21*/
22 22#include <asm/assembler.h>
23#include <asm/opcodes.h> 23#include <asm/opcodes.h>
24 24
25/* This is the kernel's entry point into the floating point emulator. 25/* This is the kernel's entry point into the floating point emulator.
@@ -92,7 +92,7 @@ emulate:
92 mov r0, r6 @ prepare for EmulateAll() 92 mov r0, r6 @ prepare for EmulateAll()
93 bl EmulateAll @ emulate the instruction 93 bl EmulateAll @ emulate the instruction
94 cmp r0, #0 @ was emulation successful 94 cmp r0, #0 @ was emulation successful
95 moveq pc, r4 @ no, return failure 95 reteq r4 @ no, return failure
96 96
97next: 97next:
98.Lx1: ldrt r6, [r5], #4 @ get the next instruction and 98.Lx1: ldrt r6, [r5], #4 @ get the next instruction and
@@ -102,7 +102,7 @@ next:
102 teq r2, #0x0C000000 102 teq r2, #0x0C000000
103 teqne r2, #0x0D000000 103 teqne r2, #0x0D000000
104 teqne r2, #0x0E000000 104 teqne r2, #0x0E000000
105 movne pc, r9 @ return ok if not a fp insn 105 retne r9 @ return ok if not a fp insn
106 106
107 str r5, [sp, #S_PC] @ update PC copy in regs 107 str r5, [sp, #S_PC] @ update PC copy in regs
108 108
@@ -115,7 +115,7 @@ next:
115 @ plain LDR instruction. Weird, but it seems harmless. 115 @ plain LDR instruction. Weird, but it seems harmless.
116 .pushsection .fixup,"ax" 116 .pushsection .fixup,"ax"
117 .align 2 117 .align 2
118.Lfix: mov pc, r9 @ let the user eat segfaults 118.Lfix: ret r9 @ let the user eat segfaults
119 .popsection 119 .popsection
120 120
121 .pushsection __ex_table,"a" 121 .pushsection __ex_table,"a"
diff --git a/arch/arm/oprofile/common.c b/arch/arm/oprofile/common.c
index 99c63d4b6af8..e6a3c4c92163 100644
--- a/arch/arm/oprofile/common.c
+++ b/arch/arm/oprofile/common.c
@@ -107,10 +107,7 @@ static void arm_backtrace(struct pt_regs * const regs, unsigned int depth)
107 107
108 if (!user_mode(regs)) { 108 if (!user_mode(regs)) {
109 struct stackframe frame; 109 struct stackframe frame;
110 frame.fp = regs->ARM_fp; 110 arm_get_current_stackframe(regs, &frame);
111 frame.sp = regs->ARM_sp;
112 frame.lr = regs->ARM_lr;
113 frame.pc = regs->ARM_pc;
114 walk_stackframe(&frame, report_trace, &depth); 111 walk_stackframe(&frame, report_trace, &depth);
115 return; 112 return;
116 } 113 }
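The oprofile hunk above swaps four open-coded field copies for the arm_get_current_stackframe() helper. Judging from the removed assignments, the helper seeds a struct stackframe from the trapped registers roughly as below (sketch only; the real definition lives in asm/stacktrace.h and uses frame_pointer(regs) so Thumb-2 builds pick the correct frame-pointer register):

	static inline void arm_get_current_stackframe(struct pt_regs *regs,
						      struct stackframe *frame)
	{
		frame->fp = regs->ARM_fp;	/* frame_pointer(regs) in the real helper */
		frame->sp = regs->ARM_sp;
		frame->lr = regs->ARM_lr;
		frame->pc = regs->ARM_pc;
	}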
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index b5608b1f9fbd..1c98659bbf89 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -698,6 +698,8 @@ int omap_request_dma(int dev_id, const char *dev_name,
698 unsigned long flags; 698 unsigned long flags;
699 struct omap_dma_lch *chan; 699 struct omap_dma_lch *chan;
700 700
701 WARN(strcmp(dev_name, "DMA engine"), "Using deprecated platform DMA API - please update to DMA engine");
702
701 spin_lock_irqsave(&dma_chan_lock, flags); 703 spin_lock_irqsave(&dma_chan_lock, flags);
702 for (ch = 0; ch < dma_chan_count; ch++) { 704 for (ch = 0; ch < dma_chan_count; ch++) {
703 if (free_ch == -1 && dma_chan[ch].dev_id == -1) { 705 if (free_ch == -1 && dma_chan[ch].dev_id == -1) {
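The WARN() added to omap_request_dma() above relies on strcmp() returning non-zero for any caller that does not identify itself as the dmaengine wrapper: legacy users get the message plus a backtrace, while the "DMA engine" name compares equal and stays silent. A hedged illustration of the pattern (the caller name is hypothetical):

	const char *dev_name = "my-legacy-driver";	/* any name but "DMA engine" */

	/* WARN(cond, fmt, ...) prints fmt and a stack trace whenever cond != 0,
	 * so this fires for every caller still on the platform DMA API. */
	WARN(strcmp(dev_name, "DMA engine"),
	     "Using deprecated platform DMA API - please update to DMA engine");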
diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S
index fe6ca574d093..2e78760f3495 100644
--- a/arch/arm/vfp/entry.S
+++ b/arch/arm/vfp/entry.S
@@ -34,7 +34,7 @@ ENDPROC(do_vfp)
34 34
35ENTRY(vfp_null_entry) 35ENTRY(vfp_null_entry)
36 dec_preempt_count_ti r10, r4 36 dec_preempt_count_ti r10, r4
37 mov pc, lr 37 ret lr
38ENDPROC(vfp_null_entry) 38ENDPROC(vfp_null_entry)
39 39
40 .align 2 40 .align 2
@@ -49,7 +49,7 @@ ENTRY(vfp_testing_entry)
49 dec_preempt_count_ti r10, r4 49 dec_preempt_count_ti r10, r4
50 ldr r0, VFP_arch_address 50 ldr r0, VFP_arch_address
51 str r0, [r0] @ set to non-zero value 51 str r0, [r0] @ set to non-zero value
52 mov pc, r9 @ we have handled the fault 52 ret r9 @ we have handled the fault
53ENDPROC(vfp_testing_entry) 53ENDPROC(vfp_testing_entry)
54 54
55 .align 2 55 .align 2
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index be807625ed8c..cda654cbf2c2 100644
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -183,7 +183,7 @@ vfp_hw_state_valid:
183 @ always subtract 4 from the following 183 @ always subtract 4 from the following
184 @ instruction address. 184 @ instruction address.
185 dec_preempt_count_ti r10, r4 185 dec_preempt_count_ti r10, r4
186 mov pc, r9 @ we think we have handled things 186 ret r9 @ we think we have handled things
187 187
188 188
189look_for_VFP_exceptions: 189look_for_VFP_exceptions:
@@ -202,7 +202,7 @@ look_for_VFP_exceptions:
202 202
203 DBGSTR "not VFP" 203 DBGSTR "not VFP"
204 dec_preempt_count_ti r10, r4 204 dec_preempt_count_ti r10, r4
205 mov pc, lr 205 ret lr
206 206
207process_exception: 207process_exception:
208 DBGSTR "bounce" 208 DBGSTR "bounce"
@@ -234,7 +234,7 @@ ENTRY(vfp_save_state)
234 VFPFMRX r12, FPINST2 @ FPINST2 if needed (and present) 234 VFPFMRX r12, FPINST2 @ FPINST2 if needed (and present)
2351: 2351:
236 stmia r0, {r1, r2, r3, r12} @ save FPEXC, FPSCR, FPINST, FPINST2 236 stmia r0, {r1, r2, r3, r12} @ save FPEXC, FPSCR, FPINST, FPINST2
237 mov pc, lr 237 ret lr
238ENDPROC(vfp_save_state) 238ENDPROC(vfp_save_state)
239 239
240 .align 240 .align
@@ -245,7 +245,7 @@ vfp_current_hw_state_address:
245#ifdef CONFIG_THUMB2_KERNEL 245#ifdef CONFIG_THUMB2_KERNEL
246 adr \tmp, 1f 246 adr \tmp, 1f
247 add \tmp, \tmp, \base, lsl \shift 247 add \tmp, \tmp, \base, lsl \shift
248 mov pc, \tmp 248 ret \tmp
249#else 249#else
250 add pc, pc, \base, lsl \shift 250 add pc, pc, \base, lsl \shift
251 mov r0, r0 251 mov r0, r0
@@ -257,10 +257,10 @@ ENTRY(vfp_get_float)
257 tbl_branch r0, r3, #3 257 tbl_branch r0, r3, #3
258 .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 258 .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
2591: mrc p10, 0, r0, c\dr, c0, 0 @ fmrs r0, s0 2591: mrc p10, 0, r0, c\dr, c0, 0 @ fmrs r0, s0
260 mov pc, lr 260 ret lr
261 .org 1b + 8 261 .org 1b + 8
2621: mrc p10, 0, r0, c\dr, c0, 4 @ fmrs r0, s1 2621: mrc p10, 0, r0, c\dr, c0, 4 @ fmrs r0, s1
263 mov pc, lr 263 ret lr
264 .org 1b + 8 264 .org 1b + 8
265 .endr 265 .endr
266ENDPROC(vfp_get_float) 266ENDPROC(vfp_get_float)
@@ -269,10 +269,10 @@ ENTRY(vfp_put_float)
269 tbl_branch r1, r3, #3 269 tbl_branch r1, r3, #3
270 .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 270 .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
2711: mcr p10, 0, r0, c\dr, c0, 0 @ fmsr r0, s0 2711: mcr p10, 0, r0, c\dr, c0, 0 @ fmsr r0, s0
272 mov pc, lr 272 ret lr
273 .org 1b + 8 273 .org 1b + 8
2741: mcr p10, 0, r0, c\dr, c0, 4 @ fmsr r0, s1 2741: mcr p10, 0, r0, c\dr, c0, 4 @ fmsr r0, s1
275 mov pc, lr 275 ret lr
276 .org 1b + 8 276 .org 1b + 8
277 .endr 277 .endr
278ENDPROC(vfp_put_float) 278ENDPROC(vfp_put_float)
@@ -281,14 +281,14 @@ ENTRY(vfp_get_double)
281 tbl_branch r0, r3, #3 281 tbl_branch r0, r3, #3
282 .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 282 .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
2831: fmrrd r0, r1, d\dr 2831: fmrrd r0, r1, d\dr
284 mov pc, lr 284 ret lr
285 .org 1b + 8 285 .org 1b + 8
286 .endr 286 .endr
287#ifdef CONFIG_VFPv3 287#ifdef CONFIG_VFPv3
288 @ d16 - d31 registers 288 @ d16 - d31 registers
289 .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 289 .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
2901: mrrc p11, 3, r0, r1, c\dr @ fmrrd r0, r1, d\dr 2901: mrrc p11, 3, r0, r1, c\dr @ fmrrd r0, r1, d\dr
291 mov pc, lr 291 ret lr
292 .org 1b + 8 292 .org 1b + 8
293 .endr 293 .endr
294#endif 294#endif
@@ -296,21 +296,21 @@ ENTRY(vfp_get_double)
296 @ virtual register 16 (or 32 if VFPv3) for compare with zero 296 @ virtual register 16 (or 32 if VFPv3) for compare with zero
297 mov r0, #0 297 mov r0, #0
298 mov r1, #0 298 mov r1, #0
299 mov pc, lr 299 ret lr
300ENDPROC(vfp_get_double) 300ENDPROC(vfp_get_double)
301 301
302ENTRY(vfp_put_double) 302ENTRY(vfp_put_double)
303 tbl_branch r2, r3, #3 303 tbl_branch r2, r3, #3
304 .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 304 .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
3051: fmdrr d\dr, r0, r1 3051: fmdrr d\dr, r0, r1
306 mov pc, lr 306 ret lr
307 .org 1b + 8 307 .org 1b + 8
308 .endr 308 .endr
309#ifdef CONFIG_VFPv3 309#ifdef CONFIG_VFPv3
310 @ d16 - d31 registers 310 @ d16 - d31 registers
311 .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 311 .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
3121: mcrr p11, 3, r0, r1, c\dr @ fmdrr r0, r1, d\dr 3121: mcrr p11, 3, r0, r1, c\dr @ fmdrr r0, r1, d\dr
313 mov pc, lr 313 ret lr
314 .org 1b + 8 314 .org 1b + 8
315 .endr 315 .endr
316#endif 316#endif
diff --git a/arch/arm/xen/hypercall.S b/arch/arm/xen/hypercall.S
index 44e3a5f10c4c..f00e08075938 100644
--- a/arch/arm/xen/hypercall.S
+++ b/arch/arm/xen/hypercall.S
@@ -58,7 +58,7 @@
58ENTRY(HYPERVISOR_##hypercall) \ 58ENTRY(HYPERVISOR_##hypercall) \
59 mov r12, #__HYPERVISOR_##hypercall; \ 59 mov r12, #__HYPERVISOR_##hypercall; \
60 __HVC(XEN_IMM); \ 60 __HVC(XEN_IMM); \
61 mov pc, lr; \ 61 ret lr; \
62ENDPROC(HYPERVISOR_##hypercall) 62ENDPROC(HYPERVISOR_##hypercall)
63 63
64#define HYPERCALL0 HYPERCALL_SIMPLE 64#define HYPERCALL0 HYPERCALL_SIMPLE
@@ -74,7 +74,7 @@ ENTRY(HYPERVISOR_##hypercall) \
74 mov r12, #__HYPERVISOR_##hypercall; \ 74 mov r12, #__HYPERVISOR_##hypercall; \
75 __HVC(XEN_IMM); \ 75 __HVC(XEN_IMM); \
76 ldm sp!, {r4} \ 76 ldm sp!, {r4} \
77 mov pc, lr \ 77 ret lr \
78ENDPROC(HYPERVISOR_##hypercall) 78ENDPROC(HYPERVISOR_##hypercall)
79 79
80 .text 80 .text
@@ -101,5 +101,5 @@ ENTRY(privcmd_call)
101 ldr r4, [sp, #4] 101 ldr r4, [sp, #4]
102 __HVC(XEN_IMM) 102 __HVC(XEN_IMM)
103 ldm sp!, {r4} 103 ldm sp!, {r4}
104 mov pc, lr 104 ret lr
105ENDPROC(privcmd_call); 105ENDPROC(privcmd_call);
diff --git a/arch/ia64/include/uapi/asm/fcntl.h b/arch/ia64/include/uapi/asm/fcntl.h
index 1dd275dc8f65..7b485876cad4 100644
--- a/arch/ia64/include/uapi/asm/fcntl.h
+++ b/arch/ia64/include/uapi/asm/fcntl.h
@@ -8,6 +8,7 @@
8#define force_o_largefile() \ 8#define force_o_largefile() \
9 (personality(current->personality) != PER_LINUX32) 9 (personality(current->personality) != PER_LINUX32)
10 10
11#include <linux/personality.h>
11#include <asm-generic/fcntl.h> 12#include <asm-generic/fcntl.h>
12 13
13#endif /* _ASM_IA64_FCNTL_H */ 14#endif /* _ASM_IA64_FCNTL_H */
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 7a469acee33c..4e238e6e661c 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -269,6 +269,7 @@ config LANTIQ
269config LASAT 269config LASAT
270 bool "LASAT Networks platforms" 270 bool "LASAT Networks platforms"
271 select CEVT_R4K 271 select CEVT_R4K
272 select CRC32
272 select CSRC_R4K 273 select CSRC_R4K
273 select DMA_NONCOHERENT 274 select DMA_NONCOHERENT
274 select SYS_HAS_EARLY_PRINTK 275 select SYS_HAS_EARLY_PRINTK
diff --git a/arch/mips/include/asm/sigcontext.h b/arch/mips/include/asm/sigcontext.h
index f54bdbe85c0d..eeeb0f48c767 100644
--- a/arch/mips/include/asm/sigcontext.h
+++ b/arch/mips/include/asm/sigcontext.h
@@ -32,8 +32,6 @@ struct sigcontext32 {
32 __u32 sc_lo2; 32 __u32 sc_lo2;
33 __u32 sc_hi3; 33 __u32 sc_hi3;
34 __u32 sc_lo3; 34 __u32 sc_lo3;
35 __u64 sc_msaregs[32]; /* Most significant 64 bits */
36 __u32 sc_msa_csr;
37}; 35};
38#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */ 36#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */
39#endif /* _ASM_SIGCONTEXT_H */ 37#endif /* _ASM_SIGCONTEXT_H */
diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h
index f8d63b3b40b4..708c5d414905 100644
--- a/arch/mips/include/asm/uasm.h
+++ b/arch/mips/include/asm/uasm.h
@@ -67,6 +67,9 @@ void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)
67#define Ip_u2s3u1(op) \ 67#define Ip_u2s3u1(op) \
68void ISAOPC(op)(u32 **buf, unsigned int a, signed int b, unsigned int c) 68void ISAOPC(op)(u32 **buf, unsigned int a, signed int b, unsigned int c)
69 69
70#define Ip_s3s1s2(op) \
71void ISAOPC(op)(u32 **buf, int a, int b, int c)
72
70#define Ip_u2u1s3(op) \ 73#define Ip_u2u1s3(op) \
71void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c) 74void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)
72 75
@@ -147,6 +150,7 @@ Ip_u2s3u1(_scd);
147Ip_u2s3u1(_sd); 150Ip_u2s3u1(_sd);
148Ip_u2u1u3(_sll); 151Ip_u2u1u3(_sll);
149Ip_u3u2u1(_sllv); 152Ip_u3u2u1(_sllv);
153Ip_s3s1s2(_slt);
150Ip_u2u1s3(_sltiu); 154Ip_u2u1s3(_sltiu);
151Ip_u3u1u2(_sltu); 155Ip_u3u1u2(_sltu);
152Ip_u2u1u3(_sra); 156Ip_u2u1u3(_sra);
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
index 4b7160259292..4bfdb9d4c186 100644
--- a/arch/mips/include/uapi/asm/inst.h
+++ b/arch/mips/include/uapi/asm/inst.h
@@ -273,6 +273,7 @@ enum mm_32a_minor_op {
273 mm_and_op = 0x250, 273 mm_and_op = 0x250,
274 mm_or32_op = 0x290, 274 mm_or32_op = 0x290,
275 mm_xor32_op = 0x310, 275 mm_xor32_op = 0x310,
276 mm_slt_op = 0x350,
276 mm_sltu_op = 0x390, 277 mm_sltu_op = 0x390,
277}; 278};
278 279
diff --git a/arch/mips/include/uapi/asm/sigcontext.h b/arch/mips/include/uapi/asm/sigcontext.h
index 681c17603a48..6c9906f59c6e 100644
--- a/arch/mips/include/uapi/asm/sigcontext.h
+++ b/arch/mips/include/uapi/asm/sigcontext.h
@@ -12,10 +12,6 @@
12#include <linux/types.h> 12#include <linux/types.h>
13#include <asm/sgidefs.h> 13#include <asm/sgidefs.h>
14 14
15/* Bits which may be set in sc_used_math */
16#define USEDMATH_FP (1 << 0)
17#define USEDMATH_MSA (1 << 1)
18
19#if _MIPS_SIM == _MIPS_SIM_ABI32 15#if _MIPS_SIM == _MIPS_SIM_ABI32
20 16
21/* 17/*
@@ -41,8 +37,6 @@ struct sigcontext {
41 unsigned long sc_lo2; 37 unsigned long sc_lo2;
42 unsigned long sc_hi3; 38 unsigned long sc_hi3;
43 unsigned long sc_lo3; 39 unsigned long sc_lo3;
44 unsigned long long sc_msaregs[32]; /* Most significant 64 bits */
45 unsigned long sc_msa_csr;
46}; 40};
47 41
48#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ 42#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
@@ -76,8 +70,6 @@ struct sigcontext {
76 __u32 sc_used_math; 70 __u32 sc_used_math;
77 __u32 sc_dsp; 71 __u32 sc_dsp;
78 __u32 sc_reserved; 72 __u32 sc_reserved;
79 __u64 sc_msaregs[32];
80 __u32 sc_msa_csr;
81}; 73};
82 74
83 75
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 02f075df8f2e..4bb5107511e2 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -293,7 +293,6 @@ void output_sc_defines(void)
293 OFFSET(SC_LO2, sigcontext, sc_lo2); 293 OFFSET(SC_LO2, sigcontext, sc_lo2);
294 OFFSET(SC_HI3, sigcontext, sc_hi3); 294 OFFSET(SC_HI3, sigcontext, sc_hi3);
295 OFFSET(SC_LO3, sigcontext, sc_lo3); 295 OFFSET(SC_LO3, sigcontext, sc_lo3);
296 OFFSET(SC_MSAREGS, sigcontext, sc_msaregs);
297 BLANK(); 296 BLANK();
298} 297}
299#endif 298#endif
@@ -308,7 +307,6 @@ void output_sc_defines(void)
308 OFFSET(SC_MDLO, sigcontext, sc_mdlo); 307 OFFSET(SC_MDLO, sigcontext, sc_mdlo);
309 OFFSET(SC_PC, sigcontext, sc_pc); 308 OFFSET(SC_PC, sigcontext, sc_pc);
310 OFFSET(SC_FPC_CSR, sigcontext, sc_fpc_csr); 309 OFFSET(SC_FPC_CSR, sigcontext, sc_fpc_csr);
311 OFFSET(SC_MSAREGS, sigcontext, sc_msaregs);
312 BLANK(); 310 BLANK();
313} 311}
314#endif 312#endif
@@ -320,7 +318,6 @@ void output_sc32_defines(void)
320 OFFSET(SC32_FPREGS, sigcontext32, sc_fpregs); 318 OFFSET(SC32_FPREGS, sigcontext32, sc_fpregs);
321 OFFSET(SC32_FPC_CSR, sigcontext32, sc_fpc_csr); 319 OFFSET(SC32_FPC_CSR, sigcontext32, sc_fpc_csr);
322 OFFSET(SC32_FPC_EIR, sigcontext32, sc_fpc_eir); 320 OFFSET(SC32_FPC_EIR, sigcontext32, sc_fpc_eir);
323 OFFSET(SC32_MSAREGS, sigcontext32, sc_msaregs);
324 BLANK(); 321 BLANK();
325} 322}
326#endif 323#endif
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c
index 4858642d543d..a734b2c2f9ea 100644
--- a/arch/mips/kernel/irq-msc01.c
+++ b/arch/mips/kernel/irq-msc01.c
@@ -126,7 +126,7 @@ void __init init_msc_irqs(unsigned long icubase, unsigned int irqbase, msc_irqma
126 126
127 board_bind_eic_interrupt = &msc_bind_eic_interrupt; 127 board_bind_eic_interrupt = &msc_bind_eic_interrupt;
128 128
129 for (; nirq >= 0; nirq--, imp++) { 129 for (; nirq > 0; nirq--, imp++) {
130 int n = imp->im_irq; 130 int n = imp->im_irq;
131 131
132 switch (imp->im_type) { 132 switch (imp->im_type) {
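The irq-msc01 change above is a one-character loop-bound fix: counting nirq down with ">= 0" runs the body nirq + 1 times and walks one imp[] entry past the table, while "> 0" visits exactly nirq entries. A minimal standalone illustration of the two iteration counts:

	#include <assert.h>

	static int iterations(int nirq, int use_old_bound)
	{
		int count = 0;

		for (; use_old_bound ? nirq >= 0 : nirq > 0; nirq--)
			count++;
		return count;
	}

	int main(void)
	{
		assert(iterations(4, 1) == 5);	/* old bound: one entry too many */
		assert(iterations(4, 0) == 4);	/* new bound: matches table size */
		return 0;
	}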
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
index 5aa4c6f8cf83..c4c2069d3a20 100644
--- a/arch/mips/kernel/pm-cps.c
+++ b/arch/mips/kernel/pm-cps.c
@@ -101,7 +101,7 @@ static void coupled_barrier(atomic_t *a, unsigned online)
101 if (!coupled_coherence) 101 if (!coupled_coherence)
102 return; 102 return;
103 103
104 smp_mb__before_atomic_inc(); 104 smp_mb__before_atomic();
105 atomic_inc(a); 105 atomic_inc(a);
106 106
107 while (atomic_read(a) < online) 107 while (atomic_read(a) < online)
@@ -158,7 +158,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
158 158
159 /* Indicate that this CPU might not be coherent */ 159 /* Indicate that this CPU might not be coherent */
160 cpumask_clear_cpu(cpu, &cpu_coherent_mask); 160 cpumask_clear_cpu(cpu, &cpu_coherent_mask);
161 smp_mb__after_clear_bit(); 161 smp_mb__after_atomic();
162 162
163 /* Create a non-coherent mapping of the core ready_count */ 163 /* Create a non-coherent mapping of the core ready_count */
164 core_ready_count = per_cpu(ready_count, core); 164 core_ready_count = per_cpu(ready_count, core);
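The pm-cps hunks above only rename barriers: smp_mb__before_atomic_inc() and smp_mb__after_clear_bit() were folded into the generic smp_mb__before_atomic()/smp_mb__after_atomic() pair, which orders memory accesses around any value-less atomic RMW or bitop. A hedged usage sketch with the new names (kernel context assumed; not code from this patch):

	static atomic_t ready = ATOMIC_INIT(0);
	static unsigned long coherent_mask;

	static void announce_ready(void)
	{
		smp_mb__before_atomic();	/* earlier stores visible before the inc */
		atomic_inc(&ready);
	}

	static void mark_noncoherent(int cpu)
	{
		clear_bit(cpu, &coherent_mask);
		smp_mb__after_atomic();		/* bit clear visible before later accesses */
	}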
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
index 71814272d148..8352523568e6 100644
--- a/arch/mips/kernel/r4k_fpu.S
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -13,7 +13,6 @@
13 * Copyright (C) 1999, 2001 Silicon Graphics, Inc. 13 * Copyright (C) 1999, 2001 Silicon Graphics, Inc.
14 */ 14 */
15#include <asm/asm.h> 15#include <asm/asm.h>
16#include <asm/asmmacro.h>
17#include <asm/errno.h> 16#include <asm/errno.h>
18#include <asm/fpregdef.h> 17#include <asm/fpregdef.h>
19#include <asm/mipsregs.h> 18#include <asm/mipsregs.h>
@@ -246,218 +245,6 @@ LEAF(_restore_fp_context32)
246 END(_restore_fp_context32) 245 END(_restore_fp_context32)
247#endif 246#endif
248 247
249#ifdef CONFIG_CPU_HAS_MSA
250
251 .macro save_sc_msareg wr, off, sc, tmp
252#ifdef CONFIG_64BIT
253 copy_u_d \tmp, \wr, 1
254 EX sd \tmp, (\off+(\wr*8))(\sc)
255#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
256 copy_u_w \tmp, \wr, 2
257 EX sw \tmp, (\off+(\wr*8)+0)(\sc)
258 copy_u_w \tmp, \wr, 3
259 EX sw \tmp, (\off+(\wr*8)+4)(\sc)
260#else /* CONFIG_CPU_BIG_ENDIAN */
261 copy_u_w \tmp, \wr, 2
262 EX sw \tmp, (\off+(\wr*8)+4)(\sc)
263 copy_u_w \tmp, \wr, 3
264 EX sw \tmp, (\off+(\wr*8)+0)(\sc)
265#endif
266 .endm
267
268/*
269 * int _save_msa_context(struct sigcontext *sc)
270 *
271 * Save the upper 64 bits of each vector register along with the MSA_CSR
272 * register into sc. Returns zero on success, else non-zero.
273 */
274LEAF(_save_msa_context)
275 save_sc_msareg 0, SC_MSAREGS, a0, t0
276 save_sc_msareg 1, SC_MSAREGS, a0, t0
277 save_sc_msareg 2, SC_MSAREGS, a0, t0
278 save_sc_msareg 3, SC_MSAREGS, a0, t0
279 save_sc_msareg 4, SC_MSAREGS, a0, t0
280 save_sc_msareg 5, SC_MSAREGS, a0, t0
281 save_sc_msareg 6, SC_MSAREGS, a0, t0
282 save_sc_msareg 7, SC_MSAREGS, a0, t0
283 save_sc_msareg 8, SC_MSAREGS, a0, t0
284 save_sc_msareg 9, SC_MSAREGS, a0, t0
285 save_sc_msareg 10, SC_MSAREGS, a0, t0
286 save_sc_msareg 11, SC_MSAREGS, a0, t0
287 save_sc_msareg 12, SC_MSAREGS, a0, t0
288 save_sc_msareg 13, SC_MSAREGS, a0, t0
289 save_sc_msareg 14, SC_MSAREGS, a0, t0
290 save_sc_msareg 15, SC_MSAREGS, a0, t0
291 save_sc_msareg 16, SC_MSAREGS, a0, t0
292 save_sc_msareg 17, SC_MSAREGS, a0, t0
293 save_sc_msareg 18, SC_MSAREGS, a0, t0
294 save_sc_msareg 19, SC_MSAREGS, a0, t0
295 save_sc_msareg 20, SC_MSAREGS, a0, t0
296 save_sc_msareg 21, SC_MSAREGS, a0, t0
297 save_sc_msareg 22, SC_MSAREGS, a0, t0
298 save_sc_msareg 23, SC_MSAREGS, a0, t0
299 save_sc_msareg 24, SC_MSAREGS, a0, t0
300 save_sc_msareg 25, SC_MSAREGS, a0, t0
301 save_sc_msareg 26, SC_MSAREGS, a0, t0
302 save_sc_msareg 27, SC_MSAREGS, a0, t0
303 save_sc_msareg 28, SC_MSAREGS, a0, t0
304 save_sc_msareg 29, SC_MSAREGS, a0, t0
305 save_sc_msareg 30, SC_MSAREGS, a0, t0
306 save_sc_msareg 31, SC_MSAREGS, a0, t0
307 jr ra
308 li v0, 0
309 END(_save_msa_context)
310
311#ifdef CONFIG_MIPS32_COMPAT
312
313/*
314 * int _save_msa_context32(struct sigcontext32 *sc)
315 *
316 * Save the upper 64 bits of each vector register along with the MSA_CSR
317 * register into sc. Returns zero on success, else non-zero.
318 */
319LEAF(_save_msa_context32)
320 save_sc_msareg 0, SC32_MSAREGS, a0, t0
321 save_sc_msareg 1, SC32_MSAREGS, a0, t0
322 save_sc_msareg 2, SC32_MSAREGS, a0, t0
323 save_sc_msareg 3, SC32_MSAREGS, a0, t0
324 save_sc_msareg 4, SC32_MSAREGS, a0, t0
325 save_sc_msareg 5, SC32_MSAREGS, a0, t0
326 save_sc_msareg 6, SC32_MSAREGS, a0, t0
327 save_sc_msareg 7, SC32_MSAREGS, a0, t0
328 save_sc_msareg 8, SC32_MSAREGS, a0, t0
329 save_sc_msareg 9, SC32_MSAREGS, a0, t0
330 save_sc_msareg 10, SC32_MSAREGS, a0, t0
331 save_sc_msareg 11, SC32_MSAREGS, a0, t0
332 save_sc_msareg 12, SC32_MSAREGS, a0, t0
333 save_sc_msareg 13, SC32_MSAREGS, a0, t0
334 save_sc_msareg 14, SC32_MSAREGS, a0, t0
335 save_sc_msareg 15, SC32_MSAREGS, a0, t0
336 save_sc_msareg 16, SC32_MSAREGS, a0, t0
337 save_sc_msareg 17, SC32_MSAREGS, a0, t0
338 save_sc_msareg 18, SC32_MSAREGS, a0, t0
339 save_sc_msareg 19, SC32_MSAREGS, a0, t0
340 save_sc_msareg 20, SC32_MSAREGS, a0, t0
341 save_sc_msareg 21, SC32_MSAREGS, a0, t0
342 save_sc_msareg 22, SC32_MSAREGS, a0, t0
343 save_sc_msareg 23, SC32_MSAREGS, a0, t0
344 save_sc_msareg 24, SC32_MSAREGS, a0, t0
345 save_sc_msareg 25, SC32_MSAREGS, a0, t0
346 save_sc_msareg 26, SC32_MSAREGS, a0, t0
347 save_sc_msareg 27, SC32_MSAREGS, a0, t0
348 save_sc_msareg 28, SC32_MSAREGS, a0, t0
349 save_sc_msareg 29, SC32_MSAREGS, a0, t0
350 save_sc_msareg 30, SC32_MSAREGS, a0, t0
351 save_sc_msareg 31, SC32_MSAREGS, a0, t0
352 jr ra
353 li v0, 0
354 END(_save_msa_context32)
355
356#endif /* CONFIG_MIPS32_COMPAT */
357
358 .macro restore_sc_msareg wr, off, sc, tmp
359#ifdef CONFIG_64BIT
360 EX ld \tmp, (\off+(\wr*8))(\sc)
361 insert_d \wr, 1, \tmp
362#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
363 EX lw \tmp, (\off+(\wr*8)+0)(\sc)
364 insert_w \wr, 2, \tmp
365 EX lw \tmp, (\off+(\wr*8)+4)(\sc)
366 insert_w \wr, 3, \tmp
367#else /* CONFIG_CPU_BIG_ENDIAN */
368 EX lw \tmp, (\off+(\wr*8)+4)(\sc)
369 insert_w \wr, 2, \tmp
370 EX lw \tmp, (\off+(\wr*8)+0)(\sc)
371 insert_w \wr, 3, \tmp
372#endif
373 .endm
374
375/*
376 * int _restore_msa_context(struct sigcontext *sc)
377 */
378LEAF(_restore_msa_context)
379 restore_sc_msareg 0, SC_MSAREGS, a0, t0
380 restore_sc_msareg 1, SC_MSAREGS, a0, t0
381 restore_sc_msareg 2, SC_MSAREGS, a0, t0
382 restore_sc_msareg 3, SC_MSAREGS, a0, t0
383 restore_sc_msareg 4, SC_MSAREGS, a0, t0
384 restore_sc_msareg 5, SC_MSAREGS, a0, t0
385 restore_sc_msareg 6, SC_MSAREGS, a0, t0
386 restore_sc_msareg 7, SC_MSAREGS, a0, t0
387 restore_sc_msareg 8, SC_MSAREGS, a0, t0
388 restore_sc_msareg 9, SC_MSAREGS, a0, t0
389 restore_sc_msareg 10, SC_MSAREGS, a0, t0
390 restore_sc_msareg 11, SC_MSAREGS, a0, t0
391 restore_sc_msareg 12, SC_MSAREGS, a0, t0
392 restore_sc_msareg 13, SC_MSAREGS, a0, t0
393 restore_sc_msareg 14, SC_MSAREGS, a0, t0
394 restore_sc_msareg 15, SC_MSAREGS, a0, t0
395 restore_sc_msareg 16, SC_MSAREGS, a0, t0
396 restore_sc_msareg 17, SC_MSAREGS, a0, t0
397 restore_sc_msareg 18, SC_MSAREGS, a0, t0
398 restore_sc_msareg 19, SC_MSAREGS, a0, t0
399 restore_sc_msareg 20, SC_MSAREGS, a0, t0
400 restore_sc_msareg 21, SC_MSAREGS, a0, t0
401 restore_sc_msareg 22, SC_MSAREGS, a0, t0
402 restore_sc_msareg 23, SC_MSAREGS, a0, t0
403 restore_sc_msareg 24, SC_MSAREGS, a0, t0
404 restore_sc_msareg 25, SC_MSAREGS, a0, t0
405 restore_sc_msareg 26, SC_MSAREGS, a0, t0
406 restore_sc_msareg 27, SC_MSAREGS, a0, t0
407 restore_sc_msareg 28, SC_MSAREGS, a0, t0
408 restore_sc_msareg 29, SC_MSAREGS, a0, t0
409 restore_sc_msareg 30, SC_MSAREGS, a0, t0
410 restore_sc_msareg 31, SC_MSAREGS, a0, t0
411 jr ra
412 li v0, 0
413 END(_restore_msa_context)
414
415#ifdef CONFIG_MIPS32_COMPAT
416
417/*
418 * int _restore_msa_context32(struct sigcontext32 *sc)
419 */
420LEAF(_restore_msa_context32)
421 restore_sc_msareg 0, SC32_MSAREGS, a0, t0
422 restore_sc_msareg 1, SC32_MSAREGS, a0, t0
423 restore_sc_msareg 2, SC32_MSAREGS, a0, t0
424 restore_sc_msareg 3, SC32_MSAREGS, a0, t0
425 restore_sc_msareg 4, SC32_MSAREGS, a0, t0
426 restore_sc_msareg 5, SC32_MSAREGS, a0, t0
427 restore_sc_msareg 6, SC32_MSAREGS, a0, t0
428 restore_sc_msareg 7, SC32_MSAREGS, a0, t0
429 restore_sc_msareg 8, SC32_MSAREGS, a0, t0
430 restore_sc_msareg 9, SC32_MSAREGS, a0, t0
431 restore_sc_msareg 10, SC32_MSAREGS, a0, t0
432 restore_sc_msareg 11, SC32_MSAREGS, a0, t0
433 restore_sc_msareg 12, SC32_MSAREGS, a0, t0
434 restore_sc_msareg 13, SC32_MSAREGS, a0, t0
435 restore_sc_msareg 14, SC32_MSAREGS, a0, t0
436 restore_sc_msareg 15, SC32_MSAREGS, a0, t0
437 restore_sc_msareg 16, SC32_MSAREGS, a0, t0
438 restore_sc_msareg 17, SC32_MSAREGS, a0, t0
439 restore_sc_msareg 18, SC32_MSAREGS, a0, t0
440 restore_sc_msareg 19, SC32_MSAREGS, a0, t0
441 restore_sc_msareg 20, SC32_MSAREGS, a0, t0
442 restore_sc_msareg 21, SC32_MSAREGS, a0, t0
443 restore_sc_msareg 22, SC32_MSAREGS, a0, t0
444 restore_sc_msareg 23, SC32_MSAREGS, a0, t0
445 restore_sc_msareg 24, SC32_MSAREGS, a0, t0
446 restore_sc_msareg 25, SC32_MSAREGS, a0, t0
447 restore_sc_msareg 26, SC32_MSAREGS, a0, t0
448 restore_sc_msareg 27, SC32_MSAREGS, a0, t0
449 restore_sc_msareg 28, SC32_MSAREGS, a0, t0
450 restore_sc_msareg 29, SC32_MSAREGS, a0, t0
451 restore_sc_msareg 30, SC32_MSAREGS, a0, t0
452 restore_sc_msareg 31, SC32_MSAREGS, a0, t0
453 jr ra
454 li v0, 0
455 END(_restore_msa_context32)
456
457#endif /* CONFIG_MIPS32_COMPAT */
458
459#endif /* CONFIG_CPU_HAS_MSA */
460
461 .set reorder 248 .set reorder
462 249
463 .type fault@function 250 .type fault@function
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 33133d3df3e5..9e60d117e41e 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -31,7 +31,6 @@
31#include <linux/bitops.h> 31#include <linux/bitops.h>
32#include <asm/cacheflush.h> 32#include <asm/cacheflush.h>
33#include <asm/fpu.h> 33#include <asm/fpu.h>
34#include <asm/msa.h>
35#include <asm/sim.h> 34#include <asm/sim.h>
36#include <asm/ucontext.h> 35#include <asm/ucontext.h>
37#include <asm/cpu-features.h> 36#include <asm/cpu-features.h>
@@ -48,9 +47,6 @@ static int (*restore_fp_context)(struct sigcontext __user *sc);
48extern asmlinkage int _save_fp_context(struct sigcontext __user *sc); 47extern asmlinkage int _save_fp_context(struct sigcontext __user *sc);
49extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc); 48extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc);
50 49
51extern asmlinkage int _save_msa_context(struct sigcontext __user *sc);
52extern asmlinkage int _restore_msa_context(struct sigcontext __user *sc);
53
54struct sigframe { 50struct sigframe {
55 u32 sf_ass[4]; /* argument save space for o32 */ 51 u32 sf_ass[4]; /* argument save space for o32 */
56 u32 sf_pad[2]; /* Was: signal trampoline */ 52 u32 sf_pad[2]; /* Was: signal trampoline */
@@ -100,60 +96,20 @@ static int copy_fp_from_sigcontext(struct sigcontext __user *sc)
100} 96}
101 97
102/* 98/*
103 * These functions will save only the upper 64 bits of the vector registers,
104 * since the lower 64 bits have already been saved as the scalar FP context.
105 */
106static int copy_msa_to_sigcontext(struct sigcontext __user *sc)
107{
108 int i;
109 int err = 0;
110
111 for (i = 0; i < NUM_FPU_REGS; i++) {
112 err |=
113 __put_user(get_fpr64(&current->thread.fpu.fpr[i], 1),
114 &sc->sc_msaregs[i]);
115 }
116 err |= __put_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
117
118 return err;
119}
120
121static int copy_msa_from_sigcontext(struct sigcontext __user *sc)
122{
123 int i;
124 int err = 0;
125 u64 val;
126
127 for (i = 0; i < NUM_FPU_REGS; i++) {
128 err |= __get_user(val, &sc->sc_msaregs[i]);
129 set_fpr64(&current->thread.fpu.fpr[i], 1, val);
130 }
131 err |= __get_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
132
133 return err;
134}
135
136/*
137 * Helper routines 99 * Helper routines
138 */ 100 */
139static int protected_save_fp_context(struct sigcontext __user *sc, 101static int protected_save_fp_context(struct sigcontext __user *sc)
140 unsigned used_math)
141{ 102{
142 int err; 103 int err;
143 bool save_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
144#ifndef CONFIG_EVA 104#ifndef CONFIG_EVA
145 while (1) { 105 while (1) {
146 lock_fpu_owner(); 106 lock_fpu_owner();
147 if (is_fpu_owner()) { 107 if (is_fpu_owner()) {
148 err = save_fp_context(sc); 108 err = save_fp_context(sc);
149 if (save_msa && !err)
150 err = _save_msa_context(sc);
151 unlock_fpu_owner(); 109 unlock_fpu_owner();
152 } else { 110 } else {
153 unlock_fpu_owner(); 111 unlock_fpu_owner();
154 err = copy_fp_to_sigcontext(sc); 112 err = copy_fp_to_sigcontext(sc);
155 if (save_msa && !err)
156 err = copy_msa_to_sigcontext(sc);
157 } 113 }
158 if (likely(!err)) 114 if (likely(!err))
159 break; 115 break;
@@ -169,38 +125,24 @@ static int protected_save_fp_context(struct sigcontext __user *sc,
169 * EVA does not have FPU EVA instructions so saving fpu context directly 125 * EVA does not have FPU EVA instructions so saving fpu context directly
170 * does not work. 126 * does not work.
171 */ 127 */
172 disable_msa();
173 lose_fpu(1); 128 lose_fpu(1);
174 err = save_fp_context(sc); /* this might fail */ 129 err = save_fp_context(sc); /* this might fail */
175 if (save_msa && !err)
176 err = copy_msa_to_sigcontext(sc);
177#endif 130#endif
178 return err; 131 return err;
179} 132}
180 133
181static int protected_restore_fp_context(struct sigcontext __user *sc, 134static int protected_restore_fp_context(struct sigcontext __user *sc)
182 unsigned used_math)
183{ 135{
184 int err, tmp __maybe_unused; 136 int err, tmp __maybe_unused;
185 bool restore_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
186#ifndef CONFIG_EVA 137#ifndef CONFIG_EVA
187 while (1) { 138 while (1) {
188 lock_fpu_owner(); 139 lock_fpu_owner();
189 if (is_fpu_owner()) { 140 if (is_fpu_owner()) {
190 err = restore_fp_context(sc); 141 err = restore_fp_context(sc);
191 if (restore_msa && !err) {
192 enable_msa();
193 err = _restore_msa_context(sc);
194 } else {
195 /* signal handler may have used MSA */
196 disable_msa();
197 }
198 unlock_fpu_owner(); 142 unlock_fpu_owner();
199 } else { 143 } else {
200 unlock_fpu_owner(); 144 unlock_fpu_owner();
201 err = copy_fp_from_sigcontext(sc); 145 err = copy_fp_from_sigcontext(sc);
202 if (!err && (used_math & USEDMATH_MSA))
203 err = copy_msa_from_sigcontext(sc);
204 } 146 }
205 if (likely(!err)) 147 if (likely(!err))
206 break; 148 break;
@@ -216,11 +158,8 @@ static int protected_restore_fp_context(struct sigcontext __user *sc,
216 * EVA does not have FPU EVA instructions so restoring fpu context 158 * EVA does not have FPU EVA instructions so restoring fpu context
217 * directly does not work. 159 * directly does not work.
218 */ 160 */
219 enable_msa();
220 lose_fpu(0); 161 lose_fpu(0);
221 err = restore_fp_context(sc); /* this might fail */ 162 err = restore_fp_context(sc); /* this might fail */
222 if (restore_msa && !err)
223 err = copy_msa_from_sigcontext(sc);
224#endif 163#endif
225 return err; 164 return err;
226} 165}
@@ -252,8 +191,7 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
252 err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp); 191 err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
253 } 192 }
254 193
255 used_math = used_math() ? USEDMATH_FP : 0; 194 used_math = !!used_math();
256 used_math |= thread_msa_context_live() ? USEDMATH_MSA : 0;
257 err |= __put_user(used_math, &sc->sc_used_math); 195 err |= __put_user(used_math, &sc->sc_used_math);
258 196
259 if (used_math) { 197 if (used_math) {
@@ -261,7 +199,7 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
261 * Save FPU state to signal context. Signal handler 199 * Save FPU state to signal context. Signal handler
262 * will "inherit" current FPU state. 200 * will "inherit" current FPU state.
263 */ 201 */
264 err |= protected_save_fp_context(sc, used_math); 202 err |= protected_save_fp_context(sc);
265 } 203 }
266 return err; 204 return err;
267} 205}
@@ -286,14 +224,14 @@ int fpcsr_pending(unsigned int __user *fpcsr)
286} 224}
287 225
288static int 226static int
289check_and_restore_fp_context(struct sigcontext __user *sc, unsigned used_math) 227check_and_restore_fp_context(struct sigcontext __user *sc)
290{ 228{
291 int err, sig; 229 int err, sig;
292 230
293 err = sig = fpcsr_pending(&sc->sc_fpc_csr); 231 err = sig = fpcsr_pending(&sc->sc_fpc_csr);
294 if (err > 0) 232 if (err > 0)
295 err = 0; 233 err = 0;
296 err |= protected_restore_fp_context(sc, used_math); 234 err |= protected_restore_fp_context(sc);
297 return err ?: sig; 235 return err ?: sig;
298} 236}
299 237
@@ -333,10 +271,9 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
333 if (used_math) { 271 if (used_math) {
334 /* restore fpu context if we have used it before */ 272 /* restore fpu context if we have used it before */
335 if (!err) 273 if (!err)
336 err = check_and_restore_fp_context(sc, used_math); 274 err = check_and_restore_fp_context(sc);
337 } else { 275 } else {
338 /* signal handler may have used FPU or MSA. Disable them. */ 276 /* signal handler may have used FPU. Give it up. */
339 disable_msa();
340 lose_fpu(0); 277 lose_fpu(0);
341 } 278 }
342 279
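The helpers deleted above rely on the MSA vector registers aliasing the scalar FP registers: the low 64 bits of vector register i are the FP register already saved by the normal FP path, so only the high halves needed a home in sc_msaregs[]. A hedged sketch of the split the removed code assumed (sc_fpregs[] is the existing FP area in struct sigcontext; this is not code from the patch):

	/* 128-bit vector register i, as the removed helpers viewed it */
	u64 low  = get_fpr64(&current->thread.fpu.fpr[i], 0);	/* -> sc_fpregs[i]  */
	u64 high = get_fpr64(&current->thread.fpu.fpr[i], 1);	/* -> sc_msaregs[i] */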
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 299f956e4db3..bae2e6ee2109 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -30,7 +30,6 @@
30#include <asm/sim.h> 30#include <asm/sim.h>
31#include <asm/ucontext.h> 31#include <asm/ucontext.h>
32#include <asm/fpu.h> 32#include <asm/fpu.h>
33#include <asm/msa.h>
34#include <asm/war.h> 33#include <asm/war.h>
35#include <asm/vdso.h> 34#include <asm/vdso.h>
36#include <asm/dsp.h> 35#include <asm/dsp.h>
@@ -43,9 +42,6 @@ static int (*restore_fp_context32)(struct sigcontext32 __user *sc);
43extern asmlinkage int _save_fp_context32(struct sigcontext32 __user *sc); 42extern asmlinkage int _save_fp_context32(struct sigcontext32 __user *sc);
44extern asmlinkage int _restore_fp_context32(struct sigcontext32 __user *sc); 43extern asmlinkage int _restore_fp_context32(struct sigcontext32 __user *sc);
45 44
46extern asmlinkage int _save_msa_context32(struct sigcontext32 __user *sc);
47extern asmlinkage int _restore_msa_context32(struct sigcontext32 __user *sc);
48
49/* 45/*
50 * Including <asm/unistd.h> would give use the 64-bit syscall numbers ... 46 * Including <asm/unistd.h> would give use the 64-bit syscall numbers ...
51 */ 47 */
@@ -115,59 +111,19 @@ static int copy_fp_from_sigcontext32(struct sigcontext32 __user *sc)
115} 111}
116 112
117/* 113/*
118 * These functions will save only the upper 64 bits of the vector registers,
119 * since the lower 64 bits have already been saved as the scalar FP context.
120 */
121static int copy_msa_to_sigcontext32(struct sigcontext32 __user *sc)
122{
123 int i;
124 int err = 0;
125
126 for (i = 0; i < NUM_FPU_REGS; i++) {
127 err |=
128 __put_user(get_fpr64(&current->thread.fpu.fpr[i], 1),
129 &sc->sc_msaregs[i]);
130 }
131 err |= __put_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
132
133 return err;
134}
135
136static int copy_msa_from_sigcontext32(struct sigcontext32 __user *sc)
137{
138 int i;
139 int err = 0;
140 u64 val;
141
142 for (i = 0; i < NUM_FPU_REGS; i++) {
143 err |= __get_user(val, &sc->sc_msaregs[i]);
144 set_fpr64(&current->thread.fpu.fpr[i], 1, val);
145 }
146 err |= __get_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
147
148 return err;
149}
150
151/*
152 * sigcontext handlers 114 * sigcontext handlers
153 */ 115 */
154static int protected_save_fp_context32(struct sigcontext32 __user *sc, 116static int protected_save_fp_context32(struct sigcontext32 __user *sc)
155 unsigned used_math)
156{ 117{
157 int err; 118 int err;
158 bool save_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
159 while (1) { 119 while (1) {
160 lock_fpu_owner(); 120 lock_fpu_owner();
161 if (is_fpu_owner()) { 121 if (is_fpu_owner()) {
162 err = save_fp_context32(sc); 122 err = save_fp_context32(sc);
163 if (save_msa && !err)
164 err = _save_msa_context32(sc);
165 unlock_fpu_owner(); 123 unlock_fpu_owner();
166 } else { 124 } else {
167 unlock_fpu_owner(); 125 unlock_fpu_owner();
168 err = copy_fp_to_sigcontext32(sc); 126 err = copy_fp_to_sigcontext32(sc);
169 if (save_msa && !err)
170 err = copy_msa_to_sigcontext32(sc);
171 } 127 }
172 if (likely(!err)) 128 if (likely(!err))
173 break; 129 break;
@@ -181,28 +137,17 @@ static int protected_save_fp_context32(struct sigcontext32 __user *sc,
181 return err; 137 return err;
182} 138}
183 139
184static int protected_restore_fp_context32(struct sigcontext32 __user *sc, 140static int protected_restore_fp_context32(struct sigcontext32 __user *sc)
185 unsigned used_math)
186{ 141{
187 int err, tmp __maybe_unused; 142 int err, tmp __maybe_unused;
188 bool restore_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
189 while (1) { 143 while (1) {
190 lock_fpu_owner(); 144 lock_fpu_owner();
191 if (is_fpu_owner()) { 145 if (is_fpu_owner()) {
192 err = restore_fp_context32(sc); 146 err = restore_fp_context32(sc);
193 if (restore_msa && !err) {
194 enable_msa();
195 err = _restore_msa_context32(sc);
196 } else {
197 /* signal handler may have used MSA */
198 disable_msa();
199 }
200 unlock_fpu_owner(); 147 unlock_fpu_owner();
201 } else { 148 } else {
202 unlock_fpu_owner(); 149 unlock_fpu_owner();
203 err = copy_fp_from_sigcontext32(sc); 150 err = copy_fp_from_sigcontext32(sc);
204 if (restore_msa && !err)
205 err = copy_msa_from_sigcontext32(sc);
206 } 151 }
207 if (likely(!err)) 152 if (likely(!err))
208 break; 153 break;
@@ -241,8 +186,7 @@ static int setup_sigcontext32(struct pt_regs *regs,
241 err |= __put_user(mflo3(), &sc->sc_lo3); 186 err |= __put_user(mflo3(), &sc->sc_lo3);
242 } 187 }
243 188
244 used_math = used_math() ? USEDMATH_FP : 0; 189 used_math = !!used_math();
245 used_math |= thread_msa_context_live() ? USEDMATH_MSA : 0;
246 err |= __put_user(used_math, &sc->sc_used_math); 190 err |= __put_user(used_math, &sc->sc_used_math);
247 191
248 if (used_math) { 192 if (used_math) {
@@ -250,21 +194,20 @@ static int setup_sigcontext32(struct pt_regs *regs,
250 * Save FPU state to signal context. Signal handler 194 * Save FPU state to signal context. Signal handler
251 * will "inherit" current FPU state. 195 * will "inherit" current FPU state.
252 */ 196 */
253 err |= protected_save_fp_context32(sc, used_math); 197 err |= protected_save_fp_context32(sc);
254 } 198 }
255 return err; 199 return err;
256} 200}
257 201
258static int 202static int
259check_and_restore_fp_context32(struct sigcontext32 __user *sc, 203check_and_restore_fp_context32(struct sigcontext32 __user *sc)
260 unsigned used_math)
261{ 204{
262 int err, sig; 205 int err, sig;
263 206
264 err = sig = fpcsr_pending(&sc->sc_fpc_csr); 207 err = sig = fpcsr_pending(&sc->sc_fpc_csr);
265 if (err > 0) 208 if (err > 0)
266 err = 0; 209 err = 0;
267 err |= protected_restore_fp_context32(sc, used_math); 210 err |= protected_restore_fp_context32(sc);
268 return err ?: sig; 211 return err ?: sig;
269} 212}
270 213
@@ -301,10 +244,9 @@ static int restore_sigcontext32(struct pt_regs *regs,
301 if (used_math) { 244 if (used_math) {
302 /* restore fpu context if we have used it before */ 245 /* restore fpu context if we have used it before */
303 if (!err) 246 if (!err)
304 err = check_and_restore_fp_context32(sc, used_math); 247 err = check_and_restore_fp_context32(sc);
305 } else { 248 } else {
306 /* signal handler may have used FPU or MSA. Disable them. */ 249 /* signal handler may have used FPU. Give it up. */
307 disable_msa();
308 lose_fpu(0); 250 lose_fpu(0);
309 } 251 }
310 252
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index df0598d9bfdd..949f2c6827a0 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -301,7 +301,7 @@ static int cps_cpu_disable(void)
301 301
302 core_cfg = &mips_cps_core_bootcfg[current_cpu_data.core]; 302 core_cfg = &mips_cps_core_bootcfg[current_cpu_data.core];
303 atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask); 303 atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
304 smp_mb__after_atomic_dec(); 304 smp_mb__after_atomic();
305 set_cpu_online(cpu, false); 305 set_cpu_online(cpu, false);
306 cpu_clear(cpu, cpu_callin_map); 306 cpu_clear(cpu, cpu_callin_map);
307 307
diff --git a/arch/mips/math-emu/ieee754.c b/arch/mips/math-emu/ieee754.c
index 53f1d2287084..8e97acbbe22c 100644
--- a/arch/mips/math-emu/ieee754.c
+++ b/arch/mips/math-emu/ieee754.c
@@ -34,13 +34,22 @@
34 * Special constants 34 * Special constants
35 */ 35 */
36 36
37#define DPCNST(s, b, m) \ 37/*
38 * Older GCC requires the inner braces for initialization of union ieee754dp's
 39 * anonymous struct member. Without them, an error will result.
40 */
41#define xPCNST(s, b, m, ebias) \
38{ \ 42{ \
39 .sign = (s), \ 43 { \
40 .bexp = (b) + DP_EBIAS, \ 44 .sign = (s), \
41 .mant = (m) \ 45 .bexp = (b) + ebias, \
46 .mant = (m) \
47 } \
42} 48}
43 49
50#define DPCNST(s, b, m) \
51 xPCNST(s, b, m, DP_EBIAS)
52
44const union ieee754dp __ieee754dp_spcvals[] = { 53const union ieee754dp __ieee754dp_spcvals[] = {
45 DPCNST(0, DP_EMIN - 1, 0x0000000000000ULL), /* + zero */ 54 DPCNST(0, DP_EMIN - 1, 0x0000000000000ULL), /* + zero */
46 DPCNST(1, DP_EMIN - 1, 0x0000000000000ULL), /* - zero */ 55 DPCNST(1, DP_EMIN - 1, 0x0000000000000ULL), /* - zero */
@@ -62,11 +71,7 @@ const union ieee754dp __ieee754dp_spcvals[] = {
62}; 71};
63 72
64#define SPCNST(s, b, m) \ 73#define SPCNST(s, b, m) \
65{ \ 74 xPCNST(s, b, m, SP_EBIAS)
66 .sign = (s), \
67 .bexp = (b) + SP_EBIAS, \
68 .mant = (m) \
69}
70 75
71const union ieee754sp __ieee754sp_spcvals[] = { 76const union ieee754sp __ieee754sp_spcvals[] = {
72 SPCNST(0, SP_EMIN - 1, 0x000000), /* + zero */ 77 SPCNST(0, SP_EMIN - 1, 0x000000), /* + zero */
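The new comment in this hunk points at a real toolchain quirk: when a union's first member is an anonymous struct, older GCC releases insist on an extra level of braces in the initializer, while newer ones also accept the flat form. A standalone illustration (my own types, not the kernel's ieee754 ones):

	#include <stdio.h>

	union num {
		struct {		/* anonymous struct member */
			int sign;
			int exp;
		};
		long long raw;
	};

	/* the inner braces are what xPCNST() now adds; older GCC errors without them */
	static const union num one = { { .sign = 0, .exp = 1023 } };

	int main(void)
	{
		printf("sign=%d exp=%d\n", one.sign, one.exp);
		return 0;
	}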
diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c
index 775c2800cba2..8399ddf03a02 100644
--- a/arch/mips/mm/uasm-micromips.c
+++ b/arch/mips/mm/uasm-micromips.c
@@ -102,6 +102,7 @@ static struct insn insn_table_MM[] = {
102 { insn_sd, 0, 0 }, 102 { insn_sd, 0, 0 },
103 { insn_sll, M(mm_pool32a_op, 0, 0, 0, 0, mm_sll32_op), RT | RS | RD }, 103 { insn_sll, M(mm_pool32a_op, 0, 0, 0, 0, mm_sll32_op), RT | RS | RD },
104 { insn_sllv, M(mm_pool32a_op, 0, 0, 0, 0, mm_sllv32_op), RT | RS | RD }, 104 { insn_sllv, M(mm_pool32a_op, 0, 0, 0, 0, mm_sllv32_op), RT | RS | RD },
105 { insn_slt, M(mm_pool32a_op, 0, 0, 0, 0, mm_slt_op), RT | RS | RD },
105 { insn_sltiu, M(mm_sltiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM }, 106 { insn_sltiu, M(mm_sltiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
106 { insn_sltu, M(mm_pool32a_op, 0, 0, 0, 0, mm_sltu_op), RT | RS | RD }, 107 { insn_sltu, M(mm_pool32a_op, 0, 0, 0, 0, mm_sltu_op), RT | RS | RD },
107 { insn_sra, M(mm_pool32a_op, 0, 0, 0, 0, mm_sra_op), RT | RS | RD }, 108 { insn_sra, M(mm_pool32a_op, 0, 0, 0, 0, mm_sra_op), RT | RS | RD },
diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c
index 38792c2364f5..6708a2dbf934 100644
--- a/arch/mips/mm/uasm-mips.c
+++ b/arch/mips/mm/uasm-mips.c
@@ -89,7 +89,7 @@ static struct insn insn_table[] = {
89 { insn_lb, M(lb_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 89 { insn_lb, M(lb_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
90 { insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 90 { insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
91 { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD }, 91 { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
92 { insn_lh, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 92 { insn_lh, M(lh_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
93 { insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 93 { insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
94 { insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 94 { insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
95 { insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM }, 95 { insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM },
@@ -110,6 +110,7 @@ static struct insn insn_table[] = {
110 { insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 110 { insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
111 { insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE }, 111 { insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE },
112 { insn_sllv, M(spec_op, 0, 0, 0, 0, sllv_op), RS | RT | RD }, 112 { insn_sllv, M(spec_op, 0, 0, 0, 0, sllv_op), RS | RT | RD },
113 { insn_slt, M(spec_op, 0, 0, 0, 0, slt_op), RS | RT | RD },
113 { insn_sltiu, M(sltiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 114 { insn_sltiu, M(sltiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
114 { insn_sltu, M(spec_op, 0, 0, 0, 0, sltu_op), RS | RT | RD }, 115 { insn_sltu, M(spec_op, 0, 0, 0, 0, sltu_op), RS | RT | RD },
115 { insn_sra, M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE }, 116 { insn_sra, M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE },
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index 00515805fe41..a01b0d6cedd2 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -53,7 +53,7 @@ enum opcode {
53 insn_ld, insn_ldx, insn_lh, insn_ll, insn_lld, insn_lui, insn_lw, 53 insn_ld, insn_ldx, insn_lh, insn_ll, insn_lld, insn_lui, insn_lw,
54 insn_lwx, insn_mfc0, insn_mfhi, insn_mflo, insn_mtc0, insn_mul, 54 insn_lwx, insn_mfc0, insn_mfhi, insn_mflo, insn_mtc0, insn_mul,
55 insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sc, insn_scd, 55 insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sc, insn_scd,
56 insn_sd, insn_sll, insn_sllv, insn_sltiu, insn_sltu, insn_sra, 56 insn_sd, insn_sll, insn_sllv, insn_slt, insn_sltiu, insn_sltu, insn_sra,
57 insn_srl, insn_srlv, insn_subu, insn_sw, insn_sync, insn_syscall, 57 insn_srl, insn_srlv, insn_subu, insn_sw, insn_sync, insn_syscall,
58 insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_wait, insn_wsbh, 58 insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_wait, insn_wsbh,
59 insn_xor, insn_xori, insn_yield, 59 insn_xor, insn_xori, insn_yield,
@@ -139,6 +139,13 @@ Ip_u1u2u3(op) \
139} \ 139} \
140UASM_EXPORT_SYMBOL(uasm_i##op); 140UASM_EXPORT_SYMBOL(uasm_i##op);
141 141
142#define I_s3s1s2(op) \
143Ip_s3s1s2(op) \
144{ \
145 build_insn(buf, insn##op, b, c, a); \
146} \
147UASM_EXPORT_SYMBOL(uasm_i##op);
148
142#define I_u2u1u3(op) \ 149#define I_u2u1u3(op) \
143Ip_u2u1u3(op) \ 150Ip_u2u1u3(op) \
144{ \ 151{ \
@@ -289,6 +296,7 @@ I_u2s3u1(_scd)
289I_u2s3u1(_sd) 296I_u2s3u1(_sd)
290I_u2u1u3(_sll) 297I_u2u1u3(_sll)
291I_u3u2u1(_sllv) 298I_u3u2u1(_sllv)
299I_s3s1s2(_slt)
292I_u2u1s3(_sltiu) 300I_u2u1s3(_sltiu)
293I_u3u1u2(_sltu) 301I_u3u1u2(_sltu)
294I_u2u1u3(_sra) 302I_u2u1u3(_sra)
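The new Ip_s3s1s2()/I_s3s1s2() pair and the insn_slt table entries teach uasm to emit the signed set-on-less-than instruction; the BPF JIT hunks below are the first user, generating slt to flag negative packet offsets. Assuming the generated helper follows the other three-register uasm builders (destination first), usage looks roughly like:

	/* rd = ((s32)rs < (s32)rt) ? 1 : 0 */
	uasm_i_slt(&p, rd, rs, rt);

	/* in the JIT below this becomes: r_s0 = (r_off < 0) */
	emit_slt(r_s0, r_off, r_zero, ctx);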
diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
index a67b9753330b..b87390a56a2f 100644
--- a/arch/mips/net/bpf_jit.c
+++ b/arch/mips/net/bpf_jit.c
@@ -119,8 +119,6 @@
119/* Arguments used by JIT */ 119/* Arguments used by JIT */
120#define ARGS_USED_BY_JIT 2 /* only applicable to 64-bit */ 120#define ARGS_USED_BY_JIT 2 /* only applicable to 64-bit */
121 121
122#define FLAG_NEED_X_RESET (1 << 0)
123
124#define SBIT(x) (1 << (x)) /* Signed version of BIT() */ 122#define SBIT(x) (1 << (x)) /* Signed version of BIT() */
125 123
126/** 124/**
@@ -153,6 +151,8 @@ static inline int optimize_div(u32 *k)
153 return 0; 151 return 0;
154} 152}
155 153
154static inline void emit_jit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx);
155
156/* Simply emit the instruction if the JIT memory space has been allocated */ 156/* Simply emit the instruction if the JIT memory space has been allocated */
157#define emit_instr(ctx, func, ...) \ 157#define emit_instr(ctx, func, ...) \
158do { \ 158do { \
@@ -166,9 +166,7 @@ do { \
166/* Determine if immediate is within the 16-bit signed range */ 166/* Determine if immediate is within the 16-bit signed range */
167static inline bool is_range16(s32 imm) 167static inline bool is_range16(s32 imm)
168{ 168{
169 if (imm >= SBIT(15) || imm < -SBIT(15)) 169 return !(imm >= SBIT(15) || imm < -SBIT(15));
170 return true;
171 return false;
172} 170}
173 171
174static inline void emit_addu(unsigned int dst, unsigned int src1, 172static inline void emit_addu(unsigned int dst, unsigned int src1,
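After this fix is_range16() finally answers the question its name asks, and every caller written against the old inverted behaviour is flipped in the hunks that follow. The boundary cases, as a standalone check (illustrative only):

	#include <stdbool.h>
	#include <stdio.h>

	#define SBIT(x) (1 << (x))	/* signed version of BIT() */

	/* true when imm fits the 16-bit signed immediate field of addiu and friends */
	static bool is_range16(int imm)
	{
		return !(imm >= SBIT(15) || imm < -SBIT(15));
	}

	int main(void)
	{
		printf("%d %d %d %d\n",
		       is_range16(32767),	/* 1: single addiu works        */
		       is_range16(-32768),	/* 1: single addiu works        */
		       is_range16(32768),	/* 0: JIT falls back to lui+ori */
		       is_range16(-32769));	/* 0: JIT falls back to lui+ori */
		return 0;
	}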
@@ -187,7 +185,7 @@ static inline void emit_load_imm(unsigned int dst, u32 imm, struct jit_ctx *ctx)
187{ 185{
188 if (ctx->target != NULL) { 186 if (ctx->target != NULL) {
189 /* addiu can only handle s16 */ 187 /* addiu can only handle s16 */
190 if (is_range16(imm)) { 188 if (!is_range16(imm)) {
191 u32 *p = &ctx->target[ctx->idx]; 189 u32 *p = &ctx->target[ctx->idx];
192 uasm_i_lui(&p, r_tmp_imm, (s32)imm >> 16); 190 uasm_i_lui(&p, r_tmp_imm, (s32)imm >> 16);
193 p = &ctx->target[ctx->idx + 1]; 191 p = &ctx->target[ctx->idx + 1];
@@ -199,7 +197,7 @@ static inline void emit_load_imm(unsigned int dst, u32 imm, struct jit_ctx *ctx)
199 } 197 }
200 ctx->idx++; 198 ctx->idx++;
201 199
202 if (is_range16(imm)) 200 if (!is_range16(imm))
203 ctx->idx++; 201 ctx->idx++;
204} 202}
205 203
@@ -240,7 +238,7 @@ static inline void emit_daddiu(unsigned int dst, unsigned int src,
240static inline void emit_addiu(unsigned int dst, unsigned int src, 238static inline void emit_addiu(unsigned int dst, unsigned int src,
241 u32 imm, struct jit_ctx *ctx) 239 u32 imm, struct jit_ctx *ctx)
242{ 240{
243 if (is_range16(imm)) { 241 if (!is_range16(imm)) {
244 emit_load_imm(r_tmp, imm, ctx); 242 emit_load_imm(r_tmp, imm, ctx);
245 emit_addu(dst, r_tmp, src, ctx); 243 emit_addu(dst, r_tmp, src, ctx);
246 } else { 244 } else {
@@ -313,8 +311,11 @@ static inline void emit_sll(unsigned int dst, unsigned int src,
313 unsigned int sa, struct jit_ctx *ctx) 311 unsigned int sa, struct jit_ctx *ctx)
314{ 312{
315 /* sa is 5-bits long */ 313 /* sa is 5-bits long */
316 BUG_ON(sa >= BIT(5)); 314 if (sa >= BIT(5))
317 emit_instr(ctx, sll, dst, src, sa); 315 /* Shifting >= 32 results in zero */
316 emit_jit_reg_move(dst, r_zero, ctx);
317 else
318 emit_instr(ctx, sll, dst, src, sa);
318} 319}
319 320
320static inline void emit_srlv(unsigned int dst, unsigned int src, 321static inline void emit_srlv(unsigned int dst, unsigned int src,
@@ -327,8 +328,17 @@ static inline void emit_srl(unsigned int dst, unsigned int src,
327 unsigned int sa, struct jit_ctx *ctx) 328 unsigned int sa, struct jit_ctx *ctx)
328{ 329{
329 /* sa is 5-bits long */ 330 /* sa is 5-bits long */
330 BUG_ON(sa >= BIT(5)); 331 if (sa >= BIT(5))
331 emit_instr(ctx, srl, dst, src, sa); 332 /* Shifting >= 32 results in zero */
333 emit_jit_reg_move(dst, r_zero, ctx);
334 else
335 emit_instr(ctx, srl, dst, src, sa);
336}
337
338static inline void emit_slt(unsigned int dst, unsigned int src1,
339 unsigned int src2, struct jit_ctx *ctx)
340{
341 emit_instr(ctx, slt, dst, src1, src2);
332} 342}
333 343
334static inline void emit_sltu(unsigned int dst, unsigned int src1, 344static inline void emit_sltu(unsigned int dst, unsigned int src1,
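Replacing the BUG_ON() in emit_sll()/emit_srl() matters because the sll/srl encodings keep only a 5-bit shift amount: a constant shift of 32 or more would silently wrap modulo 32 instead of producing the zero the filter expects (the patch's own comment: "Shifting >= 32 results in zero"). A small demonstration of the wrap the JIT now avoids:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t a = 0xdeadbeef;
		unsigned int k = 33;

		/* the hardware shift field would only see the low 5 bits of k */
		unsigned int encoded = k & 0x1f;	/* 33 -> 1 */

		printf("expected 0, a truncated sll gives 0x%08x\n", a << encoded);
		return 0;
	}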
@@ -341,7 +351,7 @@ static inline void emit_sltiu(unsigned dst, unsigned int src,
341 unsigned int imm, struct jit_ctx *ctx) 351 unsigned int imm, struct jit_ctx *ctx)
342{ 352{
343 /* 16 bit immediate */ 353 /* 16 bit immediate */
344 if (is_range16((s32)imm)) { 354 if (!is_range16((s32)imm)) {
345 emit_load_imm(r_tmp, imm, ctx); 355 emit_load_imm(r_tmp, imm, ctx);
346 emit_sltu(dst, src, r_tmp, ctx); 356 emit_sltu(dst, src, r_tmp, ctx);
347 } else { 357 } else {
@@ -408,7 +418,7 @@ static inline void emit_div(unsigned int dst, unsigned int src,
408 u32 *p = &ctx->target[ctx->idx]; 418 u32 *p = &ctx->target[ctx->idx];
409 uasm_i_divu(&p, dst, src); 419 uasm_i_divu(&p, dst, src);
410 p = &ctx->target[ctx->idx + 1]; 420 p = &ctx->target[ctx->idx + 1];
411 uasm_i_mfhi(&p, dst); 421 uasm_i_mflo(&p, dst);
412 } 422 }
413 ctx->idx += 2; /* 2 insts */ 423 ctx->idx += 2; /* 2 insts */
414} 424}
@@ -443,6 +453,17 @@ static inline void emit_wsbh(unsigned int dst, unsigned int src,
443 emit_instr(ctx, wsbh, dst, src); 453 emit_instr(ctx, wsbh, dst, src);
444} 454}
445 455
456/* load pointer to register */
457static inline void emit_load_ptr(unsigned int dst, unsigned int src,
458 int imm, struct jit_ctx *ctx)
459{
 460 /* src contains the base addr of the 32/64-bit pointer */
461 if (config_enabled(CONFIG_64BIT))
462 emit_instr(ctx, ld, dst, imm, src);
463 else
464 emit_instr(ctx, lw, dst, imm, src);
465}
466
446/* load a function pointer to register */ 467/* load a function pointer to register */
447static inline void emit_load_func(unsigned int reg, ptr imm, 468static inline void emit_load_func(unsigned int reg, ptr imm,
448 struct jit_ctx *ctx) 469 struct jit_ctx *ctx)
@@ -545,29 +566,13 @@ static inline u16 align_sp(unsigned int num)
545 return num; 566 return num;
546} 567}
547 568
548static inline void update_on_xread(struct jit_ctx *ctx)
549{
550 if (!(ctx->flags & SEEN_X))
551 ctx->flags |= FLAG_NEED_X_RESET;
552
553 ctx->flags |= SEEN_X;
554}
555
556static bool is_load_to_a(u16 inst) 569static bool is_load_to_a(u16 inst)
557{ 570{
558 switch (inst) { 571 switch (inst) {
559 case BPF_S_LD_W_LEN: 572 case BPF_LD | BPF_W | BPF_LEN:
560 case BPF_S_LD_W_ABS: 573 case BPF_LD | BPF_W | BPF_ABS:
561 case BPF_S_LD_H_ABS: 574 case BPF_LD | BPF_H | BPF_ABS:
562 case BPF_S_LD_B_ABS: 575 case BPF_LD | BPF_B | BPF_ABS:
563 case BPF_S_ANC_CPU:
564 case BPF_S_ANC_IFINDEX:
565 case BPF_S_ANC_MARK:
566 case BPF_S_ANC_PROTOCOL:
567 case BPF_S_ANC_RXHASH:
568 case BPF_S_ANC_VLAN_TAG:
569 case BPF_S_ANC_VLAN_TAG_PRESENT:
570 case BPF_S_ANC_QUEUE:
571 return true; 576 return true;
572 default: 577 default:
573 return false; 578 return false;
@@ -618,7 +623,10 @@ static void save_bpf_jit_regs(struct jit_ctx *ctx, unsigned offset)
618 if (ctx->flags & SEEN_MEM) { 623 if (ctx->flags & SEEN_MEM) {
619 if (real_off % (RSIZE * 2)) 624 if (real_off % (RSIZE * 2))
620 real_off += RSIZE; 625 real_off += RSIZE;
621 emit_addiu(r_M, r_sp, real_off, ctx); 626 if (config_enabled(CONFIG_64BIT))
627 emit_daddiu(r_M, r_sp, real_off, ctx);
628 else
629 emit_addiu(r_M, r_sp, real_off, ctx);
622 } 630 }
623} 631}
624 632
@@ -705,11 +713,11 @@ static void build_prologue(struct jit_ctx *ctx)
705 if (ctx->flags & SEEN_SKB) 713 if (ctx->flags & SEEN_SKB)
706 emit_reg_move(r_skb, MIPS_R_A0, ctx); 714 emit_reg_move(r_skb, MIPS_R_A0, ctx);
707 715
708 if (ctx->flags & FLAG_NEED_X_RESET) 716 if (ctx->flags & SEEN_X)
709 emit_jit_reg_move(r_X, r_zero, ctx); 717 emit_jit_reg_move(r_X, r_zero, ctx);
710 718
711 /* Do not leak kernel data to userspace */ 719 /* Do not leak kernel data to userspace */
712 if ((first_inst != BPF_S_RET_K) && !(is_load_to_a(first_inst))) 720 if ((first_inst != (BPF_RET | BPF_K)) && !(is_load_to_a(first_inst)))
713 emit_jit_reg_move(r_A, r_zero, ctx); 721 emit_jit_reg_move(r_A, r_zero, ctx);
714} 722}
715 723
@@ -757,13 +765,17 @@ static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset)
757 return (u64)err << 32 | ntohl(ret); 765 return (u64)err << 32 | ntohl(ret);
758} 766}
759 767
760#define PKT_TYPE_MAX 7 768#ifdef __BIG_ENDIAN_BITFIELD
769#define PKT_TYPE_MAX (7 << 5)
770#else
771#define PKT_TYPE_MAX 7
772#endif
761static int pkt_type_offset(void) 773static int pkt_type_offset(void)
762{ 774{
763 struct sk_buff skb_probe = { 775 struct sk_buff skb_probe = {
764 .pkt_type = ~0, 776 .pkt_type = ~0,
765 }; 777 };
766 char *ct = (char *)&skb_probe; 778 u8 *ct = (u8 *)&skb_probe;
767 unsigned int off; 779 unsigned int off;
768 780
769 for (off = 0; off < sizeof(struct sk_buff); off++) { 781 for (off = 0; off < sizeof(struct sk_buff); off++) {
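pkt_type is a three-bit bitfield that shares its byte with other sk_buff flags, and pkt_type_offset() probes for that byte at run time. On big-endian bit-field layouts the field sits in the top three bits of the byte, which is why PKT_TYPE_MAX becomes 7 << 5 here and the later pkt_type hunk shifts the loaded value right by five. A sketch of the extraction the JIT performs:

	u8 byte = *((u8 *)skb + off);		/* byte found by pkt_type_offset() */

	#ifdef __BIG_ENDIAN_BITFIELD
	u8 pkt_type = (byte & (7 << 5)) >> 5;	/* mask, then srl by 5 */
	#else
	u8 pkt_type = byte & 7;			/* mask only */
	#endif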
@@ -783,46 +795,62 @@ static int build_body(struct jit_ctx *ctx)
783 u32 k, b_off __maybe_unused; 795 u32 k, b_off __maybe_unused;
784 796
785 for (i = 0; i < prog->len; i++) { 797 for (i = 0; i < prog->len; i++) {
798 u16 code;
799
786 inst = &(prog->insns[i]); 800 inst = &(prog->insns[i]);
787 pr_debug("%s: code->0x%02x, jt->0x%x, jf->0x%x, k->0x%x\n", 801 pr_debug("%s: code->0x%02x, jt->0x%x, jf->0x%x, k->0x%x\n",
788 __func__, inst->code, inst->jt, inst->jf, inst->k); 802 __func__, inst->code, inst->jt, inst->jf, inst->k);
789 k = inst->k; 803 k = inst->k;
804 code = bpf_anc_helper(inst);
790 805
791 if (ctx->target == NULL) 806 if (ctx->target == NULL)
792 ctx->offsets[i] = ctx->idx * 4; 807 ctx->offsets[i] = ctx->idx * 4;
793 808
794 switch (inst->code) { 809 switch (code) {
795 case BPF_S_LD_IMM: 810 case BPF_LD | BPF_IMM:
796 /* A <- k ==> li r_A, k */ 811 /* A <- k ==> li r_A, k */
797 ctx->flags |= SEEN_A; 812 ctx->flags |= SEEN_A;
798 emit_load_imm(r_A, k, ctx); 813 emit_load_imm(r_A, k, ctx);
799 break; 814 break;
800 case BPF_S_LD_W_LEN: 815 case BPF_LD | BPF_W | BPF_LEN:
801 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4); 816 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
802 /* A <- len ==> lw r_A, offset(skb) */ 817 /* A <- len ==> lw r_A, offset(skb) */
803 ctx->flags |= SEEN_SKB | SEEN_A; 818 ctx->flags |= SEEN_SKB | SEEN_A;
804 off = offsetof(struct sk_buff, len); 819 off = offsetof(struct sk_buff, len);
805 emit_load(r_A, r_skb, off, ctx); 820 emit_load(r_A, r_skb, off, ctx);
806 break; 821 break;
807 case BPF_S_LD_MEM: 822 case BPF_LD | BPF_MEM:
808 /* A <- M[k] ==> lw r_A, offset(M) */ 823 /* A <- M[k] ==> lw r_A, offset(M) */
809 ctx->flags |= SEEN_MEM | SEEN_A; 824 ctx->flags |= SEEN_MEM | SEEN_A;
810 emit_load(r_A, r_M, SCRATCH_OFF(k), ctx); 825 emit_load(r_A, r_M, SCRATCH_OFF(k), ctx);
811 break; 826 break;
812 case BPF_S_LD_W_ABS: 827 case BPF_LD | BPF_W | BPF_ABS:
813 /* A <- P[k:4] */ 828 /* A <- P[k:4] */
814 load_order = 2; 829 load_order = 2;
815 goto load; 830 goto load;
816 case BPF_S_LD_H_ABS: 831 case BPF_LD | BPF_H | BPF_ABS:
817 /* A <- P[k:2] */ 832 /* A <- P[k:2] */
818 load_order = 1; 833 load_order = 1;
819 goto load; 834 goto load;
820 case BPF_S_LD_B_ABS: 835 case BPF_LD | BPF_B | BPF_ABS:
821 /* A <- P[k:1] */ 836 /* A <- P[k:1] */
822 load_order = 0; 837 load_order = 0;
823load: 838load:
839 /* the interpreter will deal with the negative K */
840 if ((int)k < 0)
841 return -ENOTSUPP;
842
824 emit_load_imm(r_off, k, ctx); 843 emit_load_imm(r_off, k, ctx);
825load_common: 844load_common:
845 /*
 846 * We may have got here from the indirect loads so
847 * return if offset is negative.
848 */
849 emit_slt(r_s0, r_off, r_zero, ctx);
850 emit_bcond(MIPS_COND_NE, r_s0, r_zero,
851 b_imm(prog->len, ctx), ctx);
852 emit_reg_move(r_ret, r_zero, ctx);
853
826 ctx->flags |= SEEN_CALL | SEEN_OFF | SEEN_S0 | 854 ctx->flags |= SEEN_CALL | SEEN_OFF | SEEN_S0 |
827 SEEN_SKB | SEEN_A; 855 SEEN_SKB | SEEN_A;
828 856
@@ -852,39 +880,42 @@ load_common:
852 emit_b(b_imm(prog->len, ctx), ctx); 880 emit_b(b_imm(prog->len, ctx), ctx);
853 emit_reg_move(r_ret, r_zero, ctx); 881 emit_reg_move(r_ret, r_zero, ctx);
854 break; 882 break;
855 case BPF_S_LD_W_IND: 883 case BPF_LD | BPF_W | BPF_IND:
856 /* A <- P[X + k:4] */ 884 /* A <- P[X + k:4] */
857 load_order = 2; 885 load_order = 2;
858 goto load_ind; 886 goto load_ind;
859 case BPF_S_LD_H_IND: 887 case BPF_LD | BPF_H | BPF_IND:
860 /* A <- P[X + k:2] */ 888 /* A <- P[X + k:2] */
861 load_order = 1; 889 load_order = 1;
862 goto load_ind; 890 goto load_ind;
863 case BPF_S_LD_B_IND: 891 case BPF_LD | BPF_B | BPF_IND:
864 /* A <- P[X + k:1] */ 892 /* A <- P[X + k:1] */
865 load_order = 0; 893 load_order = 0;
866load_ind: 894load_ind:
867 update_on_xread(ctx);
868 ctx->flags |= SEEN_OFF | SEEN_X; 895 ctx->flags |= SEEN_OFF | SEEN_X;
869 emit_addiu(r_off, r_X, k, ctx); 896 emit_addiu(r_off, r_X, k, ctx);
870 goto load_common; 897 goto load_common;
871 case BPF_S_LDX_IMM: 898 case BPF_LDX | BPF_IMM:
872 /* X <- k */ 899 /* X <- k */
873 ctx->flags |= SEEN_X; 900 ctx->flags |= SEEN_X;
874 emit_load_imm(r_X, k, ctx); 901 emit_load_imm(r_X, k, ctx);
875 break; 902 break;
876 case BPF_S_LDX_MEM: 903 case BPF_LDX | BPF_MEM:
877 /* X <- M[k] */ 904 /* X <- M[k] */
878 ctx->flags |= SEEN_X | SEEN_MEM; 905 ctx->flags |= SEEN_X | SEEN_MEM;
879 emit_load(r_X, r_M, SCRATCH_OFF(k), ctx); 906 emit_load(r_X, r_M, SCRATCH_OFF(k), ctx);
880 break; 907 break;
881 case BPF_S_LDX_W_LEN: 908 case BPF_LDX | BPF_W | BPF_LEN:
882 /* X <- len */ 909 /* X <- len */
883 ctx->flags |= SEEN_X | SEEN_SKB; 910 ctx->flags |= SEEN_X | SEEN_SKB;
884 off = offsetof(struct sk_buff, len); 911 off = offsetof(struct sk_buff, len);
885 emit_load(r_X, r_skb, off, ctx); 912 emit_load(r_X, r_skb, off, ctx);
886 break; 913 break;
887 case BPF_S_LDX_B_MSH: 914 case BPF_LDX | BPF_B | BPF_MSH:
915 /* the interpreter will deal with the negative K */
916 if ((int)k < 0)
917 return -ENOTSUPP;
918
888 /* X <- 4 * (P[k:1] & 0xf) */ 919 /* X <- 4 * (P[k:1] & 0xf) */
889 ctx->flags |= SEEN_X | SEEN_CALL | SEEN_S0 | SEEN_SKB; 920 ctx->flags |= SEEN_X | SEEN_CALL | SEEN_S0 | SEEN_SKB;
890 /* Load offset to a1 */ 921 /* Load offset to a1 */
@@ -917,50 +948,49 @@ load_ind:
917 emit_b(b_imm(prog->len, ctx), ctx); 948 emit_b(b_imm(prog->len, ctx), ctx);
918 emit_load_imm(r_ret, 0, ctx); /* delay slot */ 949 emit_load_imm(r_ret, 0, ctx); /* delay slot */
919 break; 950 break;
920 case BPF_S_ST: 951 case BPF_ST:
921 /* M[k] <- A */ 952 /* M[k] <- A */
922 ctx->flags |= SEEN_MEM | SEEN_A; 953 ctx->flags |= SEEN_MEM | SEEN_A;
923 emit_store(r_A, r_M, SCRATCH_OFF(k), ctx); 954 emit_store(r_A, r_M, SCRATCH_OFF(k), ctx);
924 break; 955 break;
925 case BPF_S_STX: 956 case BPF_STX:
926 /* M[k] <- X */ 957 /* M[k] <- X */
927 ctx->flags |= SEEN_MEM | SEEN_X; 958 ctx->flags |= SEEN_MEM | SEEN_X;
928 emit_store(r_X, r_M, SCRATCH_OFF(k), ctx); 959 emit_store(r_X, r_M, SCRATCH_OFF(k), ctx);
929 break; 960 break;
930 case BPF_S_ALU_ADD_K: 961 case BPF_ALU | BPF_ADD | BPF_K:
931 /* A += K */ 962 /* A += K */
932 ctx->flags |= SEEN_A; 963 ctx->flags |= SEEN_A;
933 emit_addiu(r_A, r_A, k, ctx); 964 emit_addiu(r_A, r_A, k, ctx);
934 break; 965 break;
935 case BPF_S_ALU_ADD_X: 966 case BPF_ALU | BPF_ADD | BPF_X:
936 /* A += X */ 967 /* A += X */
937 ctx->flags |= SEEN_A | SEEN_X; 968 ctx->flags |= SEEN_A | SEEN_X;
938 emit_addu(r_A, r_A, r_X, ctx); 969 emit_addu(r_A, r_A, r_X, ctx);
939 break; 970 break;
940 case BPF_S_ALU_SUB_K: 971 case BPF_ALU | BPF_SUB | BPF_K:
941 /* A -= K */ 972 /* A -= K */
942 ctx->flags |= SEEN_A; 973 ctx->flags |= SEEN_A;
943 emit_addiu(r_A, r_A, -k, ctx); 974 emit_addiu(r_A, r_A, -k, ctx);
944 break; 975 break;
945 case BPF_S_ALU_SUB_X: 976 case BPF_ALU | BPF_SUB | BPF_X:
946 /* A -= X */ 977 /* A -= X */
947 ctx->flags |= SEEN_A | SEEN_X; 978 ctx->flags |= SEEN_A | SEEN_X;
948 emit_subu(r_A, r_A, r_X, ctx); 979 emit_subu(r_A, r_A, r_X, ctx);
949 break; 980 break;
950 case BPF_S_ALU_MUL_K: 981 case BPF_ALU | BPF_MUL | BPF_K:
951 /* A *= K */ 982 /* A *= K */
952 /* Load K to scratch register before MUL */ 983 /* Load K to scratch register before MUL */
953 ctx->flags |= SEEN_A | SEEN_S0; 984 ctx->flags |= SEEN_A | SEEN_S0;
954 emit_load_imm(r_s0, k, ctx); 985 emit_load_imm(r_s0, k, ctx);
955 emit_mul(r_A, r_A, r_s0, ctx); 986 emit_mul(r_A, r_A, r_s0, ctx);
956 break; 987 break;
957 case BPF_S_ALU_MUL_X: 988 case BPF_ALU | BPF_MUL | BPF_X:
958 /* A *= X */ 989 /* A *= X */
959 update_on_xread(ctx);
960 ctx->flags |= SEEN_A | SEEN_X; 990 ctx->flags |= SEEN_A | SEEN_X;
961 emit_mul(r_A, r_A, r_X, ctx); 991 emit_mul(r_A, r_A, r_X, ctx);
962 break; 992 break;
963 case BPF_S_ALU_DIV_K: 993 case BPF_ALU | BPF_DIV | BPF_K:
964 /* A /= k */ 994 /* A /= k */
965 if (k == 1) 995 if (k == 1)
966 break; 996 break;
@@ -973,7 +1003,7 @@ load_ind:
973 emit_load_imm(r_s0, k, ctx); 1003 emit_load_imm(r_s0, k, ctx);
974 emit_div(r_A, r_s0, ctx); 1004 emit_div(r_A, r_s0, ctx);
975 break; 1005 break;
976 case BPF_S_ALU_MOD_K: 1006 case BPF_ALU | BPF_MOD | BPF_K:
977 /* A %= k */ 1007 /* A %= k */
978 if (k == 1 || optimize_div(&k)) { 1008 if (k == 1 || optimize_div(&k)) {
979 ctx->flags |= SEEN_A; 1009 ctx->flags |= SEEN_A;
@@ -984,9 +1014,8 @@ load_ind:
984 emit_mod(r_A, r_s0, ctx); 1014 emit_mod(r_A, r_s0, ctx);
985 } 1015 }
986 break; 1016 break;
987 case BPF_S_ALU_DIV_X: 1017 case BPF_ALU | BPF_DIV | BPF_X:
988 /* A /= X */ 1018 /* A /= X */
989 update_on_xread(ctx);
990 ctx->flags |= SEEN_X | SEEN_A; 1019 ctx->flags |= SEEN_X | SEEN_A;
991 /* Check if r_X is zero */ 1020 /* Check if r_X is zero */
992 emit_bcond(MIPS_COND_EQ, r_X, r_zero, 1021 emit_bcond(MIPS_COND_EQ, r_X, r_zero,
@@ -994,9 +1023,8 @@ load_ind:
994 emit_load_imm(r_val, 0, ctx); /* delay slot */ 1023 emit_load_imm(r_val, 0, ctx); /* delay slot */
995 emit_div(r_A, r_X, ctx); 1024 emit_div(r_A, r_X, ctx);
996 break; 1025 break;
997 case BPF_S_ALU_MOD_X: 1026 case BPF_ALU | BPF_MOD | BPF_X:
998 /* A %= X */ 1027 /* A %= X */
999 update_on_xread(ctx);
1000 ctx->flags |= SEEN_X | SEEN_A; 1028 ctx->flags |= SEEN_X | SEEN_A;
1001 /* Check if r_X is zero */ 1029 /* Check if r_X is zero */
1002 emit_bcond(MIPS_COND_EQ, r_X, r_zero, 1030 emit_bcond(MIPS_COND_EQ, r_X, r_zero,
@@ -1004,94 +1032,89 @@ load_ind:
1004 emit_load_imm(r_val, 0, ctx); /* delay slot */ 1032 emit_load_imm(r_val, 0, ctx); /* delay slot */
1005 emit_mod(r_A, r_X, ctx); 1033 emit_mod(r_A, r_X, ctx);
1006 break; 1034 break;
1007 case BPF_S_ALU_OR_K: 1035 case BPF_ALU | BPF_OR | BPF_K:
1008 /* A |= K */ 1036 /* A |= K */
1009 ctx->flags |= SEEN_A; 1037 ctx->flags |= SEEN_A;
1010 emit_ori(r_A, r_A, k, ctx); 1038 emit_ori(r_A, r_A, k, ctx);
1011 break; 1039 break;
1012 case BPF_S_ALU_OR_X: 1040 case BPF_ALU | BPF_OR | BPF_X:
1013 /* A |= X */ 1041 /* A |= X */
1014 update_on_xread(ctx);
1015 ctx->flags |= SEEN_A; 1042 ctx->flags |= SEEN_A;
1016 emit_ori(r_A, r_A, r_X, ctx); 1043 emit_ori(r_A, r_A, r_X, ctx);
1017 break; 1044 break;
1018 case BPF_S_ALU_XOR_K: 1045 case BPF_ALU | BPF_XOR | BPF_K:
1019 /* A ^= k */ 1046 /* A ^= k */
1020 ctx->flags |= SEEN_A; 1047 ctx->flags |= SEEN_A;
1021 emit_xori(r_A, r_A, k, ctx); 1048 emit_xori(r_A, r_A, k, ctx);
1022 break; 1049 break;
1023 case BPF_S_ANC_ALU_XOR_X: 1050 case BPF_ANC | SKF_AD_ALU_XOR_X:
1024 case BPF_S_ALU_XOR_X: 1051 case BPF_ALU | BPF_XOR | BPF_X:
1025 /* A ^= X */ 1052 /* A ^= X */
1026 update_on_xread(ctx);
1027 ctx->flags |= SEEN_A; 1053 ctx->flags |= SEEN_A;
1028 emit_xor(r_A, r_A, r_X, ctx); 1054 emit_xor(r_A, r_A, r_X, ctx);
1029 break; 1055 break;
1030 case BPF_S_ALU_AND_K: 1056 case BPF_ALU | BPF_AND | BPF_K:
1031 /* A &= K */ 1057 /* A &= K */
1032 ctx->flags |= SEEN_A; 1058 ctx->flags |= SEEN_A;
1033 emit_andi(r_A, r_A, k, ctx); 1059 emit_andi(r_A, r_A, k, ctx);
1034 break; 1060 break;
1035 case BPF_S_ALU_AND_X: 1061 case BPF_ALU | BPF_AND | BPF_X:
1036 /* A &= X */ 1062 /* A &= X */
1037 update_on_xread(ctx);
1038 ctx->flags |= SEEN_A | SEEN_X; 1063 ctx->flags |= SEEN_A | SEEN_X;
1039 emit_and(r_A, r_A, r_X, ctx); 1064 emit_and(r_A, r_A, r_X, ctx);
1040 break; 1065 break;
1041 case BPF_S_ALU_LSH_K: 1066 case BPF_ALU | BPF_LSH | BPF_K:
1042 /* A <<= K */ 1067 /* A <<= K */
1043 ctx->flags |= SEEN_A; 1068 ctx->flags |= SEEN_A;
1044 emit_sll(r_A, r_A, k, ctx); 1069 emit_sll(r_A, r_A, k, ctx);
1045 break; 1070 break;
1046 case BPF_S_ALU_LSH_X: 1071 case BPF_ALU | BPF_LSH | BPF_X:
1047 /* A <<= X */ 1072 /* A <<= X */
1048 ctx->flags |= SEEN_A | SEEN_X; 1073 ctx->flags |= SEEN_A | SEEN_X;
1049 update_on_xread(ctx);
1050 emit_sllv(r_A, r_A, r_X, ctx); 1074 emit_sllv(r_A, r_A, r_X, ctx);
1051 break; 1075 break;
1052 case BPF_S_ALU_RSH_K: 1076 case BPF_ALU | BPF_RSH | BPF_K:
1053 /* A >>= K */ 1077 /* A >>= K */
1054 ctx->flags |= SEEN_A; 1078 ctx->flags |= SEEN_A;
1055 emit_srl(r_A, r_A, k, ctx); 1079 emit_srl(r_A, r_A, k, ctx);
1056 break; 1080 break;
1057 case BPF_S_ALU_RSH_X: 1081 case BPF_ALU | BPF_RSH | BPF_X:
1058 ctx->flags |= SEEN_A | SEEN_X; 1082 ctx->flags |= SEEN_A | SEEN_X;
1059 update_on_xread(ctx);
1060 emit_srlv(r_A, r_A, r_X, ctx); 1083 emit_srlv(r_A, r_A, r_X, ctx);
1061 break; 1084 break;
1062 case BPF_S_ALU_NEG: 1085 case BPF_ALU | BPF_NEG:
1063 /* A = -A */ 1086 /* A = -A */
1064 ctx->flags |= SEEN_A; 1087 ctx->flags |= SEEN_A;
1065 emit_neg(r_A, ctx); 1088 emit_neg(r_A, ctx);
1066 break; 1089 break;
1067 case BPF_S_JMP_JA: 1090 case BPF_JMP | BPF_JA:
1068 /* pc += K */ 1091 /* pc += K */
1069 emit_b(b_imm(i + k + 1, ctx), ctx); 1092 emit_b(b_imm(i + k + 1, ctx), ctx);
1070 emit_nop(ctx); 1093 emit_nop(ctx);
1071 break; 1094 break;
1072 case BPF_S_JMP_JEQ_K: 1095 case BPF_JMP | BPF_JEQ | BPF_K:
1073 /* pc += ( A == K ) ? pc->jt : pc->jf */ 1096 /* pc += ( A == K ) ? pc->jt : pc->jf */
1074 condt = MIPS_COND_EQ | MIPS_COND_K; 1097 condt = MIPS_COND_EQ | MIPS_COND_K;
1075 goto jmp_cmp; 1098 goto jmp_cmp;
1076 case BPF_S_JMP_JEQ_X: 1099 case BPF_JMP | BPF_JEQ | BPF_X:
1077 ctx->flags |= SEEN_X; 1100 ctx->flags |= SEEN_X;
1078 /* pc += ( A == X ) ? pc->jt : pc->jf */ 1101 /* pc += ( A == X ) ? pc->jt : pc->jf */
1079 condt = MIPS_COND_EQ | MIPS_COND_X; 1102 condt = MIPS_COND_EQ | MIPS_COND_X;
1080 goto jmp_cmp; 1103 goto jmp_cmp;
1081 case BPF_S_JMP_JGE_K: 1104 case BPF_JMP | BPF_JGE | BPF_K:
1082 /* pc += ( A >= K ) ? pc->jt : pc->jf */ 1105 /* pc += ( A >= K ) ? pc->jt : pc->jf */
1083 condt = MIPS_COND_GE | MIPS_COND_K; 1106 condt = MIPS_COND_GE | MIPS_COND_K;
1084 goto jmp_cmp; 1107 goto jmp_cmp;
1085 case BPF_S_JMP_JGE_X: 1108 case BPF_JMP | BPF_JGE | BPF_X:
1086 ctx->flags |= SEEN_X; 1109 ctx->flags |= SEEN_X;
1087 /* pc += ( A >= X ) ? pc->jt : pc->jf */ 1110 /* pc += ( A >= X ) ? pc->jt : pc->jf */
1088 condt = MIPS_COND_GE | MIPS_COND_X; 1111 condt = MIPS_COND_GE | MIPS_COND_X;
1089 goto jmp_cmp; 1112 goto jmp_cmp;
1090 case BPF_S_JMP_JGT_K: 1113 case BPF_JMP | BPF_JGT | BPF_K:
1091 /* pc += ( A > K ) ? pc->jt : pc->jf */ 1114 /* pc += ( A > K ) ? pc->jt : pc->jf */
1092 condt = MIPS_COND_GT | MIPS_COND_K; 1115 condt = MIPS_COND_GT | MIPS_COND_K;
1093 goto jmp_cmp; 1116 goto jmp_cmp;
1094 case BPF_S_JMP_JGT_X: 1117 case BPF_JMP | BPF_JGT | BPF_X:
1095 ctx->flags |= SEEN_X; 1118 ctx->flags |= SEEN_X;
1096 /* pc += ( A > X ) ? pc->jt : pc->jf */ 1119 /* pc += ( A > X ) ? pc->jt : pc->jf */
1097 condt = MIPS_COND_GT | MIPS_COND_X; 1120 condt = MIPS_COND_GT | MIPS_COND_X;
@@ -1109,7 +1132,7 @@ jmp_cmp:
1109 } 1132 }
1110 /* A < (K|X) ? r_scratch = 1 */ 1133 /* A < (K|X) ? r_scratch = 1 */
1111 b_off = b_imm(i + inst->jf + 1, ctx); 1134 b_off = b_imm(i + inst->jf + 1, ctx);
1112 emit_bcond(MIPS_COND_GT, r_s0, r_zero, b_off, 1135 emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off,
1113 ctx); 1136 ctx);
1114 emit_nop(ctx); 1137 emit_nop(ctx);
1115 /* A > (K|X) ? scratch = 0 */ 1138 /* A > (K|X) ? scratch = 0 */
@@ -1167,7 +1190,7 @@ jmp_cmp:
1167 } 1190 }
1168 } 1191 }
1169 break; 1192 break;
1170 case BPF_S_JMP_JSET_K: 1193 case BPF_JMP | BPF_JSET | BPF_K:
1171 ctx->flags |= SEEN_S0 | SEEN_S1 | SEEN_A; 1194 ctx->flags |= SEEN_S0 | SEEN_S1 | SEEN_A;
1172 /* pc += (A & K) ? pc -> jt : pc -> jf */ 1195 /* pc += (A & K) ? pc -> jt : pc -> jf */
1173 emit_load_imm(r_s1, k, ctx); 1196 emit_load_imm(r_s1, k, ctx);
@@ -1181,7 +1204,7 @@ jmp_cmp:
1181 emit_b(b_off, ctx); 1204 emit_b(b_off, ctx);
1182 emit_nop(ctx); 1205 emit_nop(ctx);
1183 break; 1206 break;
1184 case BPF_S_JMP_JSET_X: 1207 case BPF_JMP | BPF_JSET | BPF_X:
1185 ctx->flags |= SEEN_S0 | SEEN_X | SEEN_A; 1208 ctx->flags |= SEEN_S0 | SEEN_X | SEEN_A;
1186 /* pc += (A & X) ? pc -> jt : pc -> jf */ 1209 /* pc += (A & X) ? pc -> jt : pc -> jf */
1187 emit_and(r_s0, r_A, r_X, ctx); 1210 emit_and(r_s0, r_A, r_X, ctx);
@@ -1194,7 +1217,7 @@ jmp_cmp:
1194 emit_b(b_off, ctx); 1217 emit_b(b_off, ctx);
1195 emit_nop(ctx); 1218 emit_nop(ctx);
1196 break; 1219 break;
1197 case BPF_S_RET_A: 1220 case BPF_RET | BPF_A:
1198 ctx->flags |= SEEN_A; 1221 ctx->flags |= SEEN_A;
1199 if (i != prog->len - 1) 1222 if (i != prog->len - 1)
1200 /* 1223 /*
@@ -1204,7 +1227,7 @@ jmp_cmp:
1204 emit_b(b_imm(prog->len, ctx), ctx); 1227 emit_b(b_imm(prog->len, ctx), ctx);
1205 emit_reg_move(r_ret, r_A, ctx); /* delay slot */ 1228 emit_reg_move(r_ret, r_A, ctx); /* delay slot */
1206 break; 1229 break;
1207 case BPF_S_RET_K: 1230 case BPF_RET | BPF_K:
1208 /* 1231 /*
1209 * It can emit two instructions so it does not fit on 1232 * It can emit two instructions so it does not fit on
1210 * the delay slot. 1233 * the delay slot.
@@ -1219,19 +1242,18 @@ jmp_cmp:
1219 emit_nop(ctx); 1242 emit_nop(ctx);
1220 } 1243 }
1221 break; 1244 break;
1222 case BPF_S_MISC_TAX: 1245 case BPF_MISC | BPF_TAX:
1223 /* X = A */ 1246 /* X = A */
1224 ctx->flags |= SEEN_X | SEEN_A; 1247 ctx->flags |= SEEN_X | SEEN_A;
1225 emit_jit_reg_move(r_X, r_A, ctx); 1248 emit_jit_reg_move(r_X, r_A, ctx);
1226 break; 1249 break;
1227 case BPF_S_MISC_TXA: 1250 case BPF_MISC | BPF_TXA:
1228 /* A = X */ 1251 /* A = X */
1229 ctx->flags |= SEEN_A | SEEN_X; 1252 ctx->flags |= SEEN_A | SEEN_X;
1230 update_on_xread(ctx);
1231 emit_jit_reg_move(r_A, r_X, ctx); 1253 emit_jit_reg_move(r_A, r_X, ctx);
1232 break; 1254 break;
1233 /* AUX */ 1255 /* AUX */
1234 case BPF_S_ANC_PROTOCOL: 1256 case BPF_ANC | SKF_AD_PROTOCOL:
1235 /* A = ntohs(skb->protocol) */ 1257 /* A = ntohs(skb->protocol) */
1236 ctx->flags |= SEEN_SKB | SEEN_OFF | SEEN_A; 1258 ctx->flags |= SEEN_SKB | SEEN_OFF | SEEN_A;
1237 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, 1259 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
@@ -1256,7 +1278,7 @@ jmp_cmp:
1256 } 1278 }
1257#endif 1279#endif
1258 break; 1280 break;
1259 case BPF_S_ANC_CPU: 1281 case BPF_ANC | SKF_AD_CPU:
1260 ctx->flags |= SEEN_A | SEEN_OFF; 1282 ctx->flags |= SEEN_A | SEEN_OFF;
1261 /* A = current_thread_info()->cpu */ 1283 /* A = current_thread_info()->cpu */
1262 BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, 1284 BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info,
@@ -1265,11 +1287,12 @@ jmp_cmp:
1265 /* $28/gp points to the thread_info struct */ 1287 /* $28/gp points to the thread_info struct */
1266 emit_load(r_A, 28, off, ctx); 1288 emit_load(r_A, 28, off, ctx);
1267 break; 1289 break;
1268 case BPF_S_ANC_IFINDEX: 1290 case BPF_ANC | SKF_AD_IFINDEX:
1269 /* A = skb->dev->ifindex */ 1291 /* A = skb->dev->ifindex */
1270 ctx->flags |= SEEN_SKB | SEEN_A | SEEN_S0; 1292 ctx->flags |= SEEN_SKB | SEEN_A | SEEN_S0;
1271 off = offsetof(struct sk_buff, dev); 1293 off = offsetof(struct sk_buff, dev);
1272 emit_load(r_s0, r_skb, off, ctx); 1294 /* Load *dev pointer */
1295 emit_load_ptr(r_s0, r_skb, off, ctx);
1273 /* error (0) in the delay slot */ 1296 /* error (0) in the delay slot */
1274 emit_bcond(MIPS_COND_EQ, r_s0, r_zero, 1297 emit_bcond(MIPS_COND_EQ, r_s0, r_zero,
1275 b_imm(prog->len, ctx), ctx); 1298 b_imm(prog->len, ctx), ctx);
@@ -1279,31 +1302,36 @@ jmp_cmp:
1279 off = offsetof(struct net_device, ifindex); 1302 off = offsetof(struct net_device, ifindex);
1280 emit_load(r_A, r_s0, off, ctx); 1303 emit_load(r_A, r_s0, off, ctx);
1281 break; 1304 break;
1282 case BPF_S_ANC_MARK: 1305 case BPF_ANC | SKF_AD_MARK:
1283 ctx->flags |= SEEN_SKB | SEEN_A; 1306 ctx->flags |= SEEN_SKB | SEEN_A;
1284 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); 1307 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
1285 off = offsetof(struct sk_buff, mark); 1308 off = offsetof(struct sk_buff, mark);
1286 emit_load(r_A, r_skb, off, ctx); 1309 emit_load(r_A, r_skb, off, ctx);
1287 break; 1310 break;
1288 case BPF_S_ANC_RXHASH: 1311 case BPF_ANC | SKF_AD_RXHASH:
1289 ctx->flags |= SEEN_SKB | SEEN_A; 1312 ctx->flags |= SEEN_SKB | SEEN_A;
1290 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4); 1313 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
1291 off = offsetof(struct sk_buff, hash); 1314 off = offsetof(struct sk_buff, hash);
1292 emit_load(r_A, r_skb, off, ctx); 1315 emit_load(r_A, r_skb, off, ctx);
1293 break; 1316 break;
1294 case BPF_S_ANC_VLAN_TAG: 1317 case BPF_ANC | SKF_AD_VLAN_TAG:
1295 case BPF_S_ANC_VLAN_TAG_PRESENT: 1318 case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
1296 ctx->flags |= SEEN_SKB | SEEN_S0 | SEEN_A; 1319 ctx->flags |= SEEN_SKB | SEEN_S0 | SEEN_A;
1297 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, 1320 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
1298 vlan_tci) != 2); 1321 vlan_tci) != 2);
1299 off = offsetof(struct sk_buff, vlan_tci); 1322 off = offsetof(struct sk_buff, vlan_tci);
1300 emit_half_load(r_s0, r_skb, off, ctx); 1323 emit_half_load(r_s0, r_skb, off, ctx);
1301 if (inst->code == BPF_S_ANC_VLAN_TAG) 1324 if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
1302 emit_and(r_A, r_s0, VLAN_VID_MASK, ctx); 1325 emit_andi(r_A, r_s0, (u16)~VLAN_TAG_PRESENT, ctx);
1303 else 1326 } else {
1304 emit_and(r_A, r_s0, VLAN_TAG_PRESENT, ctx); 1327 emit_andi(r_A, r_s0, VLAN_TAG_PRESENT, ctx);
1328 /* return 1 if present */
1329 emit_sltu(r_A, r_zero, r_A, ctx);
1330 }
1305 break; 1331 break;
1306 case BPF_S_ANC_PKTTYPE: 1332 case BPF_ANC | SKF_AD_PKTTYPE:
1333 ctx->flags |= SEEN_SKB;
1334
1307 off = pkt_type_offset(); 1335 off = pkt_type_offset();
1308 1336
1309 if (off < 0) 1337 if (off < 0)
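For the two VLAN ancillary loads the hunk above now matches what the core filter code returns: SKF_AD_VLAN_TAG is the tag with the "present" bit cleared, and SKF_AD_VLAN_TAG_PRESENT is a strict 0/1, which the JIT derives with sltu r_A, $zero, r_A. In C terms (sketch, not part of the patch):

	u16 tci = skb->vlan_tci;

	/* BPF_ANC | SKF_AD_VLAN_TAG */
	A = tci & ~VLAN_TAG_PRESENT;

	/* BPF_ANC | SKF_AD_VLAN_TAG_PRESENT; sltu($zero, A) is "A != 0" */
	A = !!(tci & VLAN_TAG_PRESENT);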
@@ -1311,8 +1339,12 @@ jmp_cmp:
1311 emit_load_byte(r_tmp, r_skb, off, ctx); 1339 emit_load_byte(r_tmp, r_skb, off, ctx);
1312 /* Keep only the last 3 bits */ 1340 /* Keep only the last 3 bits */
1313 emit_andi(r_A, r_tmp, PKT_TYPE_MAX, ctx); 1341 emit_andi(r_A, r_tmp, PKT_TYPE_MAX, ctx);
1342#ifdef __BIG_ENDIAN_BITFIELD
1343 /* Get the actual packet type to the lower 3 bits */
1344 emit_srl(r_A, r_A, 5, ctx);
1345#endif
1314 break; 1346 break;
1315 case BPF_S_ANC_QUEUE: 1347 case BPF_ANC | SKF_AD_QUEUE:
1316 ctx->flags |= SEEN_SKB | SEEN_A; 1348 ctx->flags |= SEEN_SKB | SEEN_A;
1317 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, 1349 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
1318 queue_mapping) != 2); 1350 queue_mapping) != 2);
@@ -1322,8 +1354,8 @@ jmp_cmp:
1322 emit_half_load(r_A, r_skb, off, ctx); 1354 emit_half_load(r_A, r_skb, off, ctx);
1323 break; 1355 break;
1324 default: 1356 default:
1325 pr_warn("%s: Unhandled opcode: 0x%02x\n", __FILE__, 1357 pr_debug("%s: Unhandled opcode: 0x%02x\n", __FILE__,
1326 inst->code); 1358 inst->code);
1327 return -1; 1359 return -1;
1328 } 1360 }
1329 } 1361 }
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 790352f93700..35d16bd2760b 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -303,7 +303,6 @@ config PPC_EARLY_DEBUG_OPAL_VTERMNO
303 This corresponds to which /dev/hvcN you want to use for early 303 This corresponds to which /dev/hvcN you want to use for early
304 debug. 304 debug.
305 305
306 On OPAL v1 (takeover) this should always be 0
307 On OPAL v2, this will be 0 for network console and 1 or 2 for 306 On OPAL v2, this will be 0 for network console and 1 or 2 for
308 the machine built-in serial ports. 307 the machine built-in serial ports.
309 308
diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
index 37991e154ef8..840a5509b3f1 100644
--- a/arch/powerpc/include/asm/code-patching.h
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -88,4 +88,15 @@ static inline unsigned long ppc_function_entry(void *func)
88#endif 88#endif
89} 89}
90 90
91static inline unsigned long ppc_global_function_entry(void *func)
92{
93#if defined(CONFIG_PPC64) && defined(_CALL_ELF) && _CALL_ELF == 2
94 /* On PPC64 ABIv2 the global entry point is at the address */
95 return (unsigned long)func;
96#else
97 /* In all other cases there is no change vs ppc_function_entry() */
98 return ppc_function_entry(func);
99#endif
100}
101
91#endif /* _ASM_POWERPC_CODE_PATCHING_H */ 102#endif /* _ASM_POWERPC_CODE_PATCHING_H */
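Under the ELFv2 (ABIv2) ABI a function carries two entry points: the global entry at the symbol address, which materialises the TOC pointer in r2, and a local entry a couple of instructions later that assumes r2 is already valid. ppc_function_entry() resolves to the local entry on ABIv2, so code that branches in from a context with no usable TOC, such as the ftrace patching further down, wants the new helper instead. A hedged usage sketch:

	/* caller cannot guarantee r2: branch to the global entry */
	unsigned long dest = ppc_global_function_entry((void *)addr);

	/* same-TOC callers can keep using the local entry */
	unsigned long local = ppc_function_entry((void *)addr);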
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index 460018889ba9..0da1dbd42e02 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -12,27 +12,7 @@
12#ifndef __OPAL_H 12#ifndef __OPAL_H
13#define __OPAL_H 13#define __OPAL_H
14 14
15/****** Takeover interface ********/
16
17/* PAPR H-Call used to query the HAL existence and/or instantiate
18 * it from within pHyp (tech preview only).
19 *
20 * This is exclusively used in prom_init.c
21 */
22
23#ifndef __ASSEMBLY__ 15#ifndef __ASSEMBLY__
24
25struct opal_takeover_args {
26 u64 k_image; /* r4 */
27 u64 k_size; /* r5 */
28 u64 k_entry; /* r6 */
29 u64 k_entry2; /* r7 */
30 u64 hal_addr; /* r8 */
31 u64 rd_image; /* r9 */
32 u64 rd_size; /* r10 */
33 u64 rd_loc; /* r11 */
34};
35
36/* 16/*
37 * SG entry 17 * SG entry
38 * 18 *
@@ -55,15 +35,6 @@ struct opal_sg_list {
55/* We calculate number of sg entries based on PAGE_SIZE */ 35/* We calculate number of sg entries based on PAGE_SIZE */
56#define SG_ENTRIES_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct opal_sg_entry)) 36#define SG_ENTRIES_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct opal_sg_entry))
57 37
58extern long opal_query_takeover(u64 *hal_size, u64 *hal_align);
59
60extern long opal_do_takeover(struct opal_takeover_args *args);
61
62struct rtas_args;
63extern int opal_enter_rtas(struct rtas_args *args,
64 unsigned long data,
65 unsigned long entry);
66
67#endif /* __ASSEMBLY__ */ 38#endif /* __ASSEMBLY__ */
68 39
69/****** OPAL APIs ******/ 40/****** OPAL APIs ******/
diff --git a/arch/powerpc/include/asm/swab.h b/arch/powerpc/include/asm/swab.h
index b9bd1ca944d0..96f59de61855 100644
--- a/arch/powerpc/include/asm/swab.h
+++ b/arch/powerpc/include/asm/swab.h
@@ -9,10 +9,6 @@
9 9
10#include <uapi/asm/swab.h> 10#include <uapi/asm/swab.h>
11 11
12#ifdef __GNUC__
13#ifndef __powerpc64__
14#endif /* __powerpc64__ */
15
16static __inline__ __u16 ld_le16(const volatile __u16 *addr) 12static __inline__ __u16 ld_le16(const volatile __u16 *addr)
17{ 13{
18 __u16 val; 14 __u16 val;
@@ -20,19 +16,12 @@ static __inline__ __u16 ld_le16(const volatile __u16 *addr)
20 __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr)); 16 __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
21 return val; 17 return val;
22} 18}
23#define __arch_swab16p ld_le16
24 19
25static __inline__ void st_le16(volatile __u16 *addr, const __u16 val) 20static __inline__ void st_le16(volatile __u16 *addr, const __u16 val)
26{ 21{
27 __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr)); 22 __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
28} 23}
29 24
30static inline void __arch_swab16s(__u16 *addr)
31{
32 st_le16(addr, *addr);
33}
34#define __arch_swab16s __arch_swab16s
35
36static __inline__ __u32 ld_le32(const volatile __u32 *addr) 25static __inline__ __u32 ld_le32(const volatile __u32 *addr)
37{ 26{
38 __u32 val; 27 __u32 val;
@@ -40,42 +29,10 @@ static __inline__ __u32 ld_le32(const volatile __u32 *addr)
40 __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr)); 29 __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
41 return val; 30 return val;
42} 31}
43#define __arch_swab32p ld_le32
44 32
45static __inline__ void st_le32(volatile __u32 *addr, const __u32 val) 33static __inline__ void st_le32(volatile __u32 *addr, const __u32 val)
46{ 34{
47 __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr)); 35 __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
48} 36}
49 37
50static inline void __arch_swab32s(__u32 *addr)
51{
52 st_le32(addr, *addr);
53}
54#define __arch_swab32s __arch_swab32s
55
56static inline __attribute_const__ __u16 __arch_swab16(__u16 value)
57{
58 __u16 result;
59
60 __asm__("rlwimi %0,%1,8,16,23"
61 : "=r" (result)
62 : "r" (value), "0" (value >> 8));
63 return result;
64}
65#define __arch_swab16 __arch_swab16
66
67static inline __attribute_const__ __u32 __arch_swab32(__u32 value)
68{
69 __u32 result;
70
71 __asm__("rlwimi %0,%1,24,16,23\n\t"
72 "rlwimi %0,%1,8,8,15\n\t"
73 "rlwimi %0,%1,24,0,7"
74 : "=r" (result)
75 : "r" (value), "0" (value >> 24));
76 return result;
77}
78#define __arch_swab32 __arch_swab32
79
80#endif /* __GNUC__ */
81#endif /* _ASM_POWERPC_SWAB_H */ 38#endif /* _ASM_POWERPC_SWAB_H */
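
Note: the swab.h hunk deletes the open-coded rlwimi-based __arch_swab16/__arch_swab32 helpers and the __arch_swab*p/__arch_swab*s wrappers, while keeping the byte-reversing loads and stores (lhbrx, sthbrx, lwbrx, stwbrx). For reference, a portable sketch of what the removed swab helpers computed; the generic kernel swab code (or the compiler's byte-swap builtins) now covers this.

#include <stdint.h>
#include <stdio.h>

/* Swap the two bytes of a 16-bit value. */
static uint16_t swab16(uint16_t v)
{
	return (uint16_t)((v << 8) | (v >> 8));
}

/* Reverse the byte order of a 32-bit value. */
static uint32_t swab32(uint32_t v)
{
	return (v << 24) | ((v & 0x0000ff00u) << 8) |
	       ((v & 0x00ff0000u) >> 8) | (v >> 24);
}

int main(void)
{
	printf("%#x -> %#x\n", 0x1234, swab16(0x1234));            /* 0x3412 */
	printf("%#x -> %#x\n", 0x12345678u, swab32(0x12345678u));  /* 0x78563412 */
	return 0;
}
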
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
index f202d0731b06..d178834fe508 100644
--- a/arch/powerpc/kernel/ftrace.c
+++ b/arch/powerpc/kernel/ftrace.c
@@ -10,6 +10,8 @@
10 * 10 *
11 */ 11 */
12 12
13#define pr_fmt(fmt) "ftrace-powerpc: " fmt
14
13#include <linux/spinlock.h> 15#include <linux/spinlock.h>
14#include <linux/hardirq.h> 16#include <linux/hardirq.h>
15#include <linux/uaccess.h> 17#include <linux/uaccess.h>
@@ -105,7 +107,7 @@ __ftrace_make_nop(struct module *mod,
105 struct dyn_ftrace *rec, unsigned long addr) 107 struct dyn_ftrace *rec, unsigned long addr)
106{ 108{
107 unsigned int op; 109 unsigned int op;
108 unsigned long ptr; 110 unsigned long entry, ptr;
109 unsigned long ip = rec->ip; 111 unsigned long ip = rec->ip;
110 void *tramp; 112 void *tramp;
111 113
@@ -115,7 +117,7 @@ __ftrace_make_nop(struct module *mod,
115 117
116 /* Make sure that that this is still a 24bit jump */ 118 /* Make sure that that this is still a 24bit jump */
117 if (!is_bl_op(op)) { 119 if (!is_bl_op(op)) {
118 printk(KERN_ERR "Not expected bl: opcode is %x\n", op); 120 pr_err("Not expected bl: opcode is %x\n", op);
119 return -EINVAL; 121 return -EINVAL;
120 } 122 }
121 123
@@ -125,21 +127,21 @@ __ftrace_make_nop(struct module *mod,
125 pr_devel("ip:%lx jumps to %p", ip, tramp); 127 pr_devel("ip:%lx jumps to %p", ip, tramp);
126 128
127 if (!is_module_trampoline(tramp)) { 129 if (!is_module_trampoline(tramp)) {
128 printk(KERN_ERR "Not a trampoline\n"); 130 pr_err("Not a trampoline\n");
129 return -EINVAL; 131 return -EINVAL;
130 } 132 }
131 133
132 if (module_trampoline_target(mod, tramp, &ptr)) { 134 if (module_trampoline_target(mod, tramp, &ptr)) {
133 printk(KERN_ERR "Failed to get trampoline target\n"); 135 pr_err("Failed to get trampoline target\n");
134 return -EFAULT; 136 return -EFAULT;
135 } 137 }
136 138
137 pr_devel("trampoline target %lx", ptr); 139 pr_devel("trampoline target %lx", ptr);
138 140
141 entry = ppc_global_function_entry((void *)addr);
139 /* This should match what was called */ 142 /* This should match what was called */
140 if (ptr != ppc_function_entry((void *)addr)) { 143 if (ptr != entry) {
141 printk(KERN_ERR "addr %lx does not match expected %lx\n", 144 pr_err("addr %lx does not match expected %lx\n", ptr, entry);
142 ptr, ppc_function_entry((void *)addr));
143 return -EINVAL; 145 return -EINVAL;
144 } 146 }
145 147
@@ -179,7 +181,7 @@ __ftrace_make_nop(struct module *mod,
179 181
180 /* Make sure that that this is still a 24bit jump */ 182 /* Make sure that that this is still a 24bit jump */
181 if (!is_bl_op(op)) { 183 if (!is_bl_op(op)) {
182 printk(KERN_ERR "Not expected bl: opcode is %x\n", op); 184 pr_err("Not expected bl: opcode is %x\n", op);
183 return -EINVAL; 185 return -EINVAL;
184 } 186 }
185 187
@@ -198,7 +200,7 @@ __ftrace_make_nop(struct module *mod,
198 200
199 /* Find where the trampoline jumps to */ 201 /* Find where the trampoline jumps to */
200 if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) { 202 if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
201 printk(KERN_ERR "Failed to read %lx\n", tramp); 203 pr_err("Failed to read %lx\n", tramp);
202 return -EFAULT; 204 return -EFAULT;
203 } 205 }
204 206
@@ -209,7 +211,7 @@ __ftrace_make_nop(struct module *mod,
209 ((jmp[1] & 0xffff0000) != 0x398c0000) || 211 ((jmp[1] & 0xffff0000) != 0x398c0000) ||
210 (jmp[2] != 0x7d8903a6) || 212 (jmp[2] != 0x7d8903a6) ||
211 (jmp[3] != 0x4e800420)) { 213 (jmp[3] != 0x4e800420)) {
212 printk(KERN_ERR "Not a trampoline\n"); 214 pr_err("Not a trampoline\n");
213 return -EINVAL; 215 return -EINVAL;
214 } 216 }
215 217
@@ -221,8 +223,7 @@ __ftrace_make_nop(struct module *mod,
221 pr_devel(" %lx ", tramp); 223 pr_devel(" %lx ", tramp);
222 224
223 if (tramp != addr) { 225 if (tramp != addr) {
224 printk(KERN_ERR 226 pr_err("Trampoline location %08lx does not match addr\n",
225 "Trampoline location %08lx does not match addr\n",
226 tramp); 227 tramp);
227 return -EINVAL; 228 return -EINVAL;
228 } 229 }
@@ -263,15 +264,13 @@ int ftrace_make_nop(struct module *mod,
263 */ 264 */
264 if (!rec->arch.mod) { 265 if (!rec->arch.mod) {
265 if (!mod) { 266 if (!mod) {
266 printk(KERN_ERR "No module loaded addr=%lx\n", 267 pr_err("No module loaded addr=%lx\n", addr);
267 addr);
268 return -EFAULT; 268 return -EFAULT;
269 } 269 }
270 rec->arch.mod = mod; 270 rec->arch.mod = mod;
271 } else if (mod) { 271 } else if (mod) {
272 if (mod != rec->arch.mod) { 272 if (mod != rec->arch.mod) {
273 printk(KERN_ERR 273 pr_err("Record mod %p not equal to passed in mod %p\n",
274 "Record mod %p not equal to passed in mod %p\n",
275 rec->arch.mod, mod); 274 rec->arch.mod, mod);
276 return -EINVAL; 275 return -EINVAL;
277 } 276 }
@@ -307,26 +306,25 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
307 * The load offset is different depending on the ABI. For simplicity 306 * The load offset is different depending on the ABI. For simplicity
308 * just mask it out when doing the compare. 307 * just mask it out when doing the compare.
309 */ 308 */
310 if ((op[0] != 0x48000008) || ((op[1] & 0xffff00000) != 0xe8410000)) { 309 if ((op[0] != 0x48000008) || ((op[1] & 0xffff0000) != 0xe8410000)) {
311 printk(KERN_ERR "Unexpected call sequence: %x %x\n", 310 pr_err("Unexpected call sequence: %x %x\n", op[0], op[1]);
312 op[0], op[1]);
313 return -EINVAL; 311 return -EINVAL;
314 } 312 }
315 313
316 /* If we never set up a trampoline to ftrace_caller, then bail */ 314 /* If we never set up a trampoline to ftrace_caller, then bail */
317 if (!rec->arch.mod->arch.tramp) { 315 if (!rec->arch.mod->arch.tramp) {
318 printk(KERN_ERR "No ftrace trampoline\n"); 316 pr_err("No ftrace trampoline\n");
319 return -EINVAL; 317 return -EINVAL;
320 } 318 }
321 319
322 /* Ensure branch is within 24 bits */ 320 /* Ensure branch is within 24 bits */
323 if (create_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) { 321 if (!create_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) {
324 printk(KERN_ERR "Branch out of range"); 322 pr_err("Branch out of range\n");
325 return -EINVAL; 323 return -EINVAL;
326 } 324 }
327 325
328 if (patch_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) { 326 if (patch_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) {
329 printk(KERN_ERR "REL24 out of range!\n"); 327 pr_err("REL24 out of range!\n");
330 return -EINVAL; 328 return -EINVAL;
331 } 329 }
332 330
@@ -345,13 +343,13 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
345 343
346 /* It should be pointing to a nop */ 344 /* It should be pointing to a nop */
347 if (op != PPC_INST_NOP) { 345 if (op != PPC_INST_NOP) {
348 printk(KERN_ERR "Expected NOP but have %x\n", op); 346 pr_err("Expected NOP but have %x\n", op);
349 return -EINVAL; 347 return -EINVAL;
350 } 348 }
351 349
352 /* If we never set up a trampoline to ftrace_caller, then bail */ 350 /* If we never set up a trampoline to ftrace_caller, then bail */
353 if (!rec->arch.mod->arch.tramp) { 351 if (!rec->arch.mod->arch.tramp) {
354 printk(KERN_ERR "No ftrace trampoline\n"); 352 pr_err("No ftrace trampoline\n");
355 return -EINVAL; 353 return -EINVAL;
356 } 354 }
357 355
@@ -359,7 +357,7 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
359 op = create_branch((unsigned int *)ip, 357 op = create_branch((unsigned int *)ip,
360 rec->arch.mod->arch.tramp, BRANCH_SET_LINK); 358 rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
361 if (!op) { 359 if (!op) {
362 printk(KERN_ERR "REL24 out of range!\n"); 360 pr_err("REL24 out of range!\n");
363 return -EINVAL; 361 return -EINVAL;
364 } 362 }
365 363
@@ -397,7 +395,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
397 * already have a module defined. 395 * already have a module defined.
398 */ 396 */
399 if (!rec->arch.mod) { 397 if (!rec->arch.mod) {
400 printk(KERN_ERR "No module loaded\n"); 398 pr_err("No module loaded\n");
401 return -EINVAL; 399 return -EINVAL;
402 } 400 }
403 401
diff --git a/arch/powerpc/kernel/iomap.c b/arch/powerpc/kernel/iomap.c
index b82227e7e21b..12e48d56f771 100644
--- a/arch/powerpc/kernel/iomap.c
+++ b/arch/powerpc/kernel/iomap.c
@@ -23,7 +23,7 @@ unsigned int ioread16(void __iomem *addr)
23} 23}
24unsigned int ioread16be(void __iomem *addr) 24unsigned int ioread16be(void __iomem *addr)
25{ 25{
26 return in_be16(addr); 26 return readw_be(addr);
27} 27}
28unsigned int ioread32(void __iomem *addr) 28unsigned int ioread32(void __iomem *addr)
29{ 29{
@@ -31,7 +31,7 @@ unsigned int ioread32(void __iomem *addr)
31} 31}
32unsigned int ioread32be(void __iomem *addr) 32unsigned int ioread32be(void __iomem *addr)
33{ 33{
34 return in_be32(addr); 34 return readl_be(addr);
35} 35}
36EXPORT_SYMBOL(ioread8); 36EXPORT_SYMBOL(ioread8);
37EXPORT_SYMBOL(ioread16); 37EXPORT_SYMBOL(ioread16);
@@ -49,7 +49,7 @@ void iowrite16(u16 val, void __iomem *addr)
49} 49}
50void iowrite16be(u16 val, void __iomem *addr) 50void iowrite16be(u16 val, void __iomem *addr)
51{ 51{
52 out_be16(addr, val); 52 writew_be(val, addr);
53} 53}
54void iowrite32(u32 val, void __iomem *addr) 54void iowrite32(u32 val, void __iomem *addr)
55{ 55{
@@ -57,7 +57,7 @@ void iowrite32(u32 val, void __iomem *addr)
57} 57}
58void iowrite32be(u32 val, void __iomem *addr) 58void iowrite32be(u32 val, void __iomem *addr)
59{ 59{
60 out_be32(addr, val); 60 writel_be(val, addr);
61} 61}
62EXPORT_SYMBOL(iowrite8); 62EXPORT_SYMBOL(iowrite8);
63EXPORT_SYMBOL(iowrite16); 63EXPORT_SYMBOL(iowrite16);
@@ -75,15 +75,15 @@ EXPORT_SYMBOL(iowrite32be);
75 */ 75 */
76void ioread8_rep(void __iomem *addr, void *dst, unsigned long count) 76void ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
77{ 77{
78 _insb((u8 __iomem *) addr, dst, count); 78 readsb(addr, dst, count);
79} 79}
80void ioread16_rep(void __iomem *addr, void *dst, unsigned long count) 80void ioread16_rep(void __iomem *addr, void *dst, unsigned long count)
81{ 81{
82 _insw_ns((u16 __iomem *) addr, dst, count); 82 readsw(addr, dst, count);
83} 83}
84void ioread32_rep(void __iomem *addr, void *dst, unsigned long count) 84void ioread32_rep(void __iomem *addr, void *dst, unsigned long count)
85{ 85{
86 _insl_ns((u32 __iomem *) addr, dst, count); 86 readsl(addr, dst, count);
87} 87}
88EXPORT_SYMBOL(ioread8_rep); 88EXPORT_SYMBOL(ioread8_rep);
89EXPORT_SYMBOL(ioread16_rep); 89EXPORT_SYMBOL(ioread16_rep);
@@ -91,15 +91,15 @@ EXPORT_SYMBOL(ioread32_rep);
91 91
92void iowrite8_rep(void __iomem *addr, const void *src, unsigned long count) 92void iowrite8_rep(void __iomem *addr, const void *src, unsigned long count)
93{ 93{
94 _outsb((u8 __iomem *) addr, src, count); 94 writesb(addr, src, count);
95} 95}
96void iowrite16_rep(void __iomem *addr, const void *src, unsigned long count) 96void iowrite16_rep(void __iomem *addr, const void *src, unsigned long count)
97{ 97{
98 _outsw_ns((u16 __iomem *) addr, src, count); 98 writesw(addr, src, count);
99} 99}
100void iowrite32_rep(void __iomem *addr, const void *src, unsigned long count) 100void iowrite32_rep(void __iomem *addr, const void *src, unsigned long count)
101{ 101{
102 _outsl_ns((u32 __iomem *) addr, src, count); 102 writesl(addr, src, count);
103} 103}
104EXPORT_SYMBOL(iowrite8_rep); 104EXPORT_SYMBOL(iowrite8_rep);
105EXPORT_SYMBOL(iowrite16_rep); 105EXPORT_SYMBOL(iowrite16_rep);
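
Note: the iomap.c hunk switches the big-endian and repeated accessors from the powerpc-private in_be*/out_be*/_ins*/_outs* helpers to the generic readw_be/writew_be/reads*/writes* forms used in the hunk itself. Conceptually, a *_be read is a raw load followed by a big-endian-to-CPU conversion; here is a userspace sketch of that idea, using an ordinary byte buffer as a stand-in for a device register (not the kernel's actual implementation).

#include <stdint.h>
#include <stdio.h>

/* Interpret the four bytes at addr as a big-endian 32-bit register value. */
static uint32_t ioread32be_sketch(const uint8_t *addr)
{
	return ((uint32_t)addr[0] << 24) | ((uint32_t)addr[1] << 16) |
	       ((uint32_t)addr[2] << 8)  |  (uint32_t)addr[3];
}

int main(void)
{
	const uint8_t reg[4] = { 0x12, 0x34, 0x56, 0x78 };

	printf("%#x\n", ioread32be_sketch(reg));   /* 0x12345678 on any host */
	return 0;
}
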
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 90fab64d911d..2f72af82513c 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -32,6 +32,7 @@
32#include <linux/module.h> 32#include <linux/module.h>
33#include <linux/kdebug.h> 33#include <linux/kdebug.h>
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <asm/code-patching.h>
35#include <asm/cacheflush.h> 36#include <asm/cacheflush.h>
36#include <asm/sstep.h> 37#include <asm/sstep.h>
37#include <asm/uaccess.h> 38#include <asm/uaccess.h>
@@ -491,12 +492,10 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
491 return ret; 492 return ret;
492} 493}
493 494
494#ifdef CONFIG_PPC64
495unsigned long arch_deref_entry_point(void *entry) 495unsigned long arch_deref_entry_point(void *entry)
496{ 496{
497 return ((func_descr_t *)entry)->entry; 497 return ppc_global_function_entry(entry);
498} 498}
499#endif
500 499
501int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) 500int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
502{ 501{
@@ -508,8 +507,12 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
508 /* setup return addr to the jprobe handler routine */ 507 /* setup return addr to the jprobe handler routine */
509 regs->nip = arch_deref_entry_point(jp->entry); 508 regs->nip = arch_deref_entry_point(jp->entry);
510#ifdef CONFIG_PPC64 509#ifdef CONFIG_PPC64
510#if defined(_CALL_ELF) && _CALL_ELF == 2
511 regs->gpr[12] = (unsigned long)jp->entry;
512#else
511 regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc); 513 regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
512#endif 514#endif
515#endif
513 516
514 return 1; 517 return 1;
515} 518}
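
Note: the kprobes.c hunk drops the PPC64-only descriptor dereference in favour of ppc_global_function_entry(), and loads r12 rather than r2 for ELFv2 jprobes. The underlying distinction is that on ELFv1 a function symbol names a descriptor (entry address, TOC pointer, environment), whereas on ELFv2 it names the code directly and r12 is expected to hold that address at entry. A toy illustration of the descriptor idea follows; the struct layout is a stand-in for the concept, not the kernel's func_descr_t definition.

#include <stdio.h>

struct fake_func_descr {
	unsigned long entry;   /* address of the first instruction */
	unsigned long toc;     /* TOC base the callee expects in r2 */
	unsigned long env;
};

/* ELFv1-style: the callable address lives behind the descriptor. */
static unsigned long deref_entry_elfv1(const struct fake_func_descr *d)
{
	return d->entry;
}

int main(void)
{
	struct fake_func_descr d = { .entry = 0x10001000, .toc = 0x10020000, .env = 0 };

	printf("ELFv1 entry via descriptor: %#lx\n", deref_entry_elfv1(&d));
	printf("ELFv2: the symbol address itself is the entry point\n");
	return 0;
}
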
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index 077d2ce6c5a7..d807ee626af9 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -315,8 +315,17 @@ static void dedotify_versions(struct modversion_info *vers,
315 struct modversion_info *end; 315 struct modversion_info *end;
316 316
317 for (end = (void *)vers + size; vers < end; vers++) 317 for (end = (void *)vers + size; vers < end; vers++)
318 if (vers->name[0] == '.') 318 if (vers->name[0] == '.') {
319 memmove(vers->name, vers->name+1, strlen(vers->name)); 319 memmove(vers->name, vers->name+1, strlen(vers->name));
320#ifdef ARCH_RELOCATES_KCRCTAB
321 /* The TOC symbol has no CRC computed. To avoid CRC
322 * check failing, we must force it to the expected
323 * value (see CRC check in module.c).
324 */
325 if (!strcmp(vers->name, "TOC."))
326 vers->crc = -(unsigned long)reloc_start;
327#endif
328 }
320} 329}
321 330
322/* Undefined symbols which refer to .funcname, hack to funcname (or .TOC.) */ 331/* Undefined symbols which refer to .funcname, hack to funcname (or .TOC.) */
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 613a860a203c..b694b0730971 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -662,13 +662,6 @@ void __init early_init_devtree(void *params)
662 of_scan_flat_dt(early_init_dt_scan_fw_dump, NULL); 662 of_scan_flat_dt(early_init_dt_scan_fw_dump, NULL);
663#endif 663#endif
664 664
665 /* Pre-initialize the cmd_line with the content of boot_commmand_line,
666 * which will be empty except when the content of the variable has
667 * been overriden by a bootloading mechanism. This happens typically
668 * with HAL takeover
669 */
670 strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
671
672 /* Retrieve various informations from the /chosen node of the 665 /* Retrieve various informations from the /chosen node of the
673 * device-tree, including the platform type, initrd location and 666 * device-tree, including the platform type, initrd location and
674 * size, TCE reserve, and more ... 667 * size, TCE reserve, and more ...
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 078145acf7fb..1a85d8f96739 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -1268,201 +1268,6 @@ static u64 __initdata prom_opal_base;
1268static u64 __initdata prom_opal_entry; 1268static u64 __initdata prom_opal_entry;
1269#endif 1269#endif
1270 1270
1271#ifdef __BIG_ENDIAN__
1272/* XXX Don't change this structure without updating opal-takeover.S */
1273static struct opal_secondary_data {
1274 s64 ack; /* 0 */
1275 u64 go; /* 8 */
1276 struct opal_takeover_args args; /* 16 */
1277} opal_secondary_data;
1278
1279static u64 __initdata prom_opal_align;
1280static u64 __initdata prom_opal_size;
1281static int __initdata prom_rtas_start_cpu;
1282static u64 __initdata prom_rtas_data;
1283static u64 __initdata prom_rtas_entry;
1284
1285extern char opal_secondary_entry;
1286
1287static void __init prom_query_opal(void)
1288{
1289 long rc;
1290
1291 /* We must not query for OPAL presence on a machine that
1292 * supports TNK takeover (970 blades), as this uses the same
1293 * h-call with different arguments and will crash
1294 */
1295 if (PHANDLE_VALID(call_prom("finddevice", 1, 1,
1296 ADDR("/tnk-memory-map")))) {
1297 prom_printf("TNK takeover detected, skipping OPAL check\n");
1298 return;
1299 }
1300
1301 prom_printf("Querying for OPAL presence... ");
1302
1303 rc = opal_query_takeover(&prom_opal_size,
1304 &prom_opal_align);
1305 prom_debug("(rc = %ld) ", rc);
1306 if (rc != 0) {
1307 prom_printf("not there.\n");
1308 return;
1309 }
1310 of_platform = PLATFORM_OPAL;
1311 prom_printf(" there !\n");
1312 prom_debug(" opal_size = 0x%lx\n", prom_opal_size);
1313 prom_debug(" opal_align = 0x%lx\n", prom_opal_align);
1314 if (prom_opal_align < 0x10000)
1315 prom_opal_align = 0x10000;
1316}
1317
1318static int __init prom_rtas_call(int token, int nargs, int nret,
1319 int *outputs, ...)
1320{
1321 struct rtas_args rtas_args;
1322 va_list list;
1323 int i;
1324
1325 rtas_args.token = token;
1326 rtas_args.nargs = nargs;
1327 rtas_args.nret = nret;
1328 rtas_args.rets = (rtas_arg_t *)&(rtas_args.args[nargs]);
1329 va_start(list, outputs);
1330 for (i = 0; i < nargs; ++i)
1331 rtas_args.args[i] = va_arg(list, rtas_arg_t);
1332 va_end(list);
1333
1334 for (i = 0; i < nret; ++i)
1335 rtas_args.rets[i] = 0;
1336
1337 opal_enter_rtas(&rtas_args, prom_rtas_data,
1338 prom_rtas_entry);
1339
1340 if (nret > 1 && outputs != NULL)
1341 for (i = 0; i < nret-1; ++i)
1342 outputs[i] = rtas_args.rets[i+1];
1343 return (nret > 0)? rtas_args.rets[0]: 0;
1344}
1345
1346static void __init prom_opal_hold_cpus(void)
1347{
1348 int i, cnt, cpu, rc;
1349 long j;
1350 phandle node;
1351 char type[64];
1352 u32 servers[8];
1353 void *entry = (unsigned long *)&opal_secondary_entry;
1354 struct opal_secondary_data *data = &opal_secondary_data;
1355
1356 prom_debug("prom_opal_hold_cpus: start...\n");
1357 prom_debug(" - entry = 0x%x\n", entry);
1358 prom_debug(" - data = 0x%x\n", data);
1359
1360 data->ack = -1;
1361 data->go = 0;
1362
1363 /* look for cpus */
1364 for (node = 0; prom_next_node(&node); ) {
1365 type[0] = 0;
1366 prom_getprop(node, "device_type", type, sizeof(type));
1367 if (strcmp(type, "cpu") != 0)
1368 continue;
1369
1370 /* Skip non-configured cpus. */
1371 if (prom_getprop(node, "status", type, sizeof(type)) > 0)
1372 if (strcmp(type, "okay") != 0)
1373 continue;
1374
1375 cnt = prom_getprop(node, "ibm,ppc-interrupt-server#s", servers,
1376 sizeof(servers));
1377 if (cnt == PROM_ERROR)
1378 break;
1379 cnt >>= 2;
1380 for (i = 0; i < cnt; i++) {
1381 cpu = servers[i];
1382 prom_debug("CPU %d ... ", cpu);
1383 if (cpu == prom.cpu) {
1384 prom_debug("booted !\n");
1385 continue;
1386 }
1387 prom_debug("starting ... ");
1388
1389 /* Init the acknowledge var which will be reset by
1390 * the secondary cpu when it awakens from its OF
1391 * spinloop.
1392 */
1393 data->ack = -1;
1394 rc = prom_rtas_call(prom_rtas_start_cpu, 3, 1,
1395 NULL, cpu, entry, data);
1396 prom_debug("rtas rc=%d ...", rc);
1397
1398 for (j = 0; j < 100000000 && data->ack == -1; j++) {
1399 HMT_low();
1400 mb();
1401 }
1402 HMT_medium();
1403 if (data->ack != -1)
1404 prom_debug("done, PIR=0x%x\n", data->ack);
1405 else
1406 prom_debug("timeout !\n");
1407 }
1408 }
1409 prom_debug("prom_opal_hold_cpus: end...\n");
1410}
1411
1412static void __init prom_opal_takeover(void)
1413{
1414 struct opal_secondary_data *data = &opal_secondary_data;
1415 struct opal_takeover_args *args = &data->args;
1416 u64 align = prom_opal_align;
1417 u64 top_addr, opal_addr;
1418
1419 args->k_image = (u64)_stext;
1420 args->k_size = _end - _stext;
1421 args->k_entry = 0;
1422 args->k_entry2 = 0x60;
1423
1424 top_addr = _ALIGN_UP(args->k_size, align);
1425
1426 if (prom_initrd_start != 0) {
1427 args->rd_image = prom_initrd_start;
1428 args->rd_size = prom_initrd_end - args->rd_image;
1429 args->rd_loc = top_addr;
1430 top_addr = _ALIGN_UP(args->rd_loc + args->rd_size, align);
1431 }
1432
1433 /* Pickup an address for the HAL. We want to go really high
1434 * up to avoid problem with future kexecs. On the other hand
1435 * we don't want to be all over the TCEs on P5IOC2 machines
1436 * which are going to be up there too. We assume the machine
1437 * has plenty of memory, and we ask for the HAL for now to
1438 * be just below the 1G point, or above the initrd
1439 */
1440 opal_addr = _ALIGN_DOWN(0x40000000 - prom_opal_size, align);
1441 if (opal_addr < top_addr)
1442 opal_addr = top_addr;
1443 args->hal_addr = opal_addr;
1444
1445 /* Copy the command line to the kernel image */
1446 strlcpy(boot_command_line, prom_cmd_line,
1447 COMMAND_LINE_SIZE);
1448
1449 prom_debug(" k_image = 0x%lx\n", args->k_image);
1450 prom_debug(" k_size = 0x%lx\n", args->k_size);
1451 prom_debug(" k_entry = 0x%lx\n", args->k_entry);
1452 prom_debug(" k_entry2 = 0x%lx\n", args->k_entry2);
1453 prom_debug(" hal_addr = 0x%lx\n", args->hal_addr);
1454 prom_debug(" rd_image = 0x%lx\n", args->rd_image);
1455 prom_debug(" rd_size = 0x%lx\n", args->rd_size);
1456 prom_debug(" rd_loc = 0x%lx\n", args->rd_loc);
1457 prom_printf("Performing OPAL takeover,this can take a few minutes..\n");
1458 prom_close_stdin();
1459 mb();
1460 data->go = 1;
1461 for (;;)
1462 opal_do_takeover(args);
1463}
1464#endif /* __BIG_ENDIAN__ */
1465
1466/* 1271/*
1467 * Allocate room for and instantiate OPAL 1272 * Allocate room for and instantiate OPAL
1468 */ 1273 */
@@ -1597,12 +1402,6 @@ static void __init prom_instantiate_rtas(void)
1597 &val, sizeof(val)) != PROM_ERROR) 1402 &val, sizeof(val)) != PROM_ERROR)
1598 rtas_has_query_cpu_stopped = true; 1403 rtas_has_query_cpu_stopped = true;
1599 1404
1600#if defined(CONFIG_PPC_POWERNV) && defined(__BIG_ENDIAN__)
1601 /* PowerVN takeover hack */
1602 prom_rtas_data = base;
1603 prom_rtas_entry = entry;
1604 prom_getprop(rtas_node, "start-cpu", &prom_rtas_start_cpu, 4);
1605#endif
1606 prom_debug("rtas base = 0x%x\n", base); 1405 prom_debug("rtas base = 0x%x\n", base);
1607 prom_debug("rtas entry = 0x%x\n", entry); 1406 prom_debug("rtas entry = 0x%x\n", entry);
1608 prom_debug("rtas size = 0x%x\n", (long)size); 1407 prom_debug("rtas size = 0x%x\n", (long)size);
@@ -3027,16 +2826,6 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
3027 prom_instantiate_rtas(); 2826 prom_instantiate_rtas();
3028 2827
3029#ifdef CONFIG_PPC_POWERNV 2828#ifdef CONFIG_PPC_POWERNV
3030#ifdef __BIG_ENDIAN__
3031 /* Detect HAL and try instanciating it & doing takeover */
3032 if (of_platform == PLATFORM_PSERIES_LPAR) {
3033 prom_query_opal();
3034 if (of_platform == PLATFORM_OPAL) {
3035 prom_opal_hold_cpus();
3036 prom_opal_takeover();
3037 }
3038 } else
3039#endif /* __BIG_ENDIAN__ */
3040 if (of_platform == PLATFORM_OPAL) 2829 if (of_platform == PLATFORM_OPAL)
3041 prom_instantiate_opal(); 2830 prom_instantiate_opal();
3042#endif /* CONFIG_PPC_POWERNV */ 2831#endif /* CONFIG_PPC_POWERNV */
diff --git a/arch/powerpc/kernel/prom_init_check.sh b/arch/powerpc/kernel/prom_init_check.sh
index 77aa1e95e904..fe8e54b9ef7d 100644
--- a/arch/powerpc/kernel/prom_init_check.sh
+++ b/arch/powerpc/kernel/prom_init_check.sh
@@ -21,9 +21,7 @@ _end enter_prom memcpy memset reloc_offset __secondary_hold
21__secondary_hold_acknowledge __secondary_hold_spinloop __start 21__secondary_hold_acknowledge __secondary_hold_spinloop __start
22strcmp strcpy strlcpy strlen strncmp strstr logo_linux_clut224 22strcmp strcpy strlcpy strlen strncmp strstr logo_linux_clut224
23reloc_got2 kernstart_addr memstart_addr linux_banner _stext 23reloc_got2 kernstart_addr memstart_addr linux_banner _stext
24opal_query_takeover opal_do_takeover opal_enter_rtas opal_secondary_entry 24__prom_init_toc_start __prom_init_toc_end btext_setup_display TOC."
25boot_command_line __prom_init_toc_start __prom_init_toc_end
26btext_setup_display TOC."
27 25
28NM="$1" 26NM="$1"
29OBJ="$2" 27OBJ="$2"
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index e239df3768ac..e5b022c55ccd 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -469,9 +469,17 @@ void __init smp_setup_cpu_maps(void)
469 } 469 }
470 470
471 for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) { 471 for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) {
472 bool avail;
473
472 DBG(" thread %d -> cpu %d (hard id %d)\n", 474 DBG(" thread %d -> cpu %d (hard id %d)\n",
473 j, cpu, be32_to_cpu(intserv[j])); 475 j, cpu, be32_to_cpu(intserv[j]));
474 set_cpu_present(cpu, of_device_is_available(dn)); 476
477 avail = of_device_is_available(dn);
478 if (!avail)
479 avail = !of_property_match_string(dn,
480 "enable-method", "spin-table");
481
482 set_cpu_present(cpu, avail);
475 set_hard_smp_processor_id(cpu, be32_to_cpu(intserv[j])); 483 set_hard_smp_processor_id(cpu, be32_to_cpu(intserv[j]));
476 set_cpu_possible(cpu, true); 484 set_cpu_possible(cpu, true);
477 cpu++; 485 cpu++;
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 4e47db686b5d..1bc5a1755ed4 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -54,7 +54,6 @@
54 54
55#include "signal.h" 55#include "signal.h"
56 56
57#undef DEBUG_SIG
58 57
59#ifdef CONFIG_PPC64 58#ifdef CONFIG_PPC64
60#define sys_rt_sigreturn compat_sys_rt_sigreturn 59#define sys_rt_sigreturn compat_sys_rt_sigreturn
@@ -1063,10 +1062,6 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
1063 return 1; 1062 return 1;
1064 1063
1065badframe: 1064badframe:
1066#ifdef DEBUG_SIG
1067 printk("badframe in handle_rt_signal, regs=%p frame=%p newsp=%lx\n",
1068 regs, frame, newsp);
1069#endif
1070 if (show_unhandled_signals) 1065 if (show_unhandled_signals)
1071 printk_ratelimited(KERN_INFO 1066 printk_ratelimited(KERN_INFO
1072 "%s[%d]: bad frame in handle_rt_signal32: " 1067 "%s[%d]: bad frame in handle_rt_signal32: "
@@ -1484,10 +1479,6 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
1484 return 1; 1479 return 1;
1485 1480
1486badframe: 1481badframe:
1487#ifdef DEBUG_SIG
1488 printk("badframe in handle_signal, regs=%p frame=%p newsp=%lx\n",
1489 regs, frame, newsp);
1490#endif
1491 if (show_unhandled_signals) 1482 if (show_unhandled_signals)
1492 printk_ratelimited(KERN_INFO 1483 printk_ratelimited(KERN_INFO
1493 "%s[%d]: bad frame in handle_signal32: " 1484 "%s[%d]: bad frame in handle_signal32: "
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index d501dc4dc3e6..97c1e4b683fc 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -38,7 +38,6 @@
38 38
39#include "signal.h" 39#include "signal.h"
40 40
41#define DEBUG_SIG 0
42 41
43#define GP_REGS_SIZE min(sizeof(elf_gregset_t), sizeof(struct pt_regs)) 42#define GP_REGS_SIZE min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
44#define FP_REGS_SIZE sizeof(elf_fpregset_t) 43#define FP_REGS_SIZE sizeof(elf_fpregset_t)
@@ -700,10 +699,6 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
700 return 0; 699 return 0;
701 700
702badframe: 701badframe:
703#if DEBUG_SIG
704 printk("badframe in sys_rt_sigreturn, regs=%p uc=%p &uc->uc_mcontext=%p\n",
705 regs, uc, &uc->uc_mcontext);
706#endif
707 if (show_unhandled_signals) 702 if (show_unhandled_signals)
708 printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32, 703 printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
709 current->comm, current->pid, "rt_sigreturn", 704 current->comm, current->pid, "rt_sigreturn",
@@ -809,10 +804,6 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
809 return 1; 804 return 1;
810 805
811badframe: 806badframe:
812#if DEBUG_SIG
813 printk("badframe in setup_rt_frame, regs=%p frame=%p newsp=%lx\n",
814 regs, frame, newsp);
815#endif
816 if (show_unhandled_signals) 807 if (show_unhandled_signals)
817 printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32, 808 printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
818 current->comm, current->pid, "setup_rt_frame", 809 current->comm, current->pid, "setup_rt_frame",
diff --git a/arch/powerpc/platforms/cell/cbe_thermal.c b/arch/powerpc/platforms/cell/cbe_thermal.c
index 94560db788bf..2c15ff094483 100644
--- a/arch/powerpc/platforms/cell/cbe_thermal.c
+++ b/arch/powerpc/platforms/cell/cbe_thermal.c
@@ -125,7 +125,7 @@ static ssize_t show_throttle(struct cbe_pmd_regs __iomem *pmd_regs, char *buf, i
125static ssize_t store_throttle(struct cbe_pmd_regs __iomem *pmd_regs, const char *buf, size_t size, int pos) 125static ssize_t store_throttle(struct cbe_pmd_regs __iomem *pmd_regs, const char *buf, size_t size, int pos)
126{ 126{
127 u64 reg_value; 127 u64 reg_value;
128 int temp; 128 unsigned int temp;
129 u64 new_value; 129 u64 new_value;
130 int ret; 130 int ret;
131 131
diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile
index d55891f89a2c..4ad227d04c1a 100644
--- a/arch/powerpc/platforms/powernv/Makefile
+++ b/arch/powerpc/platforms/powernv/Makefile
@@ -1,4 +1,4 @@
1obj-y += setup.o opal-takeover.o opal-wrappers.o opal.o opal-async.o 1obj-y += setup.o opal-wrappers.o opal.o opal-async.o
2obj-y += opal-rtc.o opal-nvram.o opal-lpc.o opal-flash.o 2obj-y += opal-rtc.o opal-nvram.o opal-lpc.o opal-flash.o
3obj-y += rng.o opal-elog.o opal-dump.o opal-sysparam.o opal-sensor.o 3obj-y += rng.o opal-elog.o opal-dump.o opal-sysparam.o opal-sensor.o
4obj-y += opal-msglog.o 4obj-y += opal-msglog.o
diff --git a/arch/powerpc/platforms/powernv/opal-takeover.S b/arch/powerpc/platforms/powernv/opal-takeover.S
deleted file mode 100644
index 11a3169ee583..000000000000
--- a/arch/powerpc/platforms/powernv/opal-takeover.S
+++ /dev/null
@@ -1,140 +0,0 @@
1/*
2 * PowerNV OPAL takeover assembly code, for use by prom_init.c
3 *
4 * Copyright 2011 IBM Corp.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <asm/ppc_asm.h>
13#include <asm/hvcall.h>
14#include <asm/asm-offsets.h>
15#include <asm/opal.h>
16
17#define H_HAL_TAKEOVER 0x5124
18#define H_HAL_TAKEOVER_QUERY_MAGIC -1
19
20 .text
21_GLOBAL(opal_query_takeover)
22 mfcr r0
23 stw r0,8(r1)
24 stdu r1,-STACKFRAMESIZE(r1)
25 std r3,STK_PARAM(R3)(r1)
26 std r4,STK_PARAM(R4)(r1)
27 li r3,H_HAL_TAKEOVER
28 li r4,H_HAL_TAKEOVER_QUERY_MAGIC
29 HVSC
30 addi r1,r1,STACKFRAMESIZE
31 ld r10,STK_PARAM(R3)(r1)
32 std r4,0(r10)
33 ld r10,STK_PARAM(R4)(r1)
34 std r5,0(r10)
35 lwz r0,8(r1)
36 mtcrf 0xff,r0
37 blr
38
39_GLOBAL(opal_do_takeover)
40 mfcr r0
41 stw r0,8(r1)
42 mflr r0
43 std r0,16(r1)
44 bl __opal_do_takeover
45 ld r0,16(r1)
46 mtlr r0
47 lwz r0,8(r1)
48 mtcrf 0xff,r0
49 blr
50
51__opal_do_takeover:
52 ld r4,0(r3)
53 ld r5,0x8(r3)
54 ld r6,0x10(r3)
55 ld r7,0x18(r3)
56 ld r8,0x20(r3)
57 ld r9,0x28(r3)
58 ld r10,0x30(r3)
59 ld r11,0x38(r3)
60 li r3,H_HAL_TAKEOVER
61 HVSC
62 blr
63
64 .globl opal_secondary_entry
65opal_secondary_entry:
66 mr r31,r3
67 mfmsr r11
68 li r12,(MSR_SF | MSR_ISF)@highest
69 sldi r12,r12,48
70 or r11,r11,r12
71 mtmsrd r11
72 isync
73 mfspr r4,SPRN_PIR
74 std r4,0(r3)
751: HMT_LOW
76 ld r4,8(r3)
77 cmpli cr0,r4,0
78 beq 1b
79 HMT_MEDIUM
801: addi r3,r31,16
81 bl __opal_do_takeover
82 b 1b
83
84_GLOBAL(opal_enter_rtas)
85 mflr r0
86 std r0,16(r1)
87 stdu r1,-PROM_FRAME_SIZE(r1) /* Save SP and create stack space */
88
89 /* Because PROM is running in 32b mode, it clobbers the high order half
90 * of all registers that it saves. We therefore save those registers
91 * PROM might touch to the stack. (r0, r3-r13 are caller saved)
92 */
93 SAVE_GPR(2, r1)
94 SAVE_GPR(13, r1)
95 SAVE_8GPRS(14, r1)
96 SAVE_10GPRS(22, r1)
97 mfcr r10
98 mfmsr r11
99 std r10,_CCR(r1)
100 std r11,_MSR(r1)
101
102 /* Get the PROM entrypoint */
103 mtlr r5
104
105 /* Switch MSR to 32 bits mode
106 */
107 li r12,1
108 rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
109 andc r11,r11,r12
110 li r12,1
111 rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
112 andc r11,r11,r12
113 mtmsrd r11
114 isync
115
116 /* Enter RTAS here... */
117 blrl
118
119 /* Just make sure that r1 top 32 bits didn't get
120 * corrupt by OF
121 */
122 rldicl r1,r1,0,32
123
124 /* Restore the MSR (back to 64 bits) */
125 ld r0,_MSR(r1)
126 MTMSRD(r0)
127 isync
128
129 /* Restore other registers */
130 REST_GPR(2, r1)
131 REST_GPR(13, r1)
132 REST_8GPRS(14, r1)
133 REST_10GPRS(22, r1)
134 ld r4,_CCR(r1)
135 mtcr r4
136
137 addi r1,r1,PROM_FRAME_SIZE
138 ld r0,16(r1)
139 mtlr r0
140 blr
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
index 62c47bb76517..9e5353ff6d1b 100644
--- a/arch/powerpc/sysdev/dart_iommu.c
+++ b/arch/powerpc/sysdev/dart_iommu.c
@@ -476,6 +476,11 @@ void __init alloc_dart_table(void)
476 */ 476 */
477 dart_tablebase = (unsigned long) 477 dart_tablebase = (unsigned long)
478 __va(memblock_alloc_base(1UL<<24, 1UL<<24, 0x80000000L)); 478 __va(memblock_alloc_base(1UL<<24, 1UL<<24, 0x80000000L));
479 /*
480 * The DART space is later unmapped from the kernel linear mapping and
481 * accessing dart_tablebase during kmemleak scanning will fault.
482 */
483 kmemleak_no_scan((void *)dart_tablebase);
479 484
480 printk(KERN_INFO "DART table allocated at: %lx\n", dart_tablebase); 485 printk(KERN_INFO "DART table allocated at: %lx\n", dart_tablebase);
481} 486}
diff --git a/arch/sparc/include/asm/irq_64.h b/arch/sparc/include/asm/irq_64.h
index 375cffcf7dbd..91d219381306 100644
--- a/arch/sparc/include/asm/irq_64.h
+++ b/arch/sparc/include/asm/irq_64.h
@@ -89,7 +89,7 @@ static inline unsigned long get_softint(void)
89 return retval; 89 return retval;
90} 90}
91 91
92void arch_trigger_all_cpu_backtrace(void); 92void arch_trigger_all_cpu_backtrace(bool);
93#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace 93#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
94 94
95extern void *hardirq_stack[NR_CPUS]; 95extern void *hardirq_stack[NR_CPUS];
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index b2988f25e230..027e09986194 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -239,7 +239,7 @@ static void __global_reg_poll(struct global_reg_snapshot *gp)
239 } 239 }
240} 240}
241 241
242void arch_trigger_all_cpu_backtrace(void) 242void arch_trigger_all_cpu_backtrace(bool include_self)
243{ 243{
244 struct thread_info *tp = current_thread_info(); 244 struct thread_info *tp = current_thread_info();
245 struct pt_regs *regs = get_irq_regs(); 245 struct pt_regs *regs = get_irq_regs();
@@ -251,16 +251,22 @@ void arch_trigger_all_cpu_backtrace(void)
251 251
252 spin_lock_irqsave(&global_cpu_snapshot_lock, flags); 252 spin_lock_irqsave(&global_cpu_snapshot_lock, flags);
253 253
254 memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
255
256 this_cpu = raw_smp_processor_id(); 254 this_cpu = raw_smp_processor_id();
257 255
258 __global_reg_self(tp, regs, this_cpu); 256 memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
257
258 if (include_self)
259 __global_reg_self(tp, regs, this_cpu);
259 260
260 smp_fetch_global_regs(); 261 smp_fetch_global_regs();
261 262
262 for_each_online_cpu(cpu) { 263 for_each_online_cpu(cpu) {
263 struct global_reg_snapshot *gp = &global_cpu_snapshot[cpu].reg; 264 struct global_reg_snapshot *gp;
265
266 if (!include_self && cpu == this_cpu)
267 continue;
268
269 gp = &global_cpu_snapshot[cpu].reg;
264 270
265 __global_reg_poll(gp); 271 __global_reg_poll(gp);
266 272
@@ -292,7 +298,7 @@ void arch_trigger_all_cpu_backtrace(void)
292 298
293static void sysrq_handle_globreg(int key) 299static void sysrq_handle_globreg(int key)
294{ 300{
295 arch_trigger_all_cpu_backtrace(); 301 arch_trigger_all_cpu_backtrace(true);
296} 302}
297 303
298static struct sysrq_key_op sparc_globalreg_op = { 304static struct sysrq_key_op sparc_globalreg_op = {
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index cb6cfcd034cf..a80cbb88ea91 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -43,7 +43,7 @@ extern int vector_used_by_percpu_irq(unsigned int vector);
43extern void init_ISA_irqs(void); 43extern void init_ISA_irqs(void);
44 44
45#ifdef CONFIG_X86_LOCAL_APIC 45#ifdef CONFIG_X86_LOCAL_APIC
46void arch_trigger_all_cpu_backtrace(void); 46void arch_trigger_all_cpu_backtrace(bool);
47#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace 47#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
48#endif 48#endif
49 49
diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c
index c3fcb5de5083..6a1e71bde323 100644
--- a/arch/x86/kernel/apic/hw_nmi.c
+++ b/arch/x86/kernel/apic/hw_nmi.c
@@ -33,31 +33,41 @@ static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;
33/* "in progress" flag of arch_trigger_all_cpu_backtrace */ 33/* "in progress" flag of arch_trigger_all_cpu_backtrace */
34static unsigned long backtrace_flag; 34static unsigned long backtrace_flag;
35 35
36void arch_trigger_all_cpu_backtrace(void) 36void arch_trigger_all_cpu_backtrace(bool include_self)
37{ 37{
38 int i; 38 int i;
39 int cpu = get_cpu();
39 40
40 if (test_and_set_bit(0, &backtrace_flag)) 41 if (test_and_set_bit(0, &backtrace_flag)) {
41 /* 42 /*
42 * If there is already a trigger_all_cpu_backtrace() in progress 43 * If there is already a trigger_all_cpu_backtrace() in progress
43 * (backtrace_flag == 1), don't output double cpu dump infos. 44 * (backtrace_flag == 1), don't output double cpu dump infos.
44 */ 45 */
46 put_cpu();
45 return; 47 return;
48 }
46 49
47 cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask); 50 cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask);
51 if (!include_self)
52 cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
48 53
49 printk(KERN_INFO "sending NMI to all CPUs:\n"); 54 if (!cpumask_empty(to_cpumask(backtrace_mask))) {
50 apic->send_IPI_all(NMI_VECTOR); 55 pr_info("sending NMI to %s CPUs:\n",
56 (include_self ? "all" : "other"));
57 apic->send_IPI_mask(to_cpumask(backtrace_mask), NMI_VECTOR);
58 }
51 59
52 /* Wait for up to 10 seconds for all CPUs to do the backtrace */ 60 /* Wait for up to 10 seconds for all CPUs to do the backtrace */
53 for (i = 0; i < 10 * 1000; i++) { 61 for (i = 0; i < 10 * 1000; i++) {
54 if (cpumask_empty(to_cpumask(backtrace_mask))) 62 if (cpumask_empty(to_cpumask(backtrace_mask)))
55 break; 63 break;
56 mdelay(1); 64 mdelay(1);
65 touch_softlockup_watchdog();
57 } 66 }
58 67
59 clear_bit(0, &backtrace_flag); 68 clear_bit(0, &backtrace_flag);
60 smp_mb__after_atomic(); 69 smp_mb__after_atomic();
70 put_cpu();
61} 71}
62 72
63static int 73static int
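
Note: the hw_nmi.c hunk gives arch_trigger_all_cpu_backtrace() an include_self flag: the NMI target mask starts from the online CPUs, the calling CPU is optionally removed, and the IPI is only sent if the mask is non-empty. A toy model of that mask handling in plain C; bit positions stand in for CPU numbers, and none of the names below are kernel API.

#include <stdio.h>

static unsigned int build_backtrace_mask(unsigned int online_mask,
					 int this_cpu, int include_self)
{
	unsigned int mask = online_mask;

	if (!include_self)
		mask &= ~(1u << this_cpu);   /* drop the calling CPU */
	return mask;                         /* send NMIs only if non-zero */
}

int main(void)
{
	unsigned int online = 0x0f;          /* CPUs 0-3 online */

	printf("include_self=1: %#x\n", build_backtrace_mask(online, 2, 1)); /* 0xf */
	printf("include_self=0: %#x\n", build_backtrace_mask(online, 2, 0)); /* 0xb */
	return 0;
}
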
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index f0da82b8e634..dbaa23e78b36 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -423,9 +423,10 @@ sysenter_past_esp:
423 jnz sysenter_audit 423 jnz sysenter_audit
424sysenter_do_call: 424sysenter_do_call:
425 cmpl $(NR_syscalls), %eax 425 cmpl $(NR_syscalls), %eax
426 jae syscall_badsys 426 jae sysenter_badsys
427 call *sys_call_table(,%eax,4) 427 call *sys_call_table(,%eax,4)
428 movl %eax,PT_EAX(%esp) 428 movl %eax,PT_EAX(%esp)
429sysenter_after_call:
429 LOCKDEP_SYS_EXIT 430 LOCKDEP_SYS_EXIT
430 DISABLE_INTERRUPTS(CLBR_ANY) 431 DISABLE_INTERRUPTS(CLBR_ANY)
431 TRACE_IRQS_OFF 432 TRACE_IRQS_OFF
@@ -675,7 +676,12 @@ END(syscall_fault)
675 676
676syscall_badsys: 677syscall_badsys:
677 movl $-ENOSYS,PT_EAX(%esp) 678 movl $-ENOSYS,PT_EAX(%esp)
678 jmp resume_userspace 679 jmp syscall_exit
680END(syscall_badsys)
681
682sysenter_badsys:
683 movl $-ENOSYS,PT_EAX(%esp)
684 jmp sysenter_after_call
679END(syscall_badsys) 685END(syscall_badsys)
680 CFI_ENDPROC 686 CFI_ENDPROC
681 687
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index a0da58db43a8..2851d63c1202 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -363,7 +363,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
363 363
364 /* Set up to return from userspace. */ 364 /* Set up to return from userspace. */
365 restorer = current->mm->context.vdso + 365 restorer = current->mm->context.vdso +
366 selected_vdso32->sym___kernel_sigreturn; 366 selected_vdso32->sym___kernel_rt_sigreturn;
367 if (ksig->ka.sa.sa_flags & SA_RESTORER) 367 if (ksig->ka.sa.sa_flags & SA_RESTORER)
368 restorer = ksig->ka.sa.sa_restorer; 368 restorer = ksig->ka.sa.sa_restorer;
369 put_user_ex(restorer, &frame->pretcode); 369 put_user_ex(restorer, &frame->pretcode);
diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
index 3c0809a0631f..61b04fe36e66 100644
--- a/arch/x86/vdso/Makefile
+++ b/arch/x86/vdso/Makefile
@@ -11,7 +11,6 @@ VDSO32-$(CONFIG_COMPAT) := y
11 11
12# files to link into the vdso 12# files to link into the vdso
13vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o vdso-fakesections.o 13vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o vdso-fakesections.o
14vobjs-nox32 := vdso-fakesections.o
15 14
16# files to link into kernel 15# files to link into kernel
17obj-y += vma.o 16obj-y += vma.o
@@ -67,7 +66,8 @@ $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso2c FORCE
67# 66#
68CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \ 67CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
69 $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \ 68 $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \
70 -fno-omit-frame-pointer -foptimize-sibling-calls 69 -fno-omit-frame-pointer -foptimize-sibling-calls \
70 -DDISABLE_BRANCH_PROFILING
71 71
72$(vobjs): KBUILD_CFLAGS += $(CFL) 72$(vobjs): KBUILD_CFLAGS += $(CFL)
73 73
@@ -134,7 +134,7 @@ override obj-dirs = $(dir $(obj)) $(obj)/vdso32/
134 134
135targets += vdso32/vdso32.lds 135targets += vdso32/vdso32.lds
136targets += vdso32/note.o vdso32/vclock_gettime.o $(vdso32.so-y:%=vdso32/%.o) 136targets += vdso32/note.o vdso32/vclock_gettime.o $(vdso32.so-y:%=vdso32/%.o)
137targets += vdso32/vclock_gettime.o 137targets += vdso32/vclock_gettime.o vdso32/vdso-fakesections.o
138 138
139$(obj)/vdso32.o: $(vdso32-images:%=$(obj)/%) 139$(obj)/vdso32.o: $(vdso32-images:%=$(obj)/%)
140 140
@@ -150,11 +150,13 @@ KBUILD_CFLAGS_32 += -m32 -msoft-float -mregparm=0 -fpic
150KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector) 150KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector)
151KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls) 151KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
152KBUILD_CFLAGS_32 += -fno-omit-frame-pointer 152KBUILD_CFLAGS_32 += -fno-omit-frame-pointer
153KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING
153$(vdso32-images:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_32) 154$(vdso32-images:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
154 155
155$(vdso32-images:%=$(obj)/%.dbg): $(obj)/vdso32-%.so.dbg: FORCE \ 156$(vdso32-images:%=$(obj)/%.dbg): $(obj)/vdso32-%.so.dbg: FORCE \
156 $(obj)/vdso32/vdso32.lds \ 157 $(obj)/vdso32/vdso32.lds \
157 $(obj)/vdso32/vclock_gettime.o \ 158 $(obj)/vdso32/vclock_gettime.o \
159 $(obj)/vdso32/vdso-fakesections.o \
158 $(obj)/vdso32/note.o \ 160 $(obj)/vdso32/note.o \
159 $(obj)/vdso32/%.o 161 $(obj)/vdso32/%.o
160 $(call if_changed,vdso) 162 $(call if_changed,vdso)
@@ -169,14 +171,24 @@ quiet_cmd_vdso = VDSO $@
169 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@' 171 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
170 172
171VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \ 173VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
172 -Wl,-Bsymbolic $(LTO_CFLAGS) 174 $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
173GCOV_PROFILE := n 175GCOV_PROFILE := n
174 176
175# 177#
176# Install the unstripped copies of vdso*.so. 178# Install the unstripped copies of vdso*.so. If our toolchain supports
179# build-id, install .build-id links as well.
177# 180#
178quiet_cmd_vdso_install = INSTALL $(@:install_%=%) 181quiet_cmd_vdso_install = INSTALL $(@:install_%=%)
179 cmd_vdso_install = cp $< $(MODLIB)/vdso/$(@:install_%=%) 182define cmd_vdso_install
183 cp $< "$(MODLIB)/vdso/$(@:install_%=%)"; \
184 if readelf -n $< |grep -q 'Build ID'; then \
185 buildid=`readelf -n $< |grep 'Build ID' |sed -e 's/^.*Build ID: \(.*\)$$/\1/'`; \
186 first=`echo $$buildid | cut -b-2`; \
187 last=`echo $$buildid | cut -b3-`; \
188 mkdir -p "$(MODLIB)/vdso/.build-id/$$first"; \
189 ln -sf "../../$(@:install_%=%)" "$(MODLIB)/vdso/.build-id/$$first/$$last.debug"; \
190 fi
191endef
180 192
181vdso_img_insttargets := $(vdso_img_sodbg:%.dbg=install_%) 193vdso_img_insttargets := $(vdso_img_sodbg:%.dbg=install_%)
182 194
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index b2e4f493e5b0..9793322751e0 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -11,9 +11,6 @@
11 * Check with readelf after changing. 11 * Check with readelf after changing.
12 */ 12 */
13 13
14/* Disable profiling for userspace code: */
15#define DISABLE_BRANCH_PROFILING
16
17#include <uapi/linux/time.h> 14#include <uapi/linux/time.h>
18#include <asm/vgtod.h> 15#include <asm/vgtod.h>
19#include <asm/hpet.h> 16#include <asm/hpet.h>
diff --git a/arch/x86/vdso/vdso-fakesections.c b/arch/x86/vdso/vdso-fakesections.c
index cb8a8d72c24b..aa5fbfab20a5 100644
--- a/arch/x86/vdso/vdso-fakesections.c
+++ b/arch/x86/vdso/vdso-fakesections.c
@@ -2,31 +2,20 @@
2 * Copyright 2014 Andy Lutomirski 2 * Copyright 2014 Andy Lutomirski
3 * Subject to the GNU Public License, v.2 3 * Subject to the GNU Public License, v.2
4 * 4 *
5 * Hack to keep broken Go programs working. 5 * String table for loadable section headers. See vdso2c.h for why
6 * 6 * this exists.
7 * The Go runtime had a couple of bugs: it would read the section table to try
8 * to figure out how many dynamic symbols there were (it shouldn't have looked
9 * at the section table at all) and, if there were no SHT_SYNDYM section table
10 * entry, it would use an uninitialized value for the number of symbols. As a
11 * workaround, we supply a minimal section table. vdso2c will adjust the
12 * in-memory image so that "vdso_fake_sections" becomes the section table.
13 *
14 * The bug was introduced by:
15 * https://code.google.com/p/go/source/detail?r=56ea40aac72b (2012-08-31)
16 * and is being addressed in the Go runtime in this issue:
17 * https://code.google.com/p/go/issues/detail?id=8197
18 */ 7 */
19 8
20#ifndef __x86_64__ 9const char fake_shstrtab[] __attribute__((section(".fake_shstrtab"))) =
21#error This hack is specific to the 64-bit vDSO 10 ".hash\0"
22#endif 11 ".dynsym\0"
23 12 ".dynstr\0"
24#include <linux/elf.h> 13 ".gnu.version\0"
25 14 ".gnu.version_d\0"
26extern const __visible struct elf64_shdr vdso_fake_sections[]; 15 ".dynamic\0"
27const __visible struct elf64_shdr vdso_fake_sections[] = { 16 ".rodata\0"
28 { 17 ".fake_shstrtab\0" /* Yay, self-referential code. */
29 .sh_type = SHT_DYNSYM, 18 ".note\0"
30 .sh_entsize = sizeof(Elf64_Sym), 19 ".eh_frame_hdr\0"
31 } 20 ".eh_frame\0"
32}; 21 ".text";
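
Note: the new vdso-fakesections.c body is just an ELF-style section-name string table: NUL-separated names whose byte offsets become sh_name values. A small standalone sketch of how such a table is scanned appears below; it mirrors the find_shname() helper added to vdso2c.h further down, but uses ordinary userspace C and a made-up table.

#include <stdio.h>
#include <string.h>

static const char shstrtab[] = ".hash\0.dynsym\0.dynstr\0.text";

/* Return the byte offset of name within the table, or -1 if absent. */
static int find_name_offset(const char *tab, size_t len, const char *name)
{
	const char *p = tab;

	while ((size_t)(p - tab) < len) {
		if (!strcmp(p, name))
			return (int)(p - tab);
		p += strlen(p) + 1;
	}
	return -1;
}

int main(void)
{
	printf(".dynsym at offset %d\n",
	       find_name_offset(shstrtab, sizeof(shstrtab), ".dynsym"));  /* 6 */
	printf(".text at offset %d\n",
	       find_name_offset(shstrtab, sizeof(shstrtab), ".text"));    /* 22 */
	return 0;
}
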
diff --git a/arch/x86/vdso/vdso-layout.lds.S b/arch/x86/vdso/vdso-layout.lds.S
index 2ec72f651ebf..9197544eea9a 100644
--- a/arch/x86/vdso/vdso-layout.lds.S
+++ b/arch/x86/vdso/vdso-layout.lds.S
@@ -6,6 +6,16 @@
6 * This script controls its layout. 6 * This script controls its layout.
7 */ 7 */
8 8
9#if defined(BUILD_VDSO64)
10# define SHDR_SIZE 64
11#elif defined(BUILD_VDSO32) || defined(BUILD_VDSOX32)
12# define SHDR_SIZE 40
13#else
14# error unknown VDSO target
15#endif
16
17#define NUM_FAKE_SHDRS 13
18
9SECTIONS 19SECTIONS
10{ 20{
11 . = SIZEOF_HEADERS; 21 . = SIZEOF_HEADERS;
@@ -18,36 +28,53 @@ SECTIONS
18 .gnu.version_d : { *(.gnu.version_d) } 28 .gnu.version_d : { *(.gnu.version_d) }
19 .gnu.version_r : { *(.gnu.version_r) } 29 .gnu.version_r : { *(.gnu.version_r) }
20 30
31 .dynamic : { *(.dynamic) } :text :dynamic
32
33 .rodata : {
34 *(.rodata*)
35 *(.data*)
36 *(.sdata*)
37 *(.got.plt) *(.got)
38 *(.gnu.linkonce.d.*)
39 *(.bss*)
40 *(.dynbss*)
41 *(.gnu.linkonce.b.*)
42
43 /*
44 * Ideally this would live in a C file, but that won't
45 * work cleanly for x32 until we start building the x32
46 * C code using an x32 toolchain.
47 */
48 VDSO_FAKE_SECTION_TABLE_START = .;
49 . = . + NUM_FAKE_SHDRS * SHDR_SIZE;
50 VDSO_FAKE_SECTION_TABLE_END = .;
51 } :text
52
53 .fake_shstrtab : { *(.fake_shstrtab) } :text
54
55
21 .note : { *(.note.*) } :text :note 56 .note : { *(.note.*) } :text :note
22 57
23 .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr 58 .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
24 .eh_frame : { KEEP (*(.eh_frame)) } :text 59 .eh_frame : { KEEP (*(.eh_frame)) } :text
25 60
26 .dynamic : { *(.dynamic) } :text :dynamic
27
28 .rodata : { *(.rodata*) } :text
29 .data : {
30 *(.data*)
31 *(.sdata*)
32 *(.got.plt) *(.got)
33 *(.gnu.linkonce.d.*)
34 *(.bss*)
35 *(.dynbss*)
36 *(.gnu.linkonce.b.*)
37 }
38
39 .altinstructions : { *(.altinstructions) }
40 .altinstr_replacement : { *(.altinstr_replacement) }
41 61
42 /* 62 /*
43 * Align the actual code well away from the non-instruction data. 63 * Text is well-separated from actual data: there's plenty of
44 * This is the best thing for the I-cache. 64 * stuff that isn't used at runtime in between.
45 */ 65 */
46 . = ALIGN(0x100);
47 66
48 .text : { *(.text*) } :text =0x90909090, 67 .text : { *(.text*) } :text =0x90909090,
49 68
50 /* 69 /*
70 * At the end so that eu-elflint stays happy when vdso2c strips
71 * these. A better implementation would avoid allocating space
72 * for these.
73 */
74 .altinstructions : { *(.altinstructions) } :text
75 .altinstr_replacement : { *(.altinstr_replacement) } :text
76
77 /*
51 * The remainder of the vDSO consists of special pages that are 78 * The remainder of the vDSO consists of special pages that are
52 * shared between the kernel and userspace. It needs to be at the 79 * shared between the kernel and userspace. It needs to be at the
53 * end so that it doesn't overlap the mapping of the actual 80 * end so that it doesn't overlap the mapping of the actual
@@ -75,6 +102,7 @@ SECTIONS
75 /DISCARD/ : { 102 /DISCARD/ : {
76 *(.discard) 103 *(.discard)
77 *(.discard.*) 104 *(.discard.*)
105 *(__bug_table)
78 } 106 }
79} 107}
80 108
diff --git a/arch/x86/vdso/vdso.lds.S b/arch/x86/vdso/vdso.lds.S
index 75e3404c83b1..6807932643c2 100644
--- a/arch/x86/vdso/vdso.lds.S
+++ b/arch/x86/vdso/vdso.lds.S
@@ -6,6 +6,8 @@
6 * the DSO. 6 * the DSO.
7 */ 7 */
8 8
9#define BUILD_VDSO64
10
9#include "vdso-layout.lds.S" 11#include "vdso-layout.lds.S"
10 12
11/* 13/*
diff --git a/arch/x86/vdso/vdso2c.c b/arch/x86/vdso/vdso2c.c
index 7a6bf50f9165..238dbe82776e 100644
--- a/arch/x86/vdso/vdso2c.c
+++ b/arch/x86/vdso/vdso2c.c
@@ -23,6 +23,8 @@ enum {
23 sym_vvar_page, 23 sym_vvar_page,
24 sym_hpet_page, 24 sym_hpet_page,
25 sym_end_mapping, 25 sym_end_mapping,
26 sym_VDSO_FAKE_SECTION_TABLE_START,
27 sym_VDSO_FAKE_SECTION_TABLE_END,
26}; 28};
27 29
28const int special_pages[] = { 30const int special_pages[] = {
@@ -30,15 +32,26 @@ const int special_pages[] = {
30 sym_hpet_page, 32 sym_hpet_page,
31}; 33};
32 34
33char const * const required_syms[] = { 35struct vdso_sym {
34 [sym_vvar_page] = "vvar_page", 36 const char *name;
35 [sym_hpet_page] = "hpet_page", 37 bool export;
36 [sym_end_mapping] = "end_mapping", 38};
37 "VDSO32_NOTE_MASK", 39
38 "VDSO32_SYSENTER_RETURN", 40struct vdso_sym required_syms[] = {
39 "__kernel_vsyscall", 41 [sym_vvar_page] = {"vvar_page", true},
40 "__kernel_sigreturn", 42 [sym_hpet_page] = {"hpet_page", true},
41 "__kernel_rt_sigreturn", 43 [sym_end_mapping] = {"end_mapping", true},
44 [sym_VDSO_FAKE_SECTION_TABLE_START] = {
45 "VDSO_FAKE_SECTION_TABLE_START", false
46 },
47 [sym_VDSO_FAKE_SECTION_TABLE_END] = {
48 "VDSO_FAKE_SECTION_TABLE_END", false
49 },
50 {"VDSO32_NOTE_MASK", true},
51 {"VDSO32_SYSENTER_RETURN", true},
52 {"__kernel_vsyscall", true},
53 {"__kernel_sigreturn", true},
54 {"__kernel_rt_sigreturn", true},
42}; 55};
43 56
44__attribute__((format(printf, 1, 2))) __attribute__((noreturn)) 57__attribute__((format(printf, 1, 2))) __attribute__((noreturn))
@@ -83,37 +96,21 @@ extern void bad_put_le(void);
83 96
84#define NSYMS (sizeof(required_syms) / sizeof(required_syms[0])) 97#define NSYMS (sizeof(required_syms) / sizeof(required_syms[0]))
85 98
86#define BITS 64 99#define BITSFUNC3(name, bits) name##bits
87#define GOFUNC go64 100#define BITSFUNC2(name, bits) BITSFUNC3(name, bits)
88#define Elf_Ehdr Elf64_Ehdr 101#define BITSFUNC(name) BITSFUNC2(name, ELF_BITS)
89#define Elf_Shdr Elf64_Shdr 102
90#define Elf_Phdr Elf64_Phdr 103#define ELF_BITS_XFORM2(bits, x) Elf##bits##_##x
91#define Elf_Sym Elf64_Sym 104#define ELF_BITS_XFORM(bits, x) ELF_BITS_XFORM2(bits, x)
92#define Elf_Dyn Elf64_Dyn 105#define ELF(x) ELF_BITS_XFORM(ELF_BITS, x)
106
107#define ELF_BITS 64
93#include "vdso2c.h" 108#include "vdso2c.h"
94#undef BITS 109#undef ELF_BITS
95#undef GOFUNC 110
96#undef Elf_Ehdr 111#define ELF_BITS 32
97#undef Elf_Shdr
98#undef Elf_Phdr
99#undef Elf_Sym
100#undef Elf_Dyn
101
102#define BITS 32
103#define GOFUNC go32
104#define Elf_Ehdr Elf32_Ehdr
105#define Elf_Shdr Elf32_Shdr
106#define Elf_Phdr Elf32_Phdr
107#define Elf_Sym Elf32_Sym
108#define Elf_Dyn Elf32_Dyn
109#include "vdso2c.h" 112#include "vdso2c.h"
110#undef BITS 113#undef ELF_BITS
111#undef GOFUNC
112#undef Elf_Ehdr
113#undef Elf_Shdr
114#undef Elf_Phdr
115#undef Elf_Sym
116#undef Elf_Dyn
117 114
118static void go(void *addr, size_t len, FILE *outfile, const char *name) 115static void go(void *addr, size_t len, FILE *outfile, const char *name)
119{ 116{
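
Note: the vdso2c.c hunk replaces the long per-width #define/#undef lists with BITSFUNC()/ELF() token-pasting macros, so vdso2c.h can be included once with ELF_BITS set to 64 and once with it set to 32. A self-contained demo of that expansion pattern follows; the hello* names and the DEFINE_WORDSIZE_FN helper are invented for the example, and only the macro mechanics match the patch.

#include <stdio.h>

#define BITSFUNC3(name, bits) name##bits
#define BITSFUNC2(name, bits) BITSFUNC3(name, bits)
#define BITSFUNC(name) BITSFUNC2(name, ELF_BITS)

/* Stand-in for the per-width code that vdso2c.h generates. */
#define DEFINE_WORDSIZE_FN(bits) \
	static int hello##bits(void) { return bits; }

DEFINE_WORDSIZE_FN(32)
DEFINE_WORDSIZE_FN(64)

/* With ELF_BITS defined, BITSFUNC(hello) resolves to hello64 or hello32. */
#define ELF_BITS 64
static int call_64(void) { return BITSFUNC(hello)(); }
#undef ELF_BITS

#define ELF_BITS 32
static int call_32(void) { return BITSFUNC(hello)(); }
#undef ELF_BITS

int main(void)
{
	printf("BITSFUNC resolved to hello%d and hello%d\n", call_64(), call_32());
	return 0;
}
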
diff --git a/arch/x86/vdso/vdso2c.h b/arch/x86/vdso/vdso2c.h
index c6eefaf389b9..df95a2fdff73 100644
--- a/arch/x86/vdso/vdso2c.h
+++ b/arch/x86/vdso/vdso2c.h
@@ -4,23 +4,136 @@
4 * are built for 32-bit userspace. 4 * are built for 32-bit userspace.
5 */ 5 */
6 6
7static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name) 7/*
8 * We're writing a section table for a few reasons:
9 *
10 * The Go runtime had a couple of bugs: it would read the section
11 * table to try to figure out how many dynamic symbols there were (it
12 * shouldn't have looked at the section table at all) and, if there
13 * were no SHT_SYNDYM section table entry, it would use an
14 * uninitialized value for the number of symbols. An empty DYNSYM
15 * table would work, but I see no reason not to write a valid one (and
16 * keep full performance for old Go programs). This hack is only
17 * needed on x86_64.
18 *
19 * The bug was introduced on 2012-08-31 by:
20 * https://code.google.com/p/go/source/detail?r=56ea40aac72b
21 * and was fixed on 2014-06-13 by:
22 * https://code.google.com/p/go/source/detail?r=fc1cd5e12595
23 *
24 * Binutils has issues debugging the vDSO: it reads the section table to
25 * find SHT_NOTE; it won't look at PT_NOTE for the in-memory vDSO, which
26 * would break build-id if we removed the section table. Binutils
27 * also requires that shstrndx != 0. See:
28 * https://sourceware.org/bugzilla/show_bug.cgi?id=17064
29 *
30 * elfutils might not look for PT_NOTE if there is a section table at
31 * all. I don't know whether this matters for any practical purpose.
32 *
33 * For simplicity, rather than hacking up a partial section table, we
34 * just write a mostly complete one. We omit non-dynamic symbols,
35 * though, since they're rather large.
36 *
37 * Once binutils gets fixed, we might be able to drop this for all but
38 * the 64-bit vdso, since build-id only works in kernel RPMs, and
39 * systems that update to new enough kernel RPMs will likely update
40 * binutils in sync. build-id has never worked for home-built kernel
41 * RPMs without manual symlinking, and I suspect that no one ever does
42 * that.
43 */
44struct BITSFUNC(fake_sections)
45{
46 ELF(Shdr) *table;
47 unsigned long table_offset;
48 int count, max_count;
49
50 int in_shstrndx;
51 unsigned long shstr_offset;
52 const char *shstrtab;
53 size_t shstrtab_len;
54
55 int out_shstrndx;
56};
57
58static unsigned int BITSFUNC(find_shname)(struct BITSFUNC(fake_sections) *out,
59 const char *name)
60{
61 const char *outname = out->shstrtab;
62 while (outname - out->shstrtab < out->shstrtab_len) {
63 if (!strcmp(name, outname))
64 return (outname - out->shstrtab) + out->shstr_offset;
65 outname += strlen(outname) + 1;
66 }
67
68 if (*name)
69 printf("Warning: could not find output name \"%s\"\n", name);
70 return out->shstr_offset + out->shstrtab_len - 1; /* Use a null. */
71}
72
73static void BITSFUNC(init_sections)(struct BITSFUNC(fake_sections) *out)
74{
75 if (!out->in_shstrndx)
76 fail("didn't find the fake shstrndx\n");
77
78 memset(out->table, 0, out->max_count * sizeof(ELF(Shdr)));
79
80 if (out->max_count < 1)
81 fail("we need at least two fake output sections\n");
82
83 PUT_LE(&out->table[0].sh_type, SHT_NULL);
84 PUT_LE(&out->table[0].sh_name, BITSFUNC(find_shname)(out, ""));
85
86 out->count = 1;
87}
88
89static void BITSFUNC(copy_section)(struct BITSFUNC(fake_sections) *out,
90 int in_idx, const ELF(Shdr) *in,
91 const char *name)
92{
93 uint64_t flags = GET_LE(&in->sh_flags);
94
95 bool copy = flags & SHF_ALLOC &&
96 strcmp(name, ".altinstructions") &&
97 strcmp(name, ".altinstr_replacement");
98
99 if (!copy)
100 return;
101
102 if (out->count >= out->max_count)
103 fail("too many copied sections (max = %d)\n", out->max_count);
104
105 if (in_idx == out->in_shstrndx)
106 out->out_shstrndx = out->count;
107
108 out->table[out->count] = *in;
109 PUT_LE(&out->table[out->count].sh_name,
110 BITSFUNC(find_shname)(out, name));
111
112 /* elfutils requires that a strtab have the correct type. */
113 if (!strcmp(name, ".fake_shstrtab"))
114 PUT_LE(&out->table[out->count].sh_type, SHT_STRTAB);
115
116 out->count++;
117}
118
119static void BITSFUNC(go)(void *addr, size_t len,
120 FILE *outfile, const char *name)
8{ 121{
9 int found_load = 0; 122 int found_load = 0;
10 unsigned long load_size = -1; /* Work around bogus warning */ 123 unsigned long load_size = -1; /* Work around bogus warning */
11 unsigned long data_size; 124 unsigned long data_size;
12 Elf_Ehdr *hdr = (Elf_Ehdr *)addr; 125 ELF(Ehdr) *hdr = (ELF(Ehdr) *)addr;
13 int i; 126 int i;
14 unsigned long j; 127 unsigned long j;
15 Elf_Shdr *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr, 128 ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr,
16 *alt_sec = NULL; 129 *alt_sec = NULL;
17 Elf_Dyn *dyn = 0, *dyn_end = 0; 130 ELF(Dyn) *dyn = 0, *dyn_end = 0;
18 const char *secstrings; 131 const char *secstrings;
19 uint64_t syms[NSYMS] = {}; 132 uint64_t syms[NSYMS] = {};
20 133
21 uint64_t fake_sections_value = 0, fake_sections_size = 0; 134 struct BITSFUNC(fake_sections) fake_sections = {};
22 135
23 Elf_Phdr *pt = (Elf_Phdr *)(addr + GET_LE(&hdr->e_phoff)); 136 ELF(Phdr) *pt = (ELF(Phdr) *)(addr + GET_LE(&hdr->e_phoff));
24 137
25 /* Walk the segment table. */ 138 /* Walk the segment table. */
26 for (i = 0; i < GET_LE(&hdr->e_phnum); i++) { 139 for (i = 0; i < GET_LE(&hdr->e_phnum); i++) {
@@ -51,7 +164,7 @@ static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
51 for (i = 0; dyn + i < dyn_end && 164 for (i = 0; dyn + i < dyn_end &&
52 GET_LE(&dyn[i].d_tag) != DT_NULL; i++) { 165 GET_LE(&dyn[i].d_tag) != DT_NULL; i++) {
53 typeof(dyn[i].d_tag) tag = GET_LE(&dyn[i].d_tag); 166 typeof(dyn[i].d_tag) tag = GET_LE(&dyn[i].d_tag);
54 if (tag == DT_REL || tag == DT_RELSZ || 167 if (tag == DT_REL || tag == DT_RELSZ || tag == DT_RELA ||
55 tag == DT_RELENT || tag == DT_TEXTREL) 168 tag == DT_RELENT || tag == DT_TEXTREL)
56 fail("vdso image contains dynamic relocations\n"); 169 fail("vdso image contains dynamic relocations\n");
57 } 170 }
@@ -61,7 +174,7 @@ static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
61 GET_LE(&hdr->e_shentsize)*GET_LE(&hdr->e_shstrndx); 174 GET_LE(&hdr->e_shentsize)*GET_LE(&hdr->e_shstrndx);
62 secstrings = addr + GET_LE(&secstrings_hdr->sh_offset); 175 secstrings = addr + GET_LE(&secstrings_hdr->sh_offset);
63 for (i = 0; i < GET_LE(&hdr->e_shnum); i++) { 176 for (i = 0; i < GET_LE(&hdr->e_shnum); i++) {
64 Elf_Shdr *sh = addr + GET_LE(&hdr->e_shoff) + 177 ELF(Shdr) *sh = addr + GET_LE(&hdr->e_shoff) +
65 GET_LE(&hdr->e_shentsize) * i; 178 GET_LE(&hdr->e_shentsize) * i;
66 if (GET_LE(&sh->sh_type) == SHT_SYMTAB) 179 if (GET_LE(&sh->sh_type) == SHT_SYMTAB)
67 symtab_hdr = sh; 180 symtab_hdr = sh;
@@ -82,29 +195,63 @@ static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
82 i < GET_LE(&symtab_hdr->sh_size) / GET_LE(&symtab_hdr->sh_entsize); 195 i < GET_LE(&symtab_hdr->sh_size) / GET_LE(&symtab_hdr->sh_entsize);
83 i++) { 196 i++) {
84 int k; 197 int k;
85 Elf_Sym *sym = addr + GET_LE(&symtab_hdr->sh_offset) + 198 ELF(Sym) *sym = addr + GET_LE(&symtab_hdr->sh_offset) +
86 GET_LE(&symtab_hdr->sh_entsize) * i; 199 GET_LE(&symtab_hdr->sh_entsize) * i;
87 const char *name = addr + GET_LE(&strtab_hdr->sh_offset) + 200 const char *name = addr + GET_LE(&strtab_hdr->sh_offset) +
88 GET_LE(&sym->st_name); 201 GET_LE(&sym->st_name);
89 202
90 for (k = 0; k < NSYMS; k++) { 203 for (k = 0; k < NSYMS; k++) {
91 if (!strcmp(name, required_syms[k])) { 204 if (!strcmp(name, required_syms[k].name)) {
92 if (syms[k]) { 205 if (syms[k]) {
93 fail("duplicate symbol %s\n", 206 fail("duplicate symbol %s\n",
94 required_syms[k]); 207 required_syms[k].name);
95 } 208 }
96 syms[k] = GET_LE(&sym->st_value); 209 syms[k] = GET_LE(&sym->st_value);
97 } 210 }
98 } 211 }
99 212
100 if (!strcmp(name, "vdso_fake_sections")) { 213 if (!strcmp(name, "fake_shstrtab")) {
101 if (fake_sections_value) 214 ELF(Shdr) *sh;
102 fail("duplicate vdso_fake_sections\n"); 215
103 fake_sections_value = GET_LE(&sym->st_value); 216 fake_sections.in_shstrndx = GET_LE(&sym->st_shndx);
104 fake_sections_size = GET_LE(&sym->st_size); 217 fake_sections.shstrtab = addr + GET_LE(&sym->st_value);
218 fake_sections.shstrtab_len = GET_LE(&sym->st_size);
219 sh = addr + GET_LE(&hdr->e_shoff) +
220 GET_LE(&hdr->e_shentsize) *
221 fake_sections.in_shstrndx;
222 fake_sections.shstr_offset = GET_LE(&sym->st_value) -
223 GET_LE(&sh->sh_addr);
105 } 224 }
106 } 225 }
107 226
227 /* Build the output section table. */
228 if (!syms[sym_VDSO_FAKE_SECTION_TABLE_START] ||
229 !syms[sym_VDSO_FAKE_SECTION_TABLE_END])
230 fail("couldn't find fake section table\n");
231 if ((syms[sym_VDSO_FAKE_SECTION_TABLE_END] -
232 syms[sym_VDSO_FAKE_SECTION_TABLE_START]) % sizeof(ELF(Shdr)))
233 fail("fake section table size isn't a multiple of sizeof(Shdr)\n");
234 fake_sections.table = addr + syms[sym_VDSO_FAKE_SECTION_TABLE_START];
235 fake_sections.table_offset = syms[sym_VDSO_FAKE_SECTION_TABLE_START];
236 fake_sections.max_count = (syms[sym_VDSO_FAKE_SECTION_TABLE_END] -
237 syms[sym_VDSO_FAKE_SECTION_TABLE_START]) /
238 sizeof(ELF(Shdr));
239
240 BITSFUNC(init_sections)(&fake_sections);
241 for (i = 0; i < GET_LE(&hdr->e_shnum); i++) {
242 ELF(Shdr) *sh = addr + GET_LE(&hdr->e_shoff) +
243 GET_LE(&hdr->e_shentsize) * i;
244 BITSFUNC(copy_section)(&fake_sections, i, sh,
245 secstrings + GET_LE(&sh->sh_name));
246 }
247 if (!fake_sections.out_shstrndx)
248 fail("didn't generate shstrndx?!?\n");
249
250 PUT_LE(&hdr->e_shoff, fake_sections.table_offset);
251 PUT_LE(&hdr->e_shentsize, sizeof(ELF(Shdr)));
252 PUT_LE(&hdr->e_shnum, fake_sections.count);
253 PUT_LE(&hdr->e_shstrndx, fake_sections.out_shstrndx);
254
108 /* Validate mapping addresses. */ 255 /* Validate mapping addresses. */
109 for (i = 0; i < sizeof(special_pages) / sizeof(special_pages[0]); i++) { 256 for (i = 0; i < sizeof(special_pages) / sizeof(special_pages[0]); i++) {
110 if (!syms[i]) 257 if (!syms[i])
@@ -112,25 +259,17 @@ static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
112 259
113 if (syms[i] % 4096) 260 if (syms[i] % 4096)
114 fail("%s must be a multiple of 4096\n", 261 fail("%s must be a multiple of 4096\n",
115 required_syms[i]); 262 required_syms[i].name);
116 if (syms[i] < data_size) 263 if (syms[i] < data_size)
117 fail("%s must be after the text mapping\n", 264 fail("%s must be after the text mapping\n",
118 required_syms[i]); 265 required_syms[i].name);
119 if (syms[sym_end_mapping] < syms[i] + 4096) 266 if (syms[sym_end_mapping] < syms[i] + 4096)
120 fail("%s overruns end_mapping\n", required_syms[i]); 267 fail("%s overruns end_mapping\n",
268 required_syms[i].name);
121 } 269 }
122 if (syms[sym_end_mapping] % 4096) 270 if (syms[sym_end_mapping] % 4096)
123 fail("end_mapping must be a multiple of 4096\n"); 271 fail("end_mapping must be a multiple of 4096\n");
124 272
125 /* Remove sections or use fakes */
126 if (fake_sections_size % sizeof(Elf_Shdr))
127 fail("vdso_fake_sections size is not a multiple of %ld\n",
128 (long)sizeof(Elf_Shdr));
129 PUT_LE(&hdr->e_shoff, fake_sections_value);
130 PUT_LE(&hdr->e_shentsize, fake_sections_value ? sizeof(Elf_Shdr) : 0);
131 PUT_LE(&hdr->e_shnum, fake_sections_size / sizeof(Elf_Shdr));
132 PUT_LE(&hdr->e_shstrndx, SHN_UNDEF);
133
134 if (!name) { 273 if (!name) {
135 fwrite(addr, load_size, 1, outfile); 274 fwrite(addr, load_size, 1, outfile);
136 return; 275 return;
@@ -168,9 +307,9 @@ static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
168 (unsigned long)GET_LE(&alt_sec->sh_size)); 307 (unsigned long)GET_LE(&alt_sec->sh_size));
169 } 308 }
170 for (i = 0; i < NSYMS; i++) { 309 for (i = 0; i < NSYMS; i++) {
171 if (syms[i]) 310 if (required_syms[i].export && syms[i])
172 fprintf(outfile, "\t.sym_%s = 0x%" PRIx64 ",\n", 311 fprintf(outfile, "\t.sym_%s = 0x%" PRIx64 ",\n",
173 required_syms[i], syms[i]); 312 required_syms[i].name, syms[i]);
174 } 313 }
175 fprintf(outfile, "};\n"); 314 fprintf(outfile, "};\n");
176} 315}
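For the find_shname() helper added above, a small user-space sketch (hypothetical names, not kernel code) of the same idea: section names are byte offsets into a packed, NUL-separated string table, so resolving a name is a linear walk over that table.

/*
 * Illustrative strtab lookup: walk a NUL-separated table entry by
 * entry and return the byte offset of the matching name, the same
 * offset an ELF sh_name field would store.
 */
#include <stdio.h>
#include <string.h>
#include <stddef.h>

static long shname_offset(const char *strtab, size_t len, const char *name)
{
        const char *p = strtab;

        while ((size_t)(p - strtab) < len) {
                if (!strcmp(p, name))
                        return p - strtab;      /* sh_name offset */
                p += strlen(p) + 1;             /* skip past the NUL */
        }
        return -1;
}

int main(void)
{
        /* Leading NUL, then ".text" and ".dynsym", as ELF lays it out. */
        static const char tab[] = "\0.text\0.dynsym";

        printf(".dynsym at offset %ld\n",
               shname_offset(tab, sizeof(tab), ".dynsym")); /* prints 7 */
        return 0;
}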
diff --git a/arch/x86/vdso/vdso32/vdso-fakesections.c b/arch/x86/vdso/vdso32/vdso-fakesections.c
new file mode 100644
index 000000000000..541468e25265
--- /dev/null
+++ b/arch/x86/vdso/vdso32/vdso-fakesections.c
@@ -0,0 +1 @@
#include "../vdso-fakesections.c"
diff --git a/arch/x86/vdso/vdsox32.lds.S b/arch/x86/vdso/vdsox32.lds.S
index 46b991b578a8..697c11ece90c 100644
--- a/arch/x86/vdso/vdsox32.lds.S
+++ b/arch/x86/vdso/vdsox32.lds.S
@@ -6,6 +6,8 @@
6 * the DSO. 6 * the DSO.
7 */ 7 */
8 8
9#define BUILD_VDSOX32
10
9#include "vdso-layout.lds.S" 11#include "vdso-layout.lds.S"
10 12
11/* 13/*
diff --git a/block/bio.c b/block/bio.c
index 8c2e55e39a1b..0ec61c9e536c 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -746,6 +746,14 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
746 746
747 goto done; 747 goto done;
748 } 748 }
749
750 /*
751 * If the queue doesn't support SG gaps and adding this
752 * offset would create a gap, disallow it.
753 */
754 if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS) &&
755 bvec_gap_to_prev(prev, offset))
756 return 0;
749 } 757 }
750 758
751 if (bio->bi_vcnt >= bio->bi_max_vecs) 759 if (bio->bi_vcnt >= bio->bi_max_vecs)
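For the QUEUE_FLAG_SG_GAPS check above, a simplified user-space illustration of what an SG "gap" means; this is an assumption about the intent, not the kernel's exact bvec_gap_to_prev() implementation: for hardware that cannot handle gaps, a middle segment must start at offset 0 and the previous one must end on a page boundary, so the scatter/gather list stays contiguous.

/*
 * Hypothetical gap check between two consecutive segments; names and
 * the exact rule are illustrative, not taken from the block layer.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

struct seg {
        unsigned int offset;    /* start offset within its page */
        unsigned int len;       /* length in bytes */
};

/* Would appending @next after @prev leave a hole in the SG list? */
static bool sg_gap_between(const struct seg *prev, const struct seg *next)
{
        return next->offset != 0 ||
               ((prev->offset + prev->len) & (PAGE_SIZE - 1)) != 0;
}

int main(void)
{
        struct seg a = { .offset = 512, .len = PAGE_SIZE - 512 };
        struct seg b = { .offset = 0,   .len = 1024 };
        struct seg c = { .offset = 256, .len = 1024 };

        printf("a->b gap: %d\n", sg_gap_between(&a, &b)); /* 0: contiguous */
        printf("a->c gap: %d\n", sg_gap_between(&a, &c)); /* 1: gap        */
        return 0;
}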
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 069bc202ffe3..b9f4cc494ece 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -80,7 +80,7 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
80 blkg->q = q; 80 blkg->q = q;
81 INIT_LIST_HEAD(&blkg->q_node); 81 INIT_LIST_HEAD(&blkg->q_node);
82 blkg->blkcg = blkcg; 82 blkg->blkcg = blkcg;
83 blkg->refcnt = 1; 83 atomic_set(&blkg->refcnt, 1);
84 84
85 /* root blkg uses @q->root_rl, init rl only for !root blkgs */ 85 /* root blkg uses @q->root_rl, init rl only for !root blkgs */
86 if (blkcg != &blkcg_root) { 86 if (blkcg != &blkcg_root) {
@@ -399,11 +399,8 @@ void __blkg_release_rcu(struct rcu_head *rcu_head)
399 399
400 /* release the blkcg and parent blkg refs this blkg has been holding */ 400 /* release the blkcg and parent blkg refs this blkg has been holding */
401 css_put(&blkg->blkcg->css); 401 css_put(&blkg->blkcg->css);
402 if (blkg->parent) { 402 if (blkg->parent)
403 spin_lock_irq(blkg->q->queue_lock);
404 blkg_put(blkg->parent); 403 blkg_put(blkg->parent);
405 spin_unlock_irq(blkg->q->queue_lock);
406 }
407 404
408 blkg_free(blkg); 405 blkg_free(blkg);
409} 406}
@@ -1093,7 +1090,7 @@ EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
1093 * Register @pol with blkcg core. Might sleep and @pol may be modified on 1090 * Register @pol with blkcg core. Might sleep and @pol may be modified on
1094 * successful registration. Returns 0 on success and -errno on failure. 1091 * successful registration. Returns 0 on success and -errno on failure.
1095 */ 1092 */
1096int __init blkcg_policy_register(struct blkcg_policy *pol) 1093int blkcg_policy_register(struct blkcg_policy *pol)
1097{ 1094{
1098 int i, ret; 1095 int i, ret;
1099 1096
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index cbb7f943f78a..d3fd7aa3d2a3 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -18,6 +18,7 @@
18#include <linux/seq_file.h> 18#include <linux/seq_file.h>
19#include <linux/radix-tree.h> 19#include <linux/radix-tree.h>
20#include <linux/blkdev.h> 20#include <linux/blkdev.h>
21#include <linux/atomic.h>
21 22
22/* Max limits for throttle policy */ 23/* Max limits for throttle policy */
23#define THROTL_IOPS_MAX UINT_MAX 24#define THROTL_IOPS_MAX UINT_MAX
@@ -104,7 +105,7 @@ struct blkcg_gq {
104 struct request_list rl; 105 struct request_list rl;
105 106
106 /* reference count */ 107 /* reference count */
107 int refcnt; 108 atomic_t refcnt;
108 109
109 /* is this blkg online? protected by both blkcg and q locks */ 110 /* is this blkg online? protected by both blkcg and q locks */
110 bool online; 111 bool online;
@@ -145,7 +146,7 @@ void blkcg_drain_queue(struct request_queue *q);
145void blkcg_exit_queue(struct request_queue *q); 146void blkcg_exit_queue(struct request_queue *q);
146 147
147/* Blkio controller policy registration */ 148/* Blkio controller policy registration */
148int __init blkcg_policy_register(struct blkcg_policy *pol); 149int blkcg_policy_register(struct blkcg_policy *pol);
149void blkcg_policy_unregister(struct blkcg_policy *pol); 150void blkcg_policy_unregister(struct blkcg_policy *pol);
150int blkcg_activate_policy(struct request_queue *q, 151int blkcg_activate_policy(struct request_queue *q,
151 const struct blkcg_policy *pol); 152 const struct blkcg_policy *pol);
@@ -257,13 +258,12 @@ static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
257 * blkg_get - get a blkg reference 258 * blkg_get - get a blkg reference
258 * @blkg: blkg to get 259 * @blkg: blkg to get
259 * 260 *
260 * The caller should be holding queue_lock and an existing reference. 261 * The caller should be holding an existing reference.
261 */ 262 */
262static inline void blkg_get(struct blkcg_gq *blkg) 263static inline void blkg_get(struct blkcg_gq *blkg)
263{ 264{
264 lockdep_assert_held(blkg->q->queue_lock); 265 WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
265 WARN_ON_ONCE(!blkg->refcnt); 266 atomic_inc(&blkg->refcnt);
266 blkg->refcnt++;
267} 267}
268 268
269void __blkg_release_rcu(struct rcu_head *rcu); 269void __blkg_release_rcu(struct rcu_head *rcu);
@@ -271,14 +271,11 @@ void __blkg_release_rcu(struct rcu_head *rcu);
271/** 271/**
272 * blkg_put - put a blkg reference 272 * blkg_put - put a blkg reference
273 * @blkg: blkg to put 273 * @blkg: blkg to put
274 *
275 * The caller should be holding queue_lock.
276 */ 274 */
277static inline void blkg_put(struct blkcg_gq *blkg) 275static inline void blkg_put(struct blkcg_gq *blkg)
278{ 276{
279 lockdep_assert_held(blkg->q->queue_lock); 277 WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
280 WARN_ON_ONCE(blkg->refcnt <= 0); 278 if (atomic_dec_and_test(&blkg->refcnt))
281 if (!--blkg->refcnt)
282 call_rcu(&blkg->rcu_head, __blkg_release_rcu); 279 call_rcu(&blkg->rcu_head, __blkg_release_rcu);
283} 280}
284 281
@@ -580,7 +577,7 @@ static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { ret
580static inline int blkcg_init_queue(struct request_queue *q) { return 0; } 577static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
581static inline void blkcg_drain_queue(struct request_queue *q) { } 578static inline void blkcg_drain_queue(struct request_queue *q) { }
582static inline void blkcg_exit_queue(struct request_queue *q) { } 579static inline void blkcg_exit_queue(struct request_queue *q) { }
583static inline int __init blkcg_policy_register(struct blkcg_policy *pol) { return 0; } 580static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
584static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { } 581static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
585static inline int blkcg_activate_policy(struct request_queue *q, 582static inline int blkcg_activate_policy(struct request_queue *q,
586 const struct blkcg_policy *pol) { return 0; } 583 const struct blkcg_policy *pol) { return 0; }
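The blkcg_gq hunks above convert a queue_lock-protected int refcount into an atomic_t, so blkg_get()/blkg_put() no longer need to hold the queue lock and the final put can defer the release. A user-space sketch of the same pattern, with C11 atomics standing in for the kernel's atomic_t API (illustrative names):

/*
 * Atomic refcount get/put: the last put owns teardown, with no
 * external lock needed around the counter itself.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
        atomic_int refcnt;
};

static struct obj *obj_alloc(void)
{
        struct obj *o = malloc(sizeof(*o));

        if (o)
                atomic_init(&o->refcnt, 1);     /* caller owns one ref */
        return o;
}

static void obj_get(struct obj *o)
{
        atomic_fetch_add(&o->refcnt, 1);
}

static void obj_put(struct obj *o)
{
        /* atomic_dec_and_test() equivalent: the last put frees. */
        if (atomic_fetch_sub(&o->refcnt, 1) == 1) {
                /* In the kernel this would go through call_rcu(). */
                free(o);
        }
}

int main(void)
{
        struct obj *o = obj_alloc();

        if (!o)
                return 1;

        obj_get(o);     /* refcnt: 2 */
        obj_put(o);     /* refcnt: 1 */
        obj_put(o);     /* refcnt: 0 -> freed */
        return 0;
}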
diff --git a/block/blk-merge.c b/block/blk-merge.c
index b3bf0df0f4c2..54535831f1e1 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -568,6 +568,8 @@ int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
568 568
569bool blk_rq_merge_ok(struct request *rq, struct bio *bio) 569bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
570{ 570{
571 struct request_queue *q = rq->q;
572
571 if (!rq_mergeable(rq) || !bio_mergeable(bio)) 573 if (!rq_mergeable(rq) || !bio_mergeable(bio))
572 return false; 574 return false;
573 575
@@ -591,6 +593,14 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
591 !blk_write_same_mergeable(rq->bio, bio)) 593 !blk_write_same_mergeable(rq->bio, bio))
592 return false; 594 return false;
593 595
596 if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS)) {
597 struct bio_vec *bprev;
598
599 bprev = &rq->biotail->bi_io_vec[bio->bi_vcnt - 1];
600 if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset))
601 return false;
602 }
603
594 return true; 604 return true;
595} 605}
596 606
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 0ef2dc7f01bf..ad69ef657e85 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -878,7 +878,7 @@ void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
878 clear_bit(BLK_MQ_S_STOPPED, &hctx->state); 878 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
879 879
880 preempt_disable(); 880 preempt_disable();
881 __blk_mq_run_hw_queue(hctx); 881 blk_mq_run_hw_queue(hctx, false);
882 preempt_enable(); 882 preempt_enable();
883} 883}
884EXPORT_SYMBOL(blk_mq_start_hw_queue); 884EXPORT_SYMBOL(blk_mq_start_hw_queue);
diff --git a/block/elevator.c b/block/elevator.c
index 34bded18910e..24c28b659bb3 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -825,7 +825,7 @@ void elv_unregister_queue(struct request_queue *q)
825} 825}
826EXPORT_SYMBOL(elv_unregister_queue); 826EXPORT_SYMBOL(elv_unregister_queue);
827 827
828int __init elv_register(struct elevator_type *e) 828int elv_register(struct elevator_type *e)
829{ 829{
830 char *def = ""; 830 char *def = "";
831 831
diff --git a/crypto/Kconfig b/crypto/Kconfig
index ce4012a58781..749b1e05c490 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -540,6 +540,17 @@ config CRYPTO_SHA1_ARM
540 SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented 540 SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
541 using optimized ARM assembler. 541 using optimized ARM assembler.
542 542
543config CRYPTO_SHA1_ARM_NEON
544 tristate "SHA1 digest algorithm (ARM NEON)"
545 depends on ARM && KERNEL_MODE_NEON && !CPU_BIG_ENDIAN
546 select CRYPTO_SHA1_ARM
547 select CRYPTO_SHA1
548 select CRYPTO_HASH
549 help
550 SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
551 using optimized ARM NEON assembly, when NEON instructions are
552 available.
553
543config CRYPTO_SHA1_PPC 554config CRYPTO_SHA1_PPC
544 tristate "SHA1 digest algorithm (powerpc)" 555 tristate "SHA1 digest algorithm (powerpc)"
545 depends on PPC 556 depends on PPC
@@ -589,6 +600,21 @@ config CRYPTO_SHA512_SPARC64
589 SHA-512 secure hash standard (DFIPS 180-2) implemented 600 SHA-512 secure hash standard (DFIPS 180-2) implemented
590 using sparc64 crypto instructions, when available. 601 using sparc64 crypto instructions, when available.
591 602
603config CRYPTO_SHA512_ARM_NEON
604 tristate "SHA384 and SHA512 digest algorithm (ARM NEON)"
605 depends on ARM && KERNEL_MODE_NEON && !CPU_BIG_ENDIAN
606 select CRYPTO_SHA512
607 select CRYPTO_HASH
608 help
609 SHA-512 secure hash standard (DFIPS 180-2) implemented
610 using ARM NEON instructions, when available.
611
612 This version of SHA implements a 512 bit hash with 256 bits of
613 security against collision attacks.
614
615 This code also includes SHA-384, a 384 bit hash with 192 bits
616 of security against collision attacks.
617
592config CRYPTO_TGR192 618config CRYPTO_TGR192
593 tristate "Tiger digest algorithms" 619 tristate "Tiger digest algorithms"
594 select CRYPTO_HASH 620 select CRYPTO_HASH
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 83969f8c5727..6467c919c509 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -176,14 +176,24 @@ static int __init cma_activate_area(struct cma *cma)
176 base_pfn = pfn; 176 base_pfn = pfn;
177 for (j = pageblock_nr_pages; j; --j, pfn++) { 177 for (j = pageblock_nr_pages; j; --j, pfn++) {
178 WARN_ON_ONCE(!pfn_valid(pfn)); 178 WARN_ON_ONCE(!pfn_valid(pfn));
179 /*
180 * alloc_contig_range requires the pfn range
181 * specified to be in the same zone. Make this
182 * simple by forcing the entire CMA resv range
183 * to be in the same zone.
184 */
179 if (page_zone(pfn_to_page(pfn)) != zone) 185 if (page_zone(pfn_to_page(pfn)) != zone)
180 return -EINVAL; 186 goto err;
181 } 187 }
182 init_cma_reserved_pageblock(pfn_to_page(base_pfn)); 188 init_cma_reserved_pageblock(pfn_to_page(base_pfn));
183 } while (--i); 189 } while (--i);
184 190
185 mutex_init(&cma->lock); 191 mutex_init(&cma->lock);
186 return 0; 192 return 0;
193
194err:
195 kfree(cma->bitmap);
196 return -EINVAL;
187} 197}
188 198
189static struct cma cma_areas[MAX_CMA_AREAS]; 199static struct cma cma_areas[MAX_CMA_AREAS];
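The cma_activate_area() fix above routes the failure path through an err label so the already-allocated bitmap is freed instead of leaking. A minimal sketch of that unwind idiom (illustrative names, not the CMA code):

/*
 * goto-based cleanup: once the function owns an allocation, every
 * later failure path releases it through a single label.
 */
#include <stdlib.h>
#include <errno.h>

struct area {
        unsigned long *bitmap;
};

static int area_activate(struct area *a, int should_fail)
{
        a->bitmap = calloc(8, sizeof(*a->bitmap));
        if (!a->bitmap)
                return -ENOMEM;

        if (should_fail)        /* e.g. pages span different zones */
                goto err;

        return 0;

err:
        free(a->bitmap);        /* don't leak the bitmap on failure */
        a->bitmap = NULL;
        return -EINVAL;
}

int main(void)
{
        struct area a;

        return area_activate(&a, 1) == -EINVAL ? 0 : 1;
}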
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index b6c8aaf4931b..5b17ec88ea05 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1337,8 +1337,11 @@ int drbd_submit_peer_request(struct drbd_device *device,
1337 return 0; 1337 return 0;
1338 } 1338 }
1339 1339
1340 /* Discards don't have any payload.
1341 * But the scsi layer still expects a bio_vec it can use internally,
1342 * see sd_setup_discard_cmnd() and blk_add_request_payload(). */
1340 if (peer_req->flags & EE_IS_TRIM) 1343 if (peer_req->flags & EE_IS_TRIM)
1341 nr_pages = 0; /* discards don't have any payload. */ 1344 nr_pages = 1;
1342 1345
1343 /* In most cases, we will only need one bio. But in case the lower 1346 /* In most cases, we will only need one bio. But in case the lower
1344 * level restrictions happen to be different at this offset on this 1347 * level restrictions happen to be different at this offset on this
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 677db049f55a..56d46ffb08e1 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3777,7 +3777,7 @@ static void floppy_rb0_cb(struct bio *bio, int err)
3777 int drive = cbdata->drive; 3777 int drive = cbdata->drive;
3778 3778
3779 if (err) { 3779 if (err) {
3780 pr_info("floppy: error %d while reading block 0", err); 3780 pr_info("floppy: error %d while reading block 0\n", err);
3781 set_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags); 3781 set_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
3782 } 3782 }
3783 complete(&cbdata->complete); 3783 complete(&cbdata->complete);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index bbeb404b3a07..b2c98c1bc037 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1431,6 +1431,14 @@ static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
1431 return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0; 1431 return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
1432} 1432}
1433 1433
1434static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request)
1435{
1436 struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
1437
1438 return obj_request->img_offset <
1439 round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header));
1440}
1441
1434static void rbd_obj_request_get(struct rbd_obj_request *obj_request) 1442static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
1435{ 1443{
1436 dout("%s: obj %p (was %d)\n", __func__, obj_request, 1444 dout("%s: obj %p (was %d)\n", __func__, obj_request,
@@ -2748,7 +2756,7 @@ static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
2748 */ 2756 */
2749 if (!img_request_write_test(img_request) || 2757 if (!img_request_write_test(img_request) ||
2750 !img_request_layered_test(img_request) || 2758 !img_request_layered_test(img_request) ||
2751 rbd_dev->parent_overlap <= obj_request->img_offset || 2759 !obj_request_overlaps_parent(obj_request) ||
2752 ((known = obj_request_known_test(obj_request)) && 2760 ((known = obj_request_known_test(obj_request)) &&
2753 obj_request_exists_test(obj_request))) { 2761 obj_request_exists_test(obj_request))) {
2754 2762
diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c
index 60e5a170c4d2..e6833771a716 100644
--- a/drivers/clocksource/arm_global_timer.c
+++ b/drivers/clocksource/arm_global_timer.c
@@ -250,7 +250,7 @@ static void __init global_timer_of_register(struct device_node *np)
250 * fire when the timer value is greater than or equal to. In previous 250 * fire when the timer value is greater than or equal to. In previous
251 * revisions the comparators fired when the timer value was equal to. 251 * revisions the comparators fired when the timer value was equal to.
252 */ 252 */
253 if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9 253 if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9
254 && (read_cpuid_id() & 0xf0000f) < 0x200000) { 254 && (read_cpuid_id() & 0xf0000f) < 0x200000) {
255 pr_warn("global-timer: non support for this cpu version.\n"); 255 pr_warn("global-timer: non support for this cpu version.\n");
256 return; 256 return;
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index 8d6420013a04..f71d55f5e6e5 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -153,13 +153,10 @@ static void exynos4_mct_write(unsigned int value, unsigned long offset)
153} 153}
154 154
155/* Clocksource handling */ 155/* Clocksource handling */
156static void exynos4_mct_frc_start(u32 hi, u32 lo) 156static void exynos4_mct_frc_start(void)
157{ 157{
158 u32 reg; 158 u32 reg;
159 159
160 exynos4_mct_write(lo, EXYNOS4_MCT_G_CNT_L);
161 exynos4_mct_write(hi, EXYNOS4_MCT_G_CNT_U);
162
163 reg = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON); 160 reg = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON);
164 reg |= MCT_G_TCON_START; 161 reg |= MCT_G_TCON_START;
165 exynos4_mct_write(reg, EXYNOS4_MCT_G_TCON); 162 exynos4_mct_write(reg, EXYNOS4_MCT_G_TCON);
@@ -181,7 +178,7 @@ static cycle_t exynos4_frc_read(struct clocksource *cs)
181 178
182static void exynos4_frc_resume(struct clocksource *cs) 179static void exynos4_frc_resume(struct clocksource *cs)
183{ 180{
184 exynos4_mct_frc_start(0, 0); 181 exynos4_mct_frc_start();
185} 182}
186 183
187struct clocksource mct_frc = { 184struct clocksource mct_frc = {
@@ -200,7 +197,7 @@ static u64 notrace exynos4_read_sched_clock(void)
200 197
201static void __init exynos4_clocksource_init(void) 198static void __init exynos4_clocksource_init(void)
202{ 199{
203 exynos4_mct_frc_start(0, 0); 200 exynos4_mct_frc_start();
204 201
205 if (clocksource_register_hz(&mct_frc, clk_rate)) 202 if (clocksource_register_hz(&mct_frc, clk_rate))
206 panic("%s: can't register clocksource\n", mct_frc.name); 203 panic("%s: can't register clocksource\n", mct_frc.name);
diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
index 4b9dc836dcf9..e992abc5ef26 100644
--- a/drivers/firmware/efi/efi-pstore.c
+++ b/drivers/firmware/efi/efi-pstore.c
@@ -40,7 +40,7 @@ struct pstore_read_data {
40static inline u64 generic_id(unsigned long timestamp, 40static inline u64 generic_id(unsigned long timestamp,
41 unsigned int part, int count) 41 unsigned int part, int count)
42{ 42{
43 return (timestamp * 100 + part) * 1000 + count; 43 return ((u64) timestamp * 100 + part) * 1000 + count;
44} 44}
45 45
46static int efi_pstore_read_func(struct efivar_entry *entry, void *data) 46static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
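The efi-pstore change above widens the id arithmetic with a (u64) cast; without it, timestamp * 100 is computed in unsigned long, which wraps on 32-bit builds. A worked user-space example of the difference (assuming a 32-bit unsigned long, modelled here with uint32_t):

/*
 * Widen-before-multiply versus multiply-then-widen: the first keeps
 * the full 64-bit id, the second silently wraps at 2^32.
 */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
        uint32_t timestamp = 1404172800;        /* a 2014-era time_t */
        unsigned int part = 1, count = 2;

        /* 32-bit arithmetic wraps before the result is widened. */
        uint64_t bad  = (uint64_t)((timestamp * 100 + part) * 1000 + count);

        /* Widen first, then multiply: what the (u64) cast does. */
        uint64_t good = ((uint64_t)timestamp * 100 + part) * 1000 + count;

        printf("wrapped id: %" PRIu64 "\n", bad);
        printf("correct id: %" PRIu64 "\n", good);
        return 0;
}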
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index cd36deb619fa..eff1a2f22f09 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -353,10 +353,10 @@ static int __init fdt_find_uefi_params(unsigned long node, const char *uname,
353 int depth, void *data) 353 int depth, void *data)
354{ 354{
355 struct param_info *info = data; 355 struct param_info *info = data;
356 void *prop, *dest; 356 const void *prop;
357 unsigned long len; 357 void *dest;
358 u64 val; 358 u64 val;
359 int i; 359 int i, len;
360 360
361 if (depth != 1 || 361 if (depth != 1 ||
362 (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0)) 362 (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
diff --git a/drivers/firmware/efi/fdt.c b/drivers/firmware/efi/fdt.c
index 5c6a8e8a9580..82d774161cc9 100644
--- a/drivers/firmware/efi/fdt.c
+++ b/drivers/firmware/efi/fdt.c
@@ -63,7 +63,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
63 */ 63 */
64 prev = 0; 64 prev = 0;
65 for (;;) { 65 for (;;) {
66 const char *type, *name; 66 const char *type;
67 int len; 67 int len;
68 68
69 node = fdt_next_node(fdt, prev, NULL); 69 node = fdt_next_node(fdt, prev, NULL);
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 03711d00aaae..8218078b6133 100644..100755
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -419,8 +419,9 @@ long drm_ioctl(struct file *filp,
419 retcode = -EFAULT; 419 retcode = -EFAULT;
420 goto err_i1; 420 goto err_i1;
421 } 421 }
422 } else 422 } else if (cmd & IOC_OUT) {
423 memset(kdata, 0, usize); 423 memset(kdata, 0, usize);
424 }
424 425
425 if (ioctl->flags & DRM_UNLOCKED) 426 if (ioctl->flags & DRM_UNLOCKED)
426 retcode = func(dev, kdata, file_priv); 427 retcode = func(dev, kdata, file_priv);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 482127f633c5..9e530f205ad2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -40,7 +40,7 @@ exynos_dpi_detect(struct drm_connector *connector, bool force)
40{ 40{
41 struct exynos_dpi *ctx = connector_to_dpi(connector); 41 struct exynos_dpi *ctx = connector_to_dpi(connector);
42 42
43 if (!ctx->panel->connector) 43 if (ctx->panel && !ctx->panel->connector)
44 drm_panel_attach(ctx->panel, &ctx->connector); 44 drm_panel_attach(ctx->panel, &ctx->connector);
45 45
46 return connector_status_connected; 46 return connector_status_connected;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index d91f27777537..ab7d182063c3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -765,24 +765,24 @@ static int exynos_drm_init(void)
765 765
766 return 0; 766 return 0;
767 767
768err_unregister_pd:
769 platform_device_unregister(exynos_drm_pdev);
770
771err_remove_vidi: 768err_remove_vidi:
772#ifdef CONFIG_DRM_EXYNOS_VIDI 769#ifdef CONFIG_DRM_EXYNOS_VIDI
773 exynos_drm_remove_vidi(); 770 exynos_drm_remove_vidi();
771
772err_unregister_pd:
774#endif 773#endif
774 platform_device_unregister(exynos_drm_pdev);
775 775
776 return ret; 776 return ret;
777} 777}
778 778
779static void exynos_drm_exit(void) 779static void exynos_drm_exit(void)
780{ 780{
781 platform_driver_unregister(&exynos_drm_platform_driver);
781#ifdef CONFIG_DRM_EXYNOS_VIDI 782#ifdef CONFIG_DRM_EXYNOS_VIDI
782 exynos_drm_remove_vidi(); 783 exynos_drm_remove_vidi();
783#endif 784#endif
784 platform_device_unregister(exynos_drm_pdev); 785 platform_device_unregister(exynos_drm_pdev);
785 platform_driver_unregister(&exynos_drm_platform_driver);
786} 786}
787 787
788module_init(exynos_drm_init); 788module_init(exynos_drm_init);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 36535f398848..06cde4506278 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -343,7 +343,7 @@ struct exynos_drm_display * exynos_dpi_probe(struct device *dev);
343int exynos_dpi_remove(struct device *dev); 343int exynos_dpi_remove(struct device *dev);
344#else 344#else
345static inline struct exynos_drm_display * 345static inline struct exynos_drm_display *
346exynos_dpi_probe(struct device *dev) { return 0; } 346exynos_dpi_probe(struct device *dev) { return NULL; }
347static inline int exynos_dpi_remove(struct device *dev) { return 0; } 347static inline int exynos_dpi_remove(struct device *dev) { return 0; }
348#endif 348#endif
349 349
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index bb45ab2e7384..33161ad38201 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -741,6 +741,8 @@ static void fimd_apply(struct exynos_drm_manager *mgr)
741 win_data = &ctx->win_data[i]; 741 win_data = &ctx->win_data[i];
742 if (win_data->enabled) 742 if (win_data->enabled)
743 fimd_win_commit(mgr, i); 743 fimd_win_commit(mgr, i);
744 else
745 fimd_win_disable(mgr, i);
744 } 746 }
745 747
746 fimd_commit(mgr); 748 fimd_commit(mgr);
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index c104d0c9b385..aa259b0a873a 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -2090,6 +2090,11 @@ out:
2090 2090
2091static void hdmi_dpms(struct exynos_drm_display *display, int mode) 2091static void hdmi_dpms(struct exynos_drm_display *display, int mode)
2092{ 2092{
2093 struct hdmi_context *hdata = display->ctx;
2094 struct drm_encoder *encoder = hdata->encoder;
2095 struct drm_crtc *crtc = encoder->crtc;
2096 struct drm_crtc_helper_funcs *funcs = NULL;
2097
2093 DRM_DEBUG_KMS("mode %d\n", mode); 2098 DRM_DEBUG_KMS("mode %d\n", mode);
2094 2099
2095 switch (mode) { 2100 switch (mode) {
@@ -2099,6 +2104,20 @@ static void hdmi_dpms(struct exynos_drm_display *display, int mode)
2099 case DRM_MODE_DPMS_STANDBY: 2104 case DRM_MODE_DPMS_STANDBY:
2100 case DRM_MODE_DPMS_SUSPEND: 2105 case DRM_MODE_DPMS_SUSPEND:
2101 case DRM_MODE_DPMS_OFF: 2106 case DRM_MODE_DPMS_OFF:
2107 /*
2108 * The SFRs of VP and Mixer are updated by Vertical Sync of
2109 * Timing generator which is a part of HDMI so the sequence
2110 * to disable TV Subsystem should be as following,
2111 * VP -> Mixer -> HDMI

2112 *
2113 * Below codes will try to disable Mixer and VP(if used)
2114 * prior to disabling HDMI.
2115 */
2116 if (crtc)
2117 funcs = crtc->helper_private;
2118 if (funcs && funcs->dpms)
2119 (*funcs->dpms)(crtc, mode);
2120
2102 hdmi_poweroff(display); 2121 hdmi_poweroff(display);
2103 break; 2122 break;
2104 default: 2123 default:
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 4c5aed7e54c8..7529946d0a74 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -377,6 +377,20 @@ static void mixer_run(struct mixer_context *ctx)
377 mixer_regs_dump(ctx); 377 mixer_regs_dump(ctx);
378} 378}
379 379
380static void mixer_stop(struct mixer_context *ctx)
381{
382 struct mixer_resources *res = &ctx->mixer_res;
383 int timeout = 20;
384
385 mixer_reg_writemask(res, MXR_STATUS, 0, MXR_STATUS_REG_RUN);
386
387 while (!(mixer_reg_read(res, MXR_STATUS) & MXR_STATUS_REG_IDLE) &&
388 --timeout)
389 usleep_range(10000, 12000);
390
391 mixer_regs_dump(ctx);
392}
393
380static void vp_video_buffer(struct mixer_context *ctx, int win) 394static void vp_video_buffer(struct mixer_context *ctx, int win)
381{ 395{
382 struct mixer_resources *res = &ctx->mixer_res; 396 struct mixer_resources *res = &ctx->mixer_res;
@@ -497,13 +511,8 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
497static void mixer_layer_update(struct mixer_context *ctx) 511static void mixer_layer_update(struct mixer_context *ctx)
498{ 512{
499 struct mixer_resources *res = &ctx->mixer_res; 513 struct mixer_resources *res = &ctx->mixer_res;
500 u32 val;
501
502 val = mixer_reg_read(res, MXR_CFG);
503 514
504 /* allow one update per vsync only */ 515 mixer_reg_writemask(res, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
505 if (!(val & MXR_CFG_LAYER_UPDATE_COUNT_MASK))
506 mixer_reg_writemask(res, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
507} 516}
508 517
509static void mixer_graph_buffer(struct mixer_context *ctx, int win) 518static void mixer_graph_buffer(struct mixer_context *ctx, int win)
@@ -1010,6 +1019,8 @@ static void mixer_wait_for_vblank(struct exynos_drm_manager *mgr)
1010 } 1019 }
1011 mutex_unlock(&mixer_ctx->mixer_mutex); 1020 mutex_unlock(&mixer_ctx->mixer_mutex);
1012 1021
1022 drm_vblank_get(mgr->crtc->dev, mixer_ctx->pipe);
1023
1013 atomic_set(&mixer_ctx->wait_vsync_event, 1); 1024 atomic_set(&mixer_ctx->wait_vsync_event, 1);
1014 1025
1015 /* 1026 /*
@@ -1020,6 +1031,8 @@ static void mixer_wait_for_vblank(struct exynos_drm_manager *mgr)
1020 !atomic_read(&mixer_ctx->wait_vsync_event), 1031 !atomic_read(&mixer_ctx->wait_vsync_event),
1021 HZ/20)) 1032 HZ/20))
1022 DRM_DEBUG_KMS("vblank wait timed out.\n"); 1033 DRM_DEBUG_KMS("vblank wait timed out.\n");
1034
1035 drm_vblank_put(mgr->crtc->dev, mixer_ctx->pipe);
1023} 1036}
1024 1037
1025static void mixer_window_suspend(struct exynos_drm_manager *mgr) 1038static void mixer_window_suspend(struct exynos_drm_manager *mgr)
@@ -1061,7 +1074,7 @@ static void mixer_poweron(struct exynos_drm_manager *mgr)
1061 mutex_unlock(&ctx->mixer_mutex); 1074 mutex_unlock(&ctx->mixer_mutex);
1062 return; 1075 return;
1063 } 1076 }
1064 ctx->powered = true; 1077
1065 mutex_unlock(&ctx->mixer_mutex); 1078 mutex_unlock(&ctx->mixer_mutex);
1066 1079
1067 pm_runtime_get_sync(ctx->dev); 1080 pm_runtime_get_sync(ctx->dev);
@@ -1072,6 +1085,12 @@ static void mixer_poweron(struct exynos_drm_manager *mgr)
1072 clk_prepare_enable(res->sclk_mixer); 1085 clk_prepare_enable(res->sclk_mixer);
1073 } 1086 }
1074 1087
1088 mutex_lock(&ctx->mixer_mutex);
1089 ctx->powered = true;
1090 mutex_unlock(&ctx->mixer_mutex);
1091
1092 mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);
1093
1075 mixer_reg_write(res, MXR_INT_EN, ctx->int_en); 1094 mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
1076 mixer_win_reset(ctx); 1095 mixer_win_reset(ctx);
1077 1096
@@ -1084,14 +1103,21 @@ static void mixer_poweroff(struct exynos_drm_manager *mgr)
1084 struct mixer_resources *res = &ctx->mixer_res; 1103 struct mixer_resources *res = &ctx->mixer_res;
1085 1104
1086 mutex_lock(&ctx->mixer_mutex); 1105 mutex_lock(&ctx->mixer_mutex);
1087 if (!ctx->powered) 1106 if (!ctx->powered) {
1088 goto out; 1107 mutex_unlock(&ctx->mixer_mutex);
1108 return;
1109 }
1089 mutex_unlock(&ctx->mixer_mutex); 1110 mutex_unlock(&ctx->mixer_mutex);
1090 1111
1112 mixer_stop(ctx);
1091 mixer_window_suspend(mgr); 1113 mixer_window_suspend(mgr);
1092 1114
1093 ctx->int_en = mixer_reg_read(res, MXR_INT_EN); 1115 ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
1094 1116
1117 mutex_lock(&ctx->mixer_mutex);
1118 ctx->powered = false;
1119 mutex_unlock(&ctx->mixer_mutex);
1120
1095 clk_disable_unprepare(res->mixer); 1121 clk_disable_unprepare(res->mixer);
1096 if (ctx->vp_enabled) { 1122 if (ctx->vp_enabled) {
1097 clk_disable_unprepare(res->vp); 1123 clk_disable_unprepare(res->vp);
@@ -1099,12 +1125,6 @@ static void mixer_poweroff(struct exynos_drm_manager *mgr)
1099 } 1125 }
1100 1126
1101 pm_runtime_put_sync(ctx->dev); 1127 pm_runtime_put_sync(ctx->dev);
1102
1103 mutex_lock(&ctx->mixer_mutex);
1104 ctx->powered = false;
1105
1106out:
1107 mutex_unlock(&ctx->mixer_mutex);
1108} 1128}
1109 1129
1110static void mixer_dpms(struct exynos_drm_manager *mgr, int mode) 1130static void mixer_dpms(struct exynos_drm_manager *mgr, int mode)
diff --git a/drivers/gpu/drm/exynos/regs-mixer.h b/drivers/gpu/drm/exynos/regs-mixer.h
index 4537026bc385..5f32e1a29411 100644
--- a/drivers/gpu/drm/exynos/regs-mixer.h
+++ b/drivers/gpu/drm/exynos/regs-mixer.h
@@ -78,6 +78,7 @@
78#define MXR_STATUS_BIG_ENDIAN (1 << 3) 78#define MXR_STATUS_BIG_ENDIAN (1 << 3)
79#define MXR_STATUS_ENDIAN_MASK (1 << 3) 79#define MXR_STATUS_ENDIAN_MASK (1 << 3)
80#define MXR_STATUS_SYNC_ENABLE (1 << 2) 80#define MXR_STATUS_SYNC_ENABLE (1 << 2)
81#define MXR_STATUS_REG_IDLE (1 << 1)
81#define MXR_STATUS_REG_RUN (1 << 0) 82#define MXR_STATUS_REG_RUN (1 << 0)
82 83
83/* bits for MXR_CFG */ 84/* bits for MXR_CFG */
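The mixer_stop() helper added above clears MXR_STATUS_REG_RUN and then polls the new MXR_STATUS_REG_IDLE bit a bounded number of times rather than waiting forever. A user-space sketch of that poll-with-timeout idiom, with a simulated status register (illustrative names, not the Exynos mixer code):

/*
 * Request stop, then poll for idle with a retry budget; a zero
 * budget on exit means the engine never reported idle.
 */
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define STATUS_REG_RUN  (1u << 0)
#define STATUS_REG_IDLE (1u << 1)

static unsigned int fake_status = STATUS_REG_RUN;

static unsigned int reg_read(void)
{
        /* Pretend the hardware goes idle once RUN has been cleared. */
        if (!(fake_status & STATUS_REG_RUN))
                fake_status |= STATUS_REG_IDLE;
        return fake_status;
}

static bool stop_engine(void)
{
        int timeout = 20;

        fake_status &= ~STATUS_REG_RUN;         /* request stop */

        while (!(reg_read() & STATUS_REG_IDLE) && --timeout)
                usleep(10000);                  /* stands in for usleep_range() */

        return timeout != 0;                    /* false: never went idle */
}

int main(void)
{
        printf("stopped cleanly: %d\n", stop_engine());
        return 0;
}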
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 601caa88c092..b8c689202c40 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -446,7 +446,9 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
446 446
447 memset(&stats, 0, sizeof(stats)); 447 memset(&stats, 0, sizeof(stats));
448 stats.file_priv = file->driver_priv; 448 stats.file_priv = file->driver_priv;
449 spin_lock(&file->table_lock);
449 idr_for_each(&file->object_idr, per_file_stats, &stats); 450 idr_for_each(&file->object_idr, per_file_stats, &stats);
451 spin_unlock(&file->table_lock);
450 /* 452 /*
451 * Although we have a valid reference on file->pid, that does 453 * Although we have a valid reference on file->pid, that does
452 * not guarantee that the task_struct who called get_pid() is 454 * not guarantee that the task_struct who called get_pid() is
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 49414d30e8d4..a47fbf60b781 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -977,6 +977,8 @@ struct i915_power_well {
977 bool always_on; 977 bool always_on;
978 /* power well enable/disable usage count */ 978 /* power well enable/disable usage count */
979 int count; 979 int count;
980 /* cached hw enabled state */
981 bool hw_enabled;
980 unsigned long domains; 982 unsigned long domains;
981 unsigned long data; 983 unsigned long data;
982 const struct i915_power_well_ops *ops; 984 const struct i915_power_well_ops *ops;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 3ffe308d5893..a5ddf3bce9c3 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -598,6 +598,7 @@ static int do_switch(struct intel_engine_cs *ring,
598 struct intel_context *from = ring->last_context; 598 struct intel_context *from = ring->last_context;
599 struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(to); 599 struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(to);
600 u32 hw_flags = 0; 600 u32 hw_flags = 0;
601 bool uninitialized = false;
601 int ret, i; 602 int ret, i;
602 603
603 if (from != NULL && ring == &dev_priv->ring[RCS]) { 604 if (from != NULL && ring == &dev_priv->ring[RCS]) {
@@ -696,19 +697,20 @@ static int do_switch(struct intel_engine_cs *ring,
696 i915_gem_context_unreference(from); 697 i915_gem_context_unreference(from);
697 } 698 }
698 699
700 uninitialized = !to->is_initialized && from == NULL;
701 to->is_initialized = true;
702
699done: 703done:
700 i915_gem_context_reference(to); 704 i915_gem_context_reference(to);
701 ring->last_context = to; 705 ring->last_context = to;
702 to->last_ring = ring; 706 to->last_ring = ring;
703 707
704 if (ring->id == RCS && !to->is_initialized && from == NULL) { 708 if (uninitialized) {
705 ret = i915_gem_render_state_init(ring); 709 ret = i915_gem_render_state_init(ring);
706 if (ret) 710 if (ret)
707 DRM_ERROR("init render state: %d\n", ret); 711 DRM_ERROR("init render state: %d\n", ret);
708 } 712 }
709 713
710 to->is_initialized = true;
711
712 return 0; 714 return 0;
713 715
714unpin_out: 716unpin_out:
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 1ee98f121a00..827498e081df 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -315,9 +315,6 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
315 const struct bdb_lfp_backlight_data *backlight_data; 315 const struct bdb_lfp_backlight_data *backlight_data;
316 const struct bdb_lfp_backlight_data_entry *entry; 316 const struct bdb_lfp_backlight_data_entry *entry;
317 317
318 /* Err to enabling backlight if no backlight block. */
319 dev_priv->vbt.backlight.present = true;
320
321 backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT); 318 backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT);
322 if (!backlight_data) 319 if (!backlight_data)
323 return; 320 return;
@@ -1088,6 +1085,9 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
1088 1085
1089 dev_priv->vbt.crt_ddc_pin = GMBUS_PORT_VGADDC; 1086 dev_priv->vbt.crt_ddc_pin = GMBUS_PORT_VGADDC;
1090 1087
1088 /* Default to having backlight */
1089 dev_priv->vbt.backlight.present = true;
1090
1091 /* LFP panel data */ 1091 /* LFP panel data */
1092 dev_priv->vbt.lvds_dither = 1; 1092 dev_priv->vbt.lvds_dither = 1;
1093 dev_priv->vbt.lvds_vbt = 0; 1093 dev_priv->vbt.lvds_vbt = 0;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index efd3cf50cb0f..5f285fba4e41 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -4564,7 +4564,10 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
4564 if (intel_crtc->active) 4564 if (intel_crtc->active)
4565 return; 4565 return;
4566 4566
4567 vlv_prepare_pll(intel_crtc); 4567 is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);
4568
4569 if (!is_dsi && !IS_CHERRYVIEW(dev))
4570 vlv_prepare_pll(intel_crtc);
4568 4571
4569 /* Set up the display plane register */ 4572 /* Set up the display plane register */
4570 dspcntr = DISPPLANE_GAMMA_ENABLE; 4573 dspcntr = DISPPLANE_GAMMA_ENABLE;
@@ -4598,8 +4601,6 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
4598 if (encoder->pre_pll_enable) 4601 if (encoder->pre_pll_enable)
4599 encoder->pre_pll_enable(encoder); 4602 encoder->pre_pll_enable(encoder);
4600 4603
4601 is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);
4602
4603 if (!is_dsi) { 4604 if (!is_dsi) {
4604 if (IS_CHERRYVIEW(dev)) 4605 if (IS_CHERRYVIEW(dev))
4605 chv_enable_pll(intel_crtc); 4606 chv_enable_pll(intel_crtc);
@@ -12411,8 +12412,8 @@ intel_display_capture_error_state(struct drm_device *dev)
12411 12412
12412 for_each_pipe(i) { 12413 for_each_pipe(i) {
12413 error->pipe[i].power_domain_on = 12414 error->pipe[i].power_domain_on =
12414 intel_display_power_enabled_sw(dev_priv, 12415 intel_display_power_enabled_unlocked(dev_priv,
12415 POWER_DOMAIN_PIPE(i)); 12416 POWER_DOMAIN_PIPE(i));
12416 if (!error->pipe[i].power_domain_on) 12417 if (!error->pipe[i].power_domain_on)
12417 continue; 12418 continue;
12418 12419
@@ -12447,7 +12448,7 @@ intel_display_capture_error_state(struct drm_device *dev)
12447 enum transcoder cpu_transcoder = transcoders[i]; 12448 enum transcoder cpu_transcoder = transcoders[i];
12448 12449
12449 error->transcoder[i].power_domain_on = 12450 error->transcoder[i].power_domain_on =
12450 intel_display_power_enabled_sw(dev_priv, 12451 intel_display_power_enabled_unlocked(dev_priv,
12451 POWER_DOMAIN_TRANSCODER(cpu_transcoder)); 12452 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
12452 if (!error->transcoder[i].power_domain_on) 12453 if (!error->transcoder[i].power_domain_on)
12453 continue; 12454 continue;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index bda0ae3d80cc..eaa27ee9e367 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -950,8 +950,8 @@ int intel_power_domains_init(struct drm_i915_private *);
950void intel_power_domains_remove(struct drm_i915_private *); 950void intel_power_domains_remove(struct drm_i915_private *);
951bool intel_display_power_enabled(struct drm_i915_private *dev_priv, 951bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
952 enum intel_display_power_domain domain); 952 enum intel_display_power_domain domain);
953bool intel_display_power_enabled_sw(struct drm_i915_private *dev_priv, 953bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
954 enum intel_display_power_domain domain); 954 enum intel_display_power_domain domain);
955void intel_display_power_get(struct drm_i915_private *dev_priv, 955void intel_display_power_get(struct drm_i915_private *dev_priv,
956 enum intel_display_power_domain domain); 956 enum intel_display_power_domain domain);
957void intel_display_power_put(struct drm_i915_private *dev_priv, 957void intel_display_power_put(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 54242e4f6f4c..9ad0c6afc487 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -5603,8 +5603,8 @@ static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
5603 (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED); 5603 (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
5604} 5604}
5605 5605
5606bool intel_display_power_enabled_sw(struct drm_i915_private *dev_priv, 5606bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
5607 enum intel_display_power_domain domain) 5607 enum intel_display_power_domain domain)
5608{ 5608{
5609 struct i915_power_domains *power_domains; 5609 struct i915_power_domains *power_domains;
5610 struct i915_power_well *power_well; 5610 struct i915_power_well *power_well;
@@ -5615,16 +5615,19 @@ bool intel_display_power_enabled_sw(struct drm_i915_private *dev_priv,
5615 return false; 5615 return false;
5616 5616
5617 power_domains = &dev_priv->power_domains; 5617 power_domains = &dev_priv->power_domains;
5618
5618 is_enabled = true; 5619 is_enabled = true;
5620
5619 for_each_power_well_rev(i, power_well, BIT(domain), power_domains) { 5621 for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
5620 if (power_well->always_on) 5622 if (power_well->always_on)
5621 continue; 5623 continue;
5622 5624
5623 if (!power_well->count) { 5625 if (!power_well->hw_enabled) {
5624 is_enabled = false; 5626 is_enabled = false;
5625 break; 5627 break;
5626 } 5628 }
5627 } 5629 }
5630
5628 return is_enabled; 5631 return is_enabled;
5629} 5632}
5630 5633
@@ -5632,30 +5635,15 @@ bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
5632 enum intel_display_power_domain domain) 5635 enum intel_display_power_domain domain)
5633{ 5636{
5634 struct i915_power_domains *power_domains; 5637 struct i915_power_domains *power_domains;
5635 struct i915_power_well *power_well; 5638 bool ret;
5636 bool is_enabled;
5637 int i;
5638
5639 if (dev_priv->pm.suspended)
5640 return false;
5641 5639
5642 power_domains = &dev_priv->power_domains; 5640 power_domains = &dev_priv->power_domains;
5643 5641
5644 is_enabled = true;
5645
5646 mutex_lock(&power_domains->lock); 5642 mutex_lock(&power_domains->lock);
5647 for_each_power_well_rev(i, power_well, BIT(domain), power_domains) { 5643 ret = intel_display_power_enabled_unlocked(dev_priv, domain);
5648 if (power_well->always_on)
5649 continue;
5650
5651 if (!power_well->ops->is_enabled(dev_priv, power_well)) {
5652 is_enabled = false;
5653 break;
5654 }
5655 }
5656 mutex_unlock(&power_domains->lock); 5644 mutex_unlock(&power_domains->lock);
5657 5645
5658 return is_enabled; 5646 return ret;
5659} 5647}
5660 5648
5661/* 5649/*
@@ -5976,6 +5964,7 @@ void intel_display_power_get(struct drm_i915_private *dev_priv,
5976 if (!power_well->count++) { 5964 if (!power_well->count++) {
5977 DRM_DEBUG_KMS("enabling %s\n", power_well->name); 5965 DRM_DEBUG_KMS("enabling %s\n", power_well->name);
5978 power_well->ops->enable(dev_priv, power_well); 5966 power_well->ops->enable(dev_priv, power_well);
5967 power_well->hw_enabled = true;
5979 } 5968 }
5980 5969
5981 check_power_well_state(dev_priv, power_well); 5970 check_power_well_state(dev_priv, power_well);
@@ -6005,6 +5994,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
6005 5994
6006 if (!--power_well->count && i915.disable_power_well) { 5995 if (!--power_well->count && i915.disable_power_well) {
6007 DRM_DEBUG_KMS("disabling %s\n", power_well->name); 5996 DRM_DEBUG_KMS("disabling %s\n", power_well->name);
5997 power_well->hw_enabled = false;
6008 power_well->ops->disable(dev_priv, power_well); 5998 power_well->ops->disable(dev_priv, power_well);
6009 } 5999 }
6010 6000
@@ -6267,8 +6257,11 @@ static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
6267 int i; 6257 int i;
6268 6258
6269 mutex_lock(&power_domains->lock); 6259 mutex_lock(&power_domains->lock);
6270 for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) 6260 for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
6271 power_well->ops->sync_hw(dev_priv, power_well); 6261 power_well->ops->sync_hw(dev_priv, power_well);
6262 power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
6263 power_well);
6264 }
6272 mutex_unlock(&power_domains->lock); 6265 mutex_unlock(&power_domains->lock);
6273} 6266}
6274 6267
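The intel_pm.c hunks above split the power check into intel_display_power_enabled_unlocked(), which assumes power_domains->lock is already held, and a public wrapper that just takes the lock around it, so callers that already hold the lock can use the helper without deadlocking. A small sketch of that locked/_unlocked split, with a pthread mutex standing in for the power-domains mutex (illustrative names):

/*
 * _unlocked helper plus a locking wrapper: the helper documents its
 * locking requirement and never takes the lock itself.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool hw_enabled = true;

/* Caller must hold @lock. */
static bool power_enabled_unlocked(void)
{
        return hw_enabled;
}

static bool power_enabled(void)
{
        bool ret;

        pthread_mutex_lock(&lock);
        ret = power_enabled_unlocked();
        pthread_mutex_unlock(&lock);

        return ret;
}

int main(void)
{
        printf("enabled: %d\n", power_enabled());
        return 0;
}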
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index ae750f6928c1..7f7aadef8a82 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -277,6 +277,7 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
277 static const char *hpd_reg_names[] = {"hpd-gdsc", "hpd-5v"}; 277 static const char *hpd_reg_names[] = {"hpd-gdsc", "hpd-5v"};
278 static const char *pwr_reg_names[] = {"core-vdda", "core-vcc"}; 278 static const char *pwr_reg_names[] = {"core-vdda", "core-vcc"};
279 static const char *hpd_clk_names[] = {"iface_clk", "core_clk", "mdp_core_clk"}; 279 static const char *hpd_clk_names[] = {"iface_clk", "core_clk", "mdp_core_clk"};
280 static unsigned long hpd_clk_freq[] = {0, 19200000, 0};
280 static const char *pwr_clk_names[] = {"extp_clk", "alt_iface_clk"}; 281 static const char *pwr_clk_names[] = {"extp_clk", "alt_iface_clk"};
281 282
282 config.phy_init = hdmi_phy_8x74_init; 283 config.phy_init = hdmi_phy_8x74_init;
@@ -286,6 +287,7 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
286 config.pwr_reg_names = pwr_reg_names; 287 config.pwr_reg_names = pwr_reg_names;
287 config.pwr_reg_cnt = ARRAY_SIZE(pwr_reg_names); 288 config.pwr_reg_cnt = ARRAY_SIZE(pwr_reg_names);
288 config.hpd_clk_names = hpd_clk_names; 289 config.hpd_clk_names = hpd_clk_names;
290 config.hpd_freq = hpd_clk_freq;
289 config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names); 291 config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
290 config.pwr_clk_names = pwr_clk_names; 292 config.pwr_clk_names = pwr_clk_names;
291 config.pwr_clk_cnt = ARRAY_SIZE(pwr_clk_names); 293 config.pwr_clk_cnt = ARRAY_SIZE(pwr_clk_names);
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index 9fafee6a3e43..9d7723c6528a 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -87,6 +87,7 @@ struct hdmi_platform_config {
87 87
88 /* clks that need to be on for hpd: */ 88 /* clks that need to be on for hpd: */
89 const char **hpd_clk_names; 89 const char **hpd_clk_names;
90 const long unsigned *hpd_freq;
90 int hpd_clk_cnt; 91 int hpd_clk_cnt;
91 92
92 /* clks that need to be on for screen pwr (ie pixel clk): */ 93 /* clks that need to be on for screen pwr (ie pixel clk): */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index e56a6196867c..28f7e3ec6c28 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -127,6 +127,14 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
127 } 127 }
128 128
129 for (i = 0; i < config->hpd_clk_cnt; i++) { 129 for (i = 0; i < config->hpd_clk_cnt; i++) {
130 if (config->hpd_freq && config->hpd_freq[i]) {
131 ret = clk_set_rate(hdmi->hpd_clks[i],
132 config->hpd_freq[i]);
133 if (ret)
134 dev_warn(dev->dev, "failed to set clk %s (%d)\n",
135 config->hpd_clk_names[i], ret);
136 }
137
130 ret = clk_prepare_enable(hdmi->hpd_clks[i]); 138 ret = clk_prepare_enable(hdmi->hpd_clks[i]);
131 if (ret) { 139 if (ret) {
132 dev_err(dev->dev, "failed to enable hpd clk: %s (%d)\n", 140 dev_err(dev->dev, "failed to enable hpd clk: %s (%d)\n",
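The hpd_enable() hunk above programs an optional per-clock rate with clk_set_rate() before clk_prepare_enable(), warning rather than failing if the rate cannot be set. A hedged sketch of the same sequence against the common clock API; enable_clocks(), clks and freqs are invented here, not fields of the msm driver:

    /*
     * Sketch only: optionally program a clock's rate before enabling it,
     * as the hpd_enable() hunk above does. enable_clocks(), clks and
     * freqs are invented, not fields of the msm driver.
     */
    #include <linux/clk.h>
    #include <linux/device.h>

    static int enable_clocks(struct device *dev, struct clk **clks,
                             const unsigned long *freqs, int n)
    {
            int i, ret;

            for (i = 0; i < n; i++) {
                    if (freqs && freqs[i]) {
                            ret = clk_set_rate(clks[i], freqs[i]);
                            if (ret)
                                    dev_warn(dev, "clk %d: set_rate failed (%d)\n",
                                             i, ret);
                    }
                    ret = clk_prepare_enable(clks[i]);
                    if (ret) {
                            dev_err(dev, "clk %d: enable failed (%d)\n", i, ret);
                            while (--i >= 0)        /* unwind what was enabled */
                                    clk_disable_unprepare(clks[i]);
                            return ret;
                    }
            }
            return 0;
    }

The unwind loop on failure is part of the sketch, not of the hunk above; it simply keeps prepare/enable counts balanced.
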
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 42caf7fcb0b9..71510ee26e96 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -20,6 +20,10 @@
20#include "msm_mmu.h" 20#include "msm_mmu.h"
21#include "mdp5_kms.h" 21#include "mdp5_kms.h"
22 22
23static const char *iommu_ports[] = {
24 "mdp_0",
25};
26
23static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev); 27static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev);
24 28
25static int mdp5_hw_init(struct msm_kms *kms) 29static int mdp5_hw_init(struct msm_kms *kms)
@@ -104,6 +108,12 @@ static void mdp5_preclose(struct msm_kms *kms, struct drm_file *file)
104static void mdp5_destroy(struct msm_kms *kms) 108static void mdp5_destroy(struct msm_kms *kms)
105{ 109{
106 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); 110 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
111 struct msm_mmu *mmu = mdp5_kms->mmu;
112
113 if (mmu) {
114 mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
115 mmu->funcs->destroy(mmu);
116 }
107 kfree(mdp5_kms); 117 kfree(mdp5_kms);
108} 118}
109 119
@@ -216,10 +226,6 @@ fail:
216 return ret; 226 return ret;
217} 227}
218 228
219static const char *iommu_ports[] = {
220 "mdp_0",
221};
222
223static int get_clk(struct platform_device *pdev, struct clk **clkp, 229static int get_clk(struct platform_device *pdev, struct clk **clkp,
224 const char *name) 230 const char *name)
225{ 231{
@@ -317,17 +323,23 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
317 mmu = msm_iommu_new(dev, config->iommu); 323 mmu = msm_iommu_new(dev, config->iommu);
318 if (IS_ERR(mmu)) { 324 if (IS_ERR(mmu)) {
319 ret = PTR_ERR(mmu); 325 ret = PTR_ERR(mmu);
326 dev_err(dev->dev, "failed to init iommu: %d\n", ret);
320 goto fail; 327 goto fail;
321 } 328 }
329
322 ret = mmu->funcs->attach(mmu, iommu_ports, 330 ret = mmu->funcs->attach(mmu, iommu_ports,
323 ARRAY_SIZE(iommu_ports)); 331 ARRAY_SIZE(iommu_ports));
324 if (ret) 332 if (ret) {
333 dev_err(dev->dev, "failed to attach iommu: %d\n", ret);
334 mmu->funcs->destroy(mmu);
325 goto fail; 335 goto fail;
336 }
326 } else { 337 } else {
327 dev_info(dev->dev, "no iommu, fallback to phys " 338 dev_info(dev->dev, "no iommu, fallback to phys "
328 "contig buffers for scanout\n"); 339 "contig buffers for scanout\n");
329 mmu = NULL; 340 mmu = NULL;
330 } 341 }
342 mdp5_kms->mmu = mmu;
331 343
332 mdp5_kms->id = msm_register_mmu(dev, mmu); 344 mdp5_kms->id = msm_register_mmu(dev, mmu);
333 if (mdp5_kms->id < 0) { 345 if (mdp5_kms->id < 0) {
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index c8b1a2522c25..6e981b692d1d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -33,6 +33,7 @@ struct mdp5_kms {
33 33
34 /* mapper-id used to request GEM buffer mapped for scanout: */ 34 /* mapper-id used to request GEM buffer mapped for scanout: */
35 int id; 35 int id;
36 struct msm_mmu *mmu;
36 37
37 /* for tracking smp allocation amongst pipes: */ 38 /* for tracking smp allocation amongst pipes: */
38 mdp5_smp_state_t smp_state; 39 mdp5_smp_state_t smp_state;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 0d2562fb681e..9a5d87db5c23 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -159,7 +159,7 @@ static int msm_unload(struct drm_device *dev)
159static int get_mdp_ver(struct platform_device *pdev) 159static int get_mdp_ver(struct platform_device *pdev)
160{ 160{
161#ifdef CONFIG_OF 161#ifdef CONFIG_OF
162 const static struct of_device_id match_types[] = { { 162 static const struct of_device_id match_types[] = { {
163 .compatible = "qcom,mdss_mdp", 163 .compatible = "qcom,mdss_mdp",
164 .data = (void *)5, 164 .data = (void *)5,
165 }, { 165 }, {
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index a752ab83b810..5107fc4826bc 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -59,7 +59,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
59 struct drm_framebuffer *fb = NULL; 59 struct drm_framebuffer *fb = NULL;
60 struct fb_info *fbi = NULL; 60 struct fb_info *fbi = NULL;
61 struct drm_mode_fb_cmd2 mode_cmd = {0}; 61 struct drm_mode_fb_cmd2 mode_cmd = {0};
62 dma_addr_t paddr; 62 uint32_t paddr;
63 int ret, size; 63 int ret, size;
64 64
65 sizes->surface_bpp = 32; 65 sizes->surface_bpp = 32;
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index bb8026daebc9..690d7e7b6d1e 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -278,6 +278,7 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
278 uint32_t *iova) 278 uint32_t *iova)
279{ 279{
280 struct msm_gem_object *msm_obj = to_msm_bo(obj); 280 struct msm_gem_object *msm_obj = to_msm_bo(obj);
281 struct drm_device *dev = obj->dev;
281 int ret = 0; 282 int ret = 0;
282 283
283 if (!msm_obj->domain[id].iova) { 284 if (!msm_obj->domain[id].iova) {
@@ -285,6 +286,11 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
285 struct msm_mmu *mmu = priv->mmus[id]; 286 struct msm_mmu *mmu = priv->mmus[id];
286 struct page **pages = get_pages(obj); 287 struct page **pages = get_pages(obj);
287 288
289 if (!mmu) {
290 dev_err(dev->dev, "null MMU pointer\n");
291 return -EINVAL;
292 }
293
288 if (IS_ERR(pages)) 294 if (IS_ERR(pages))
289 return PTR_ERR(pages); 295 return PTR_ERR(pages);
290 296
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 92b745986231..4b2ad9181edf 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -28,7 +28,7 @@ static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
28 unsigned long iova, int flags, void *arg) 28 unsigned long iova, int flags, void *arg)
29{ 29{
30 DBG("*** fault: iova=%08lx, flags=%d", iova, flags); 30 DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
31 return 0; 31 return -ENOSYS;
32} 32}
33 33
34static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt) 34static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
@@ -40,8 +40,10 @@ static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
40 for (i = 0; i < cnt; i++) { 40 for (i = 0; i < cnt; i++) {
41 struct device *msm_iommu_get_ctx(const char *ctx_name); 41 struct device *msm_iommu_get_ctx(const char *ctx_name);
42 struct device *ctx = msm_iommu_get_ctx(names[i]); 42 struct device *ctx = msm_iommu_get_ctx(names[i]);
43 if (IS_ERR_OR_NULL(ctx)) 43 if (IS_ERR_OR_NULL(ctx)) {
44 dev_warn(dev->dev, "couldn't get %s context", names[i]);
44 continue; 45 continue;
46 }
45 ret = iommu_attach_device(iommu->domain, ctx); 47 ret = iommu_attach_device(iommu->domain, ctx);
46 if (ret) { 48 if (ret) {
47 dev_warn(dev->dev, "could not attach iommu to %s", names[i]); 49 dev_warn(dev->dev, "could not attach iommu to %s", names[i]);
@@ -52,6 +54,20 @@ static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
52 return 0; 54 return 0;
53} 55}
54 56
57static void msm_iommu_detach(struct msm_mmu *mmu, const char **names, int cnt)
58{
59 struct msm_iommu *iommu = to_msm_iommu(mmu);
60 int i;
61
62 for (i = 0; i < cnt; i++) {
63 struct device *msm_iommu_get_ctx(const char *ctx_name);
64 struct device *ctx = msm_iommu_get_ctx(names[i]);
65 if (IS_ERR_OR_NULL(ctx))
66 continue;
67 iommu_detach_device(iommu->domain, ctx);
68 }
69}
70
55static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova, 71static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
56 struct sg_table *sgt, unsigned len, int prot) 72 struct sg_table *sgt, unsigned len, int prot)
57{ 73{
@@ -110,7 +126,7 @@ static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova,
110 126
111 VERB("unmap[%d]: %08x(%x)", i, iova, bytes); 127 VERB("unmap[%d]: %08x(%x)", i, iova, bytes);
112 128
113 BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE)); 129 BUG_ON(!PAGE_ALIGNED(bytes));
114 130
115 da += bytes; 131 da += bytes;
116 } 132 }
@@ -127,6 +143,7 @@ static void msm_iommu_destroy(struct msm_mmu *mmu)
127 143
128static const struct msm_mmu_funcs funcs = { 144static const struct msm_mmu_funcs funcs = {
129 .attach = msm_iommu_attach, 145 .attach = msm_iommu_attach,
146 .detach = msm_iommu_detach,
130 .map = msm_iommu_map, 147 .map = msm_iommu_map,
131 .unmap = msm_iommu_unmap, 148 .unmap = msm_iommu_unmap,
132 .destroy = msm_iommu_destroy, 149 .destroy = msm_iommu_destroy,
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index 030324482b4a..21da6d154f71 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -22,6 +22,7 @@
22 22
23struct msm_mmu_funcs { 23struct msm_mmu_funcs {
24 int (*attach)(struct msm_mmu *mmu, const char **names, int cnt); 24 int (*attach)(struct msm_mmu *mmu, const char **names, int cnt);
25 void (*detach)(struct msm_mmu *mmu, const char **names, int cnt);
25 int (*map)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt, 26 int (*map)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
26 unsigned len, int prot); 27 unsigned len, int prot);
27 int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt, 28 int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
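The msm changes above add a detach hook to the msm_mmu_funcs table, keep the attached MMU pointer in mdp5_kms, and have mdp5_destroy() detach and destroy it, so teardown mirrors setup. A rough sketch of that ops-table pattern in plain C, with invented names (mmu_ops, mmu_handle, owner_init):

    /*
     * Sketch of the pattern above: a backend exports paired attach/detach
     * hooks in an ops table, and the owner keeps the handle it attached so
     * teardown can mirror setup. All names are invented.
     */
    struct mmu_ops {
            int  (*attach)(void *mmu, const char **names, int cnt);
            void (*detach)(void *mmu, const char **names, int cnt);
            void (*destroy)(void *mmu);
    };

    struct mmu_handle {
            const struct mmu_ops *ops;
            void *priv;
    };

    static const char *ports[] = { "mdp_0" };

    static int owner_init(struct mmu_handle *mmu, struct mmu_handle **slot)
    {
            int ret = mmu->ops->attach(mmu->priv, ports, 1);

            if (ret) {
                    mmu->ops->destroy(mmu->priv);   /* don't leak on failure */
                    return ret;
            }
            *slot = mmu;    /* remember what was attached */
            return 0;
    }

    static void owner_destroy(struct mmu_handle *mmu)
    {
            if (!mmu)
                    return;
            mmu->ops->detach(mmu->priv, ports, 1);  /* undo attach first */
            mmu->ops->destroy(mmu->priv);
    }
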
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 08531a128f53..02d3d85829f3 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1052,7 +1052,7 @@ config SENSORS_PC87427
1052 will be called pc87427. 1052 will be called pc87427.
1053 1053
1054config SENSORS_NTC_THERMISTOR 1054config SENSORS_NTC_THERMISTOR
1055 tristate "NTC thermistor support" 1055 tristate "NTC thermistor support from Murata"
1056 depends on !OF || IIO=n || IIO 1056 depends on !OF || IIO=n || IIO
1057 help 1057 help
1058 This driver supports NTC thermistors sensor reading and its 1058 This driver supports NTC thermistors sensor reading and its
@@ -1060,7 +1060,8 @@ config SENSORS_NTC_THERMISTOR
1060 send notifications about the temperature. 1060 send notifications about the temperature.
1061 1061
1062 Currently, this driver supports 1062 Currently, this driver supports
1063 NCP15WB473, NCP18WB473, NCP21WB473, NCP03WB473, and NCP15WL333. 1063 NCP15WB473, NCP18WB473, NCP21WB473, NCP03WB473, and NCP15WL333
1064 from Murata.
1064 1065
1065 This driver can also be built as a module. If so, the module 1066 This driver can also be built as a module. If so, the module
1066 will be called ntc-thermistor. 1067 will be called ntc-thermistor.
@@ -1176,6 +1177,7 @@ config SENSORS_DME1737
1176config SENSORS_EMC1403 1177config SENSORS_EMC1403
1177 tristate "SMSC EMC1403/23 thermal sensor" 1178 tristate "SMSC EMC1403/23 thermal sensor"
1178 depends on I2C 1179 depends on I2C
1180 select REGMAP_I2C
1179 help 1181 help
1180 If you say yes here you get support for the SMSC EMC1403/23 1182 If you say yes here you get support for the SMSC EMC1403/23
1181 temperature monitoring chip. 1183 temperature monitoring chip.
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index ba35e4d530b5..2566c43dd1e9 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -538,7 +538,7 @@ static int gpio_fan_probe(struct platform_device *pdev)
538 538
539 /* Make this driver part of hwmon class. */ 539 /* Make this driver part of hwmon class. */
540 fan_data->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, 540 fan_data->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
541 "gpio-fan", fan_data, 541 "gpio_fan", fan_data,
542 gpio_fan_groups); 542 gpio_fan_groups);
543 if (IS_ERR(fan_data->hwmon_dev)) 543 if (IS_ERR(fan_data->hwmon_dev))
544 return PTR_ERR(fan_data->hwmon_dev); 544 return PTR_ERR(fan_data->hwmon_dev);
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index e76feb86a1d4..bdfbe9114889 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -163,6 +163,18 @@ static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
163} 163}
164 164
165static const struct of_device_id ntc_match[] = { 165static const struct of_device_id ntc_match[] = {
166 { .compatible = "murata,ncp15wb473",
167 .data = &ntc_thermistor_id[0] },
168 { .compatible = "murata,ncp18wb473",
169 .data = &ntc_thermistor_id[1] },
170 { .compatible = "murata,ncp21wb473",
171 .data = &ntc_thermistor_id[2] },
172 { .compatible = "murata,ncp03wb473",
173 .data = &ntc_thermistor_id[3] },
174 { .compatible = "murata,ncp15wl333",
175 .data = &ntc_thermistor_id[4] },
176
177 /* Usage of vendor name "ntc" is deprecated */
166 { .compatible = "ntc,ncp15wb473", 178 { .compatible = "ntc,ncp15wb473",
167 .data = &ntc_thermistor_id[0] }, 179 .data = &ntc_thermistor_id[0] },
168 { .compatible = "ntc,ncp18wb473", 180 { .compatible = "ntc,ncp18wb473",
@@ -534,7 +546,7 @@ static struct platform_driver ntc_thermistor_driver = {
534 546
535module_platform_driver(ntc_thermistor_driver); 547module_platform_driver(ntc_thermistor_driver);
536 548
537MODULE_DESCRIPTION("NTC Thermistor Driver"); 549MODULE_DESCRIPTION("NTC Thermistor Driver from Murata");
538MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>"); 550MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
539MODULE_LICENSE("GPL"); 551MODULE_LICENSE("GPL");
540MODULE_ALIAS("platform:ntc-thermistor"); 552MODULE_ALIAS("platform:ntc-thermistor");
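The ntc_thermistor hunk above introduces murata,-prefixed compatibles while keeping the old ntc, spellings so existing device trees continue to bind. A small sketch of a match table that carries both, trimmed to a single part; my_ids and my_of_match are invented names and only the compatible strings come from the hunk:

    /*
     * Sketch: keep the deprecated compatible next to the new vendor prefix
     * so existing device trees still bind. Only the compatible strings
     * come from the hunk above; my_ids and my_of_match are invented.
     */
    #include <linux/mod_devicetable.h>
    #include <linux/module.h>

    static const struct platform_device_id my_ids[] = {
            { "ncp15wb473", 0 },
            { }
    };

    static const struct of_device_id my_of_match[] = {
            { .compatible = "murata,ncp15wb473", .data = &my_ids[0] },
            /* deprecated "ntc" vendor prefix, kept for old device trees */
            { .compatible = "ntc,ncp15wb473",    .data = &my_ids[0] },
            { }
    };
    MODULE_DEVICE_TABLE(of, my_of_match);
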
diff --git a/drivers/hwmon/w83l786ng.c b/drivers/hwmon/w83l786ng.c
index 6ed76ceb9270..32487c19cbfc 100644
--- a/drivers/hwmon/w83l786ng.c
+++ b/drivers/hwmon/w83l786ng.c
@@ -249,7 +249,7 @@ static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
249 int nr = to_sensor_dev_attr(attr)->index; \ 249 int nr = to_sensor_dev_attr(attr)->index; \
250 struct w83l786ng_data *data = w83l786ng_update_device(dev); \ 250 struct w83l786ng_data *data = w83l786ng_update_device(dev); \
251 return sprintf(buf, "%d\n", \ 251 return sprintf(buf, "%d\n", \
252 FAN_FROM_REG(data->fan[nr], DIV_FROM_REG(data->fan_div[nr]))); \ 252 FAN_FROM_REG(data->reg[nr], DIV_FROM_REG(data->fan_div[nr]))); \
253} 253}
254 254
255show_fan_reg(fan); 255show_fan_reg(fan);
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index d4daa05efe60..499b4366a98d 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -45,7 +45,7 @@ struct pri_queue {
45struct pasid_state { 45struct pasid_state {
46 struct list_head list; /* For global state-list */ 46 struct list_head list; /* For global state-list */
47 atomic_t count; /* Reference count */ 47 atomic_t count; /* Reference count */
48 atomic_t mmu_notifier_count; /* Counting nested mmu_notifier 48 unsigned mmu_notifier_count; /* Counting nested mmu_notifier
49 calls */ 49 calls */
50 struct task_struct *task; /* Task bound to this PASID */ 50 struct task_struct *task; /* Task bound to this PASID */
51 struct mm_struct *mm; /* mm_struct for the faults */ 51 struct mm_struct *mm; /* mm_struct for the faults */
@@ -53,7 +53,8 @@ struct pasid_state {
53 struct pri_queue pri[PRI_QUEUE_SIZE]; /* PRI tag states */ 53 struct pri_queue pri[PRI_QUEUE_SIZE]; /* PRI tag states */
54 struct device_state *device_state; /* Link to our device_state */ 54 struct device_state *device_state; /* Link to our device_state */
55 int pasid; /* PASID index */ 55 int pasid; /* PASID index */
56 spinlock_t lock; /* Protect pri_queues */ 56 spinlock_t lock; /* Protect pri_queues and
 57 mmu_notifier_count */
57 wait_queue_head_t wq; /* To wait for count == 0 */ 58 wait_queue_head_t wq; /* To wait for count == 0 */
58}; 59};
59 60
@@ -431,15 +432,19 @@ static void mn_invalidate_range_start(struct mmu_notifier *mn,
431{ 432{
432 struct pasid_state *pasid_state; 433 struct pasid_state *pasid_state;
433 struct device_state *dev_state; 434 struct device_state *dev_state;
435 unsigned long flags;
434 436
435 pasid_state = mn_to_state(mn); 437 pasid_state = mn_to_state(mn);
436 dev_state = pasid_state->device_state; 438 dev_state = pasid_state->device_state;
437 439
438 if (atomic_add_return(1, &pasid_state->mmu_notifier_count) == 1) { 440 spin_lock_irqsave(&pasid_state->lock, flags);
441 if (pasid_state->mmu_notifier_count == 0) {
439 amd_iommu_domain_set_gcr3(dev_state->domain, 442 amd_iommu_domain_set_gcr3(dev_state->domain,
440 pasid_state->pasid, 443 pasid_state->pasid,
441 __pa(empty_page_table)); 444 __pa(empty_page_table));
442 } 445 }
446 pasid_state->mmu_notifier_count += 1;
447 spin_unlock_irqrestore(&pasid_state->lock, flags);
443} 448}
444 449
445static void mn_invalidate_range_end(struct mmu_notifier *mn, 450static void mn_invalidate_range_end(struct mmu_notifier *mn,
@@ -448,15 +453,19 @@ static void mn_invalidate_range_end(struct mmu_notifier *mn,
448{ 453{
449 struct pasid_state *pasid_state; 454 struct pasid_state *pasid_state;
450 struct device_state *dev_state; 455 struct device_state *dev_state;
456 unsigned long flags;
451 457
452 pasid_state = mn_to_state(mn); 458 pasid_state = mn_to_state(mn);
453 dev_state = pasid_state->device_state; 459 dev_state = pasid_state->device_state;
454 460
455 if (atomic_dec_and_test(&pasid_state->mmu_notifier_count)) { 461 spin_lock_irqsave(&pasid_state->lock, flags);
462 pasid_state->mmu_notifier_count -= 1;
463 if (pasid_state->mmu_notifier_count == 0) {
456 amd_iommu_domain_set_gcr3(dev_state->domain, 464 amd_iommu_domain_set_gcr3(dev_state->domain,
457 pasid_state->pasid, 465 pasid_state->pasid,
458 __pa(pasid_state->mm->pgd)); 466 __pa(pasid_state->mm->pgd));
459 } 467 }
468 spin_unlock_irqrestore(&pasid_state->lock, flags);
460} 469}
461 470
462static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm) 471static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
@@ -650,7 +659,6 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
650 goto out; 659 goto out;
651 660
652 atomic_set(&pasid_state->count, 1); 661 atomic_set(&pasid_state->count, 1);
653 atomic_set(&pasid_state->mmu_notifier_count, 0);
654 init_waitqueue_head(&pasid_state->wq); 662 init_waitqueue_head(&pasid_state->wq);
655 spin_lock_init(&pasid_state->lock); 663 spin_lock_init(&pasid_state->lock);
656 664
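The amd_iommu_v2 change above turns the atomic_t mmu_notifier nesting counter into a plain unsigned int guarded by the existing pasid_state spinlock, so the zero-crossing test and the gcr3 update it gates happen inside one critical section. A minimal sketch of that pattern with invented names (range_tracker, range_start, range_end); the callbacks stand in for the gcr3 updates:

    /*
     * Sketch of the locking change above: a plain counter plus the
     * owner's spinlock lets the zero-crossing test and the action it
     * gates run in one critical section, which an atomic_t alone cannot
     * guarantee. range_tracker and the callbacks are invented; the lock
     * must be spin_lock_init()ed by its owner.
     */
    #include <linux/spinlock.h>

    struct range_tracker {
            spinlock_t lock;
            unsigned int depth;     /* nested invalidate_range_start calls */
    };

    static void range_start(struct range_tracker *t, void (*arm)(void *), void *arg)
    {
            unsigned long flags;

            spin_lock_irqsave(&t->lock, flags);
            if (t->depth == 0)
                    arm(arg);               /* only on the outermost start */
            t->depth++;
            spin_unlock_irqrestore(&t->lock, flags);
    }

    static void range_end(struct range_tracker *t, void (*disarm)(void *), void *arg)
    {
            unsigned long flags;

            spin_lock_irqsave(&t->lock, flags);
            t->depth--;
            if (t->depth == 0)
                    disarm(arg);            /* only on the outermost end */
            spin_unlock_irqrestore(&t->lock, flags);
    }

An atomic_t makes the increment itself safe, but it cannot make the test-and-act sequence atomic; that is what taking the lock buys here.
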
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 6bb32773c3ac..51b6b77dc3e5 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3816,14 +3816,11 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
3816 ((void *)rmrr) + rmrr->header.length, 3816 ((void *)rmrr) + rmrr->header.length,
3817 rmrr->segment, rmrru->devices, 3817 rmrr->segment, rmrru->devices,
3818 rmrru->devices_cnt); 3818 rmrru->devices_cnt);
3819 if (ret > 0) 3819 if(ret < 0)
3820 break;
3821 else if(ret < 0)
3822 return ret; 3820 return ret;
3823 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) { 3821 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
3824 if (dmar_remove_dev_scope(info, rmrr->segment, 3822 dmar_remove_dev_scope(info, rmrr->segment,
3825 rmrru->devices, rmrru->devices_cnt)) 3823 rmrru->devices, rmrru->devices_cnt);
3826 break;
3827 } 3824 }
3828 } 3825 }
3829 3826
diff --git a/drivers/isdn/hisax/Kconfig b/drivers/isdn/hisax/Kconfig
index d9edcc94c2a8..97465ac5a2d5 100644
--- a/drivers/isdn/hisax/Kconfig
+++ b/drivers/isdn/hisax/Kconfig
@@ -16,7 +16,7 @@ config ISDN_DRV_HISAX
16 also to the configuration option of the driver for your particular 16 also to the configuration option of the driver for your particular
17 card, below. 17 card, below.
18 18
19if ISDN_DRV_HISAX!=n 19if ISDN_DRV_HISAX
20 20
21comment "D-channel protocol features" 21comment "D-channel protocol features"
22 22
@@ -348,10 +348,6 @@ config HISAX_ENTERNOW_PCI
348 This enables HiSax support for the Formula-n enter:now PCI 348 This enables HiSax support for the Formula-n enter:now PCI
349 ISDN card. 349 ISDN card.
350 350
351endif
352
353if ISDN_DRV_HISAX
354
355config HISAX_DEBUG 351config HISAX_DEBUG
356 bool "HiSax debugging" 352 bool "HiSax debugging"
357 help 353 help
@@ -420,11 +416,6 @@ config HISAX_FRITZ_PCIPNP
420 (the latter also needs you to select "ISA Plug and Play support" 416 (the latter also needs you to select "ISA Plug and Play support"
421 from the menu "Plug and Play configuration") 417 from the menu "Plug and Play configuration")
422 418
423config HISAX_AVM_A1_PCMCIA
424 bool
425 depends on HISAX_AVM_A1_CS
426 default y
427
428endif 419endif
429 420
430endmenu 421endmenu
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 23b4a3b28dbc..4eab93aa570b 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -1257,7 +1257,8 @@ static unsigned int smu_fpoll(struct file *file, poll_table *wait)
1257 if (pp->busy && pp->cmd.status != 1) 1257 if (pp->busy && pp->cmd.status != 1)
1258 mask |= POLLIN; 1258 mask |= POLLIN;
1259 spin_unlock_irqrestore(&pp->lock, flags); 1259 spin_unlock_irqrestore(&pp->lock, flags);
1260 } if (pp->mode == smu_file_events) { 1260 }
1261 if (pp->mode == smu_file_events) {
1261 /* Not yet implemented */ 1262 /* Not yet implemented */
1262 } 1263 }
1263 return mask; 1264 return mask;
diff --git a/drivers/memstick/host/rtsx_pci_ms.c b/drivers/memstick/host/rtsx_pci_ms.c
index 2a635b6fdaf7..c880ba685754 100644
--- a/drivers/memstick/host/rtsx_pci_ms.c
+++ b/drivers/memstick/host/rtsx_pci_ms.c
@@ -601,6 +601,7 @@ static int rtsx_pci_ms_drv_remove(struct platform_device *pdev)
601 pcr->slots[RTSX_MS_CARD].card_event = NULL; 601 pcr->slots[RTSX_MS_CARD].card_event = NULL;
602 msh = host->msh; 602 msh = host->msh;
603 host->eject = true; 603 host->eject = true;
604 cancel_work_sync(&host->handle_req);
604 605
605 mutex_lock(&host->host_mutex); 606 mutex_lock(&host->host_mutex);
606 if (host->req) { 607 if (host->req) {
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index ee8204cc31e9..6cc4b6acc22a 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -760,6 +760,7 @@ config MFD_SYSCON
760config MFD_DAVINCI_VOICECODEC 760config MFD_DAVINCI_VOICECODEC
761 tristate 761 tristate
762 select MFD_CORE 762 select MFD_CORE
763 select REGMAP_MMIO
763 764
764config MFD_TI_AM335X_TSCADC 765config MFD_TI_AM335X_TSCADC
765 tristate "TI ADC / Touch Screen chip support" 766 tristate "TI ADC / Touch Screen chip support"
@@ -1225,7 +1226,7 @@ config MFD_WM8994
1225 functionaltiy of the device other drivers must be enabled. 1226 functionaltiy of the device other drivers must be enabled.
1226 1227
1227config MFD_STW481X 1228config MFD_STW481X
1228 bool "Support for ST Microelectronics STw481x" 1229 tristate "Support for ST Microelectronics STw481x"
1229 depends on I2C && ARCH_NOMADIK 1230 depends on I2C && ARCH_NOMADIK
1230 select REGMAP_I2C 1231 select REGMAP_I2C
1231 select MFD_CORE 1232 select MFD_CORE
@@ -1248,7 +1249,7 @@ config MCP_SA11X0
1248 1249
1249# Chip drivers 1250# Chip drivers
1250config MCP_UCB1200 1251config MCP_UCB1200
1251 bool "Support for UCB1200 / UCB1300" 1252 tristate "Support for UCB1200 / UCB1300"
1252 depends on MCP_SA11X0 1253 depends on MCP_SA11X0
1253 select MCP 1254 select MCP
1254 1255
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index a8ee4a36a1d8..cf2e6a198c6b 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -591,7 +591,7 @@ static int ab8500_irq_init(struct ab8500 *ab8500, struct device_node *np)
591 num_irqs = AB8500_NR_IRQS; 591 num_irqs = AB8500_NR_IRQS;
592 592
593 /* If ->irq_base is zero this will give a linear mapping */ 593 /* If ->irq_base is zero this will give a linear mapping */
594 ab8500->domain = irq_domain_add_simple(NULL, 594 ab8500->domain = irq_domain_add_simple(ab8500->dev->of_node,
595 num_irqs, 0, 595 num_irqs, 0,
596 &ab8500_irq_ops, ab8500); 596 &ab8500_irq_ops, ab8500);
597 597
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index a43d0c467274..ee9402324a23 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -54,7 +54,7 @@ config AD525X_DPOT_SPI
54config ATMEL_PWM 54config ATMEL_PWM
55 tristate "Atmel AT32/AT91 PWM support" 55 tristate "Atmel AT32/AT91 PWM support"
56 depends on HAVE_CLK 56 depends on HAVE_CLK
57 depends on AVR32 || AT91SAM9263 || AT91SAM9RL || AT91SAM9G45 57 depends on AVR32 || ARCH_AT91SAM9263 || ARCH_AT91SAM9RL || ARCH_AT91SAM9G45
58 help 58 help
59 This option enables device driver support for the PWM channels 59 This option enables device driver support for the PWM channels
60 on certain Atmel processors. Pulse Width Modulation is used for 60 on certain Atmel processors. Pulse Width Modulation is used for
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 04f35f960cb8..3a451b6cd3d5 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1025,10 +1025,14 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
1025 NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \ 1025 NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
1026 NETIF_F_HIGHDMA | NETIF_F_LRO) 1026 NETIF_F_HIGHDMA | NETIF_F_LRO)
1027 1027
1028#define BOND_ENC_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | NETIF_F_RXCSUM |\
1029 NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL)
1030
1028static void bond_compute_features(struct bonding *bond) 1031static void bond_compute_features(struct bonding *bond)
1029{ 1032{
1030 unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE; 1033 unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE;
1031 netdev_features_t vlan_features = BOND_VLAN_FEATURES; 1034 netdev_features_t vlan_features = BOND_VLAN_FEATURES;
1035 netdev_features_t enc_features = BOND_ENC_FEATURES;
1032 struct net_device *bond_dev = bond->dev; 1036 struct net_device *bond_dev = bond->dev;
1033 struct list_head *iter; 1037 struct list_head *iter;
1034 struct slave *slave; 1038 struct slave *slave;
@@ -1044,6 +1048,9 @@ static void bond_compute_features(struct bonding *bond)
1044 vlan_features = netdev_increment_features(vlan_features, 1048 vlan_features = netdev_increment_features(vlan_features,
1045 slave->dev->vlan_features, BOND_VLAN_FEATURES); 1049 slave->dev->vlan_features, BOND_VLAN_FEATURES);
1046 1050
1051 enc_features = netdev_increment_features(enc_features,
1052 slave->dev->hw_enc_features,
1053 BOND_ENC_FEATURES);
1047 dst_release_flag &= slave->dev->priv_flags; 1054 dst_release_flag &= slave->dev->priv_flags;
1048 if (slave->dev->hard_header_len > max_hard_header_len) 1055 if (slave->dev->hard_header_len > max_hard_header_len)
1049 max_hard_header_len = slave->dev->hard_header_len; 1056 max_hard_header_len = slave->dev->hard_header_len;
@@ -1054,6 +1061,7 @@ static void bond_compute_features(struct bonding *bond)
1054 1061
1055done: 1062done:
1056 bond_dev->vlan_features = vlan_features; 1063 bond_dev->vlan_features = vlan_features;
1064 bond_dev->hw_enc_features = enc_features;
1057 bond_dev->hard_header_len = max_hard_header_len; 1065 bond_dev->hard_header_len = max_hard_header_len;
1058 bond_dev->gso_max_segs = gso_max_segs; 1066 bond_dev->gso_max_segs = gso_max_segs;
1059 netif_set_gso_max_size(bond_dev, gso_max_size); 1067 netif_set_gso_max_size(bond_dev, gso_max_size);
@@ -3975,6 +3983,7 @@ void bond_setup(struct net_device *bond_dev)
3975 NETIF_F_HW_VLAN_CTAG_FILTER; 3983 NETIF_F_HW_VLAN_CTAG_FILTER;
3976 3984
3977 bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM); 3985 bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM);
3986 bond_dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
3978 bond_dev->features |= bond_dev->hw_features; 3987 bond_dev->features |= bond_dev->hw_features;
3979} 3988}
3980 3989
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index dcf9196f6316..ea4d4f1a6411 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -52,6 +52,7 @@
52#include <linux/delay.h> 52#include <linux/delay.h>
53#include <linux/init.h> 53#include <linux/init.h>
54#include <linux/kernel.h> 54#include <linux/kernel.h>
55#include <linux/workqueue.h>
55#include <linux/can.h> 56#include <linux/can.h>
56#include <linux/can/skb.h> 57#include <linux/can/skb.h>
57 58
@@ -85,6 +86,7 @@ struct slcan {
85 struct tty_struct *tty; /* ptr to TTY structure */ 86 struct tty_struct *tty; /* ptr to TTY structure */
86 struct net_device *dev; /* easy for intr handling */ 87 struct net_device *dev; /* easy for intr handling */
87 spinlock_t lock; 88 spinlock_t lock;
89 struct work_struct tx_work; /* Flushes transmit buffer */
88 90
89 /* These are pointers to the malloc()ed frame buffers. */ 91 /* These are pointers to the malloc()ed frame buffers. */
90 unsigned char rbuff[SLC_MTU]; /* receiver buffer */ 92 unsigned char rbuff[SLC_MTU]; /* receiver buffer */
@@ -309,36 +311,46 @@ static void slc_encaps(struct slcan *sl, struct can_frame *cf)
309 sl->dev->stats.tx_bytes += cf->can_dlc; 311 sl->dev->stats.tx_bytes += cf->can_dlc;
310} 312}
311 313
312/* 314/* Write out any remaining transmit buffer. Scheduled when tty is writable */
313 * Called by the driver when there's room for more data. If we have 315static void slcan_transmit(struct work_struct *work)
314 * more packets to send, we send them here.
315 */
316static void slcan_write_wakeup(struct tty_struct *tty)
317{ 316{
317 struct slcan *sl = container_of(work, struct slcan, tx_work);
318 int actual; 318 int actual;
319 struct slcan *sl = (struct slcan *) tty->disc_data;
320 319
320 spin_lock_bh(&sl->lock);
321 /* First make sure we're connected. */ 321 /* First make sure we're connected. */
322 if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev)) 322 if (!sl->tty || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev)) {
323 spin_unlock_bh(&sl->lock);
323 return; 324 return;
325 }
324 326
325 spin_lock_bh(&sl->lock);
326 if (sl->xleft <= 0) { 327 if (sl->xleft <= 0) {
327 /* Now serial buffer is almost free & we can start 328 /* Now serial buffer is almost free & we can start
328 * transmission of another packet */ 329 * transmission of another packet */
329 sl->dev->stats.tx_packets++; 330 sl->dev->stats.tx_packets++;
330 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); 331 clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
331 spin_unlock_bh(&sl->lock); 332 spin_unlock_bh(&sl->lock);
332 netif_wake_queue(sl->dev); 333 netif_wake_queue(sl->dev);
333 return; 334 return;
334 } 335 }
335 336
336 actual = tty->ops->write(tty, sl->xhead, sl->xleft); 337 actual = sl->tty->ops->write(sl->tty, sl->xhead, sl->xleft);
337 sl->xleft -= actual; 338 sl->xleft -= actual;
338 sl->xhead += actual; 339 sl->xhead += actual;
339 spin_unlock_bh(&sl->lock); 340 spin_unlock_bh(&sl->lock);
340} 341}
341 342
343/*
344 * Called by the driver when there's room for more data.
345 * Schedule the transmit.
346 */
347static void slcan_write_wakeup(struct tty_struct *tty)
348{
349 struct slcan *sl = tty->disc_data;
350
351 schedule_work(&sl->tx_work);
352}
353
342/* Send a can_frame to a TTY queue. */ 354/* Send a can_frame to a TTY queue. */
343static netdev_tx_t slc_xmit(struct sk_buff *skb, struct net_device *dev) 355static netdev_tx_t slc_xmit(struct sk_buff *skb, struct net_device *dev)
344{ 356{
@@ -528,6 +540,7 @@ static struct slcan *slc_alloc(dev_t line)
528 sl->magic = SLCAN_MAGIC; 540 sl->magic = SLCAN_MAGIC;
529 sl->dev = dev; 541 sl->dev = dev;
530 spin_lock_init(&sl->lock); 542 spin_lock_init(&sl->lock);
543 INIT_WORK(&sl->tx_work, slcan_transmit);
531 slcan_devs[i] = dev; 544 slcan_devs[i] = dev;
532 545
533 return sl; 546 return sl;
@@ -626,8 +639,12 @@ static void slcan_close(struct tty_struct *tty)
626 if (!sl || sl->magic != SLCAN_MAGIC || sl->tty != tty) 639 if (!sl || sl->magic != SLCAN_MAGIC || sl->tty != tty)
627 return; 640 return;
628 641
642 spin_lock_bh(&sl->lock);
629 tty->disc_data = NULL; 643 tty->disc_data = NULL;
630 sl->tty = NULL; 644 sl->tty = NULL;
645 spin_unlock_bh(&sl->lock);
646
647 flush_work(&sl->tx_work);
631 648
632 /* Flush network side */ 649 /* Flush network side */
633 unregister_netdev(sl->dev); 650 unregister_netdev(sl->dev);
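The slcan conversion above (the slip driver receives the same treatment later in this diff) moves the actual tty write out of the write_wakeup callback into a work item: the callback only schedules work, the work function re-checks under the lock that the channel is still open, and close() clears the tty pointer under the lock and then flush_work()s before unregistering. A minimal sketch of that shape with invented names (chan, chan_transmit, chan_close):

    /*
     * Sketch of the defer-to-workqueue shape used above: the atomic
     * wakeup callback only schedules work, the work function re-checks
     * under the lock that the channel is still open, and teardown
     * flushes the work. All names are invented.
     */
    #include <linux/kernel.h>
    #include <linux/spinlock.h>
    #include <linux/workqueue.h>

    struct chan {
            spinlock_t lock;
            struct work_struct tx_work;
            void *port;             /* cleared on close */
    };

    static void chan_transmit(struct work_struct *work)
    {
            struct chan *c = container_of(work, struct chan, tx_work);

            spin_lock_bh(&c->lock);
            if (!c->port) {                 /* closed while work was queued */
                    spin_unlock_bh(&c->lock);
                    return;
            }
            /* ... push pending bytes to c->port here ... */
            spin_unlock_bh(&c->lock);
    }

    static void chan_wakeup(struct chan *c)  /* may be called from atomic context */
    {
            schedule_work(&c->tx_work);
    }

    static void chan_close(struct chan *c)
    {
            spin_lock_bh(&c->lock);
            c->port = NULL;                 /* make queued work a no-op */
            spin_unlock_bh(&c->lock);
            flush_work(&c->tx_work);        /* wait for any in-flight work */
    }

    static void chan_init(struct chan *c)
    {
            spin_lock_init(&c->lock);
            INIT_WORK(&c->tx_work, chan_transmit);
            c->port = NULL;
    }
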
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 28460676b8ca..d81e7167a8b5 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -736,6 +736,7 @@ static int emac_open(struct net_device *dev)
736 736
737 ret = emac_mdio_probe(dev); 737 ret = emac_mdio_probe(dev);
738 if (ret < 0) { 738 if (ret < 0) {
739 free_irq(dev->irq, dev);
739 netdev_err(dev, "cannot probe MDIO bus\n"); 740 netdev_err(dev, "cannot probe MDIO bus\n");
740 return ret; 741 return ret;
741 } 742 }
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index df2792d8383d..8afa579e7c40 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -3224,7 +3224,7 @@ static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3224 return 0; 3224 return 0;
3225} 3225}
3226 3226
3227#define NVRAM_CMD_TIMEOUT 100 3227#define NVRAM_CMD_TIMEOUT 5000
3228 3228
3229static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd) 3229static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3230{ 3230{
@@ -3232,7 +3232,7 @@ static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3232 3232
3233 tw32(NVRAM_CMD, nvram_cmd); 3233 tw32(NVRAM_CMD, nvram_cmd);
3234 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) { 3234 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3235 udelay(10); 3235 usleep_range(10, 40);
3236 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) { 3236 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3237 udelay(10); 3237 udelay(10);
3238 break; 3238 break;
@@ -7854,8 +7854,8 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7854 netif_wake_queue(tp->dev); 7854 netif_wake_queue(tp->dev);
7855 } 7855 }
7856 7856
7857 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO); 7857 segs = skb_gso_segment(skb, tp->dev->features & ~(NETIF_F_TSO | NETIF_F_TSO6));
7858 if (IS_ERR(segs)) 7858 if (IS_ERR(segs) || !segs)
7859 goto tg3_tso_bug_end; 7859 goto tg3_tso_bug_end;
7860 7860
7861 do { 7861 do {
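The tg3 hunk above raises NVRAM_CMD_TIMEOUT from 100 to 5000 iterations and swaps the busy-wait udelay(10) for usleep_range(10, 40), trading a little latency for not spinning the CPU while flash commands complete. A sketch of such a bounded poll loop; rd32() and done_bit are invented stand-ins for the device's register accessor and status flag:

    /*
     * Sketch of the polling change above: a bounded loop that sleeps
     * between reads instead of busy-waiting. rd32() and done_bit are
     * invented stand-ins for the device's register accessor and status
     * bit; this must run in sleepable context.
     */
    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    #define POLL_ITERATIONS 5000

    static int wait_cmd_done(u32 (*rd32)(void *hw), void *hw, u32 done_bit)
    {
            int i;

            for (i = 0; i < POLL_ITERATIONS; i++) {
                    usleep_range(10, 40);   /* sleep, don't spin */
                    if (rd32(hw) & done_bit)
                            return 0;
            }
            return -EBUSY;                  /* device never signalled completion */
    }
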
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 2f8d6b910383..a83271cf17c3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -4057,22 +4057,19 @@ int cxgb4_unregister_uld(enum cxgb4_uld type)
4057EXPORT_SYMBOL(cxgb4_unregister_uld); 4057EXPORT_SYMBOL(cxgb4_unregister_uld);
4058 4058
4059/* Check if netdev on which event occurred belongs to us or not. Return 4059/* Check if netdev on which event occurred belongs to us or not. Return
4060 * suceess (1) if it belongs otherwise failure (0). 4060 * success (true) if it belongs otherwise failure (false).
4061 * Called with rcu_read_lock() held.
4061 */ 4062 */
4062static int cxgb4_netdev(struct net_device *netdev) 4063static bool cxgb4_netdev(const struct net_device *netdev)
4063{ 4064{
4064 struct adapter *adap; 4065 struct adapter *adap;
4065 int i; 4066 int i;
4066 4067
4067 spin_lock(&adap_rcu_lock);
4068 list_for_each_entry_rcu(adap, &adap_rcu_list, rcu_node) 4068 list_for_each_entry_rcu(adap, &adap_rcu_list, rcu_node)
4069 for (i = 0; i < MAX_NPORTS; i++) 4069 for (i = 0; i < MAX_NPORTS; i++)
4070 if (adap->port[i] == netdev) { 4070 if (adap->port[i] == netdev)
4071 spin_unlock(&adap_rcu_lock); 4071 return true;
4072 return 1; 4072 return false;
4073 }
4074 spin_unlock(&adap_rcu_lock);
4075 return 0;
4076} 4073}
4077 4074
4078static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa, 4075static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa,
@@ -6396,6 +6393,7 @@ static void remove_one(struct pci_dev *pdev)
6396 adapter->flags &= ~DEV_ENABLED; 6393 adapter->flags &= ~DEV_ENABLED;
6397 } 6394 }
6398 pci_release_regions(pdev); 6395 pci_release_regions(pdev);
6396 synchronize_rcu();
6399 kfree(adapter); 6397 kfree(adapter);
6400 } else 6398 } else
6401 pci_release_regions(pdev); 6399 pci_release_regions(pdev);
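The cxgb4 hunk above stops taking adap_rcu_lock inside cxgb4_netdev(), documents that callers must hold rcu_read_lock() around the list_for_each_entry_rcu() walk, and adds synchronize_rcu() before the adapter is kfree()d in remove_one(). A compact sketch of that reader/updater split; adapter, adap_list and the two helpers are invented names:

    /*
     * Sketch of the RCU usage above: readers walk the list under
     * rcu_read_lock() with no spinlock, and the updater waits out a
     * grace period before freeing. adapter, adap_list and the two
     * helpers are invented names.
     */
    #include <linux/rculist.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct adapter {
            struct list_head node;
            void *port;
    };

    static LIST_HEAD(adap_list);

    /* reader: the caller must hold rcu_read_lock() */
    static bool port_is_ours(const void *port)
    {
            struct adapter *a;

            list_for_each_entry_rcu(a, &adap_list, node)
                    if (a->port == port)
                            return true;
            return false;
    }

    /* updater: unlink, wait for readers, then free */
    static void adapter_remove(struct adapter *a)
    {
            list_del_rcu(&a->node);
            synchronize_rcu();      /* no reader can still see 'a' after this */
            kfree(a);
    }

The synchronize_rcu() is what makes the lock-free reader safe: the updater may not free the entry until every reader that could still see it has left its RCU read-side critical section.
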
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index bba67681aeaa..931478e7bd28 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -3962,6 +3962,7 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
3962 p->lport = j; 3962 p->lport = j;
3963 p->rss_size = rss_size; 3963 p->rss_size = rss_size;
3964 memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN); 3964 memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
3965 adap->port[i]->dev_port = j;
3965 3966
3966 ret = ntohl(c.u.info.lstatus_to_modtype); 3967 ret = ntohl(c.u.info.lstatus_to_modtype);
3967 p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ? 3968 p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
diff --git a/drivers/net/ethernet/dec/tulip/timer.c b/drivers/net/ethernet/dec/tulip/timer.c
index 768379b8aee9..523d9dde50a2 100644
--- a/drivers/net/ethernet/dec/tulip/timer.c
+++ b/drivers/net/ethernet/dec/tulip/timer.c
@@ -158,7 +158,7 @@ void comet_timer(unsigned long data)
158{ 158{
159 struct net_device *dev = (struct net_device *)data; 159 struct net_device *dev = (struct net_device *)data;
160 struct tulip_private *tp = netdev_priv(dev); 160 struct tulip_private *tp = netdev_priv(dev);
161 int next_tick = 60*HZ; 161 int next_tick = 2*HZ;
162 162
163 if (tulip_debug > 1) 163 if (tulip_debug > 1)
164 netdev_dbg(dev, "Comet link status %04x partner capability %04x\n", 164 netdev_dbg(dev, "Comet link status %04x partner capability %04x\n",
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 2e7c5553955e..c2f5d2d3b932 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -557,9 +557,7 @@ static inline u16 be_max_qs(struct be_adapter *adapter)
557#define be_pvid_tagging_enabled(adapter) (adapter->pvid) 557#define be_pvid_tagging_enabled(adapter) (adapter->pvid)
558 558
559/* Is BE in QNQ multi-channel mode */ 559/* Is BE in QNQ multi-channel mode */
560#define be_is_qnq_mode(adapter) (adapter->mc_type == FLEX10 || \ 560#define be_is_qnq_mode(adapter) (adapter->function_mode & QNQ_MODE)
561 adapter->mc_type == vNIC1 || \
562 adapter->mc_type == UFP)
563 561
564#define lancer_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID3 || \ 562#define lancer_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID3 || \
565 adapter->pdev->device == OC_DEVICE_ID4) 563 adapter->pdev->device == OC_DEVICE_ID4)
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 3e0a6b243806..59b3c056f329 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1091,7 +1091,7 @@ struct be_cmd_resp_modify_eq_delay {
1091 * based on the skew/IPL. 1091 * based on the skew/IPL.
1092 */ 1092 */
1093#define RDMA_ENABLED 0x4 1093#define RDMA_ENABLED 0x4
1094#define FLEX10_MODE 0x400 1094#define QNQ_MODE 0x400
1095#define VNIC_MODE 0x20000 1095#define VNIC_MODE 0x20000
1096#define UMC_ENABLED 0x1000000 1096#define UMC_ENABLED 0x1000000
1097struct be_cmd_req_query_fw_cfg { 1097struct be_cmd_req_query_fw_cfg {
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 6822b3d76d85..34a26e42f19d 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -3254,9 +3254,9 @@ err:
3254 3254
3255static u8 be_convert_mc_type(u32 function_mode) 3255static u8 be_convert_mc_type(u32 function_mode)
3256{ 3256{
3257 if (function_mode & VNIC_MODE && function_mode & FLEX10_MODE) 3257 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
3258 return vNIC1; 3258 return vNIC1;
3259 else if (function_mode & FLEX10_MODE) 3259 else if (function_mode & QNQ_MODE)
3260 return FLEX10; 3260 return FLEX10;
3261 else if (function_mode & VNIC_MODE) 3261 else if (function_mode & VNIC_MODE)
3262 return vNIC2; 3262 return vNIC2;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 38d9d276ab8b..77037fd377b8 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -320,6 +320,11 @@ static void *swap_buffer(void *bufaddr, int len)
320 return bufaddr; 320 return bufaddr;
321} 321}
322 322
323static inline bool is_ipv4_pkt(struct sk_buff *skb)
324{
325 return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
326}
327
323static int 328static int
324fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev) 329fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
325{ 330{
@@ -330,7 +335,8 @@ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
330 if (unlikely(skb_cow_head(skb, 0))) 335 if (unlikely(skb_cow_head(skb, 0)))
331 return -1; 336 return -1;
332 337
333 ip_hdr(skb)->check = 0; 338 if (is_ipv4_pkt(skb))
339 ip_hdr(skb)->check = 0;
334 *(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0; 340 *(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;
335 341
336 return 0; 342 return 0;
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 7f81ae66cc89..e912b6887d40 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -4199,6 +4199,13 @@ static struct dmi_system_id skge_32bit_dma_boards[] = {
4199 DMI_MATCH(DMI_BOARD_NAME, "P5NSLI") 4199 DMI_MATCH(DMI_BOARD_NAME, "P5NSLI")
4200 }, 4200 },
4201 }, 4201 },
4202 {
4203 .ident = "FUJITSU SIEMENS A8NE-FM",
4204 .matches = {
4205 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek Computer INC."),
4206 DMI_MATCH(DMI_BOARD_NAME, "A8NE-FM")
4207 },
4208 },
4202 {} 4209 {}
4203}; 4210};
4204 4211
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 5f42f6d6e4c6..82ab427290c3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2439,7 +2439,8 @@ slave_start:
2439 (num_vfs_argc > 1 || probe_vfs_argc > 1)) { 2439 (num_vfs_argc > 1 || probe_vfs_argc > 1)) {
2440 mlx4_err(dev, 2440 mlx4_err(dev,
2441 "Invalid syntax of num_vfs/probe_vfs with IB port - single port VFs syntax is only supported when all ports are configured as ethernet\n"); 2441 "Invalid syntax of num_vfs/probe_vfs with IB port - single port VFs syntax is only supported when all ports are configured as ethernet\n");
2442 goto err_close; 2442 err = -EINVAL;
2443 goto err_master_mfunc;
2443 } 2444 }
2444 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]); i++) { 2445 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]); i++) {
2445 unsigned j; 2446 unsigned j;
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index ff380dac6629..b988d16cd34e 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1212,7 +1212,12 @@ static int cpsw_ndo_open(struct net_device *ndev)
1212 for_each_slave(priv, cpsw_slave_open, priv); 1212 for_each_slave(priv, cpsw_slave_open, priv);
1213 1213
1214 /* Add default VLAN */ 1214 /* Add default VLAN */
1215 cpsw_add_default_vlan(priv); 1215 if (!priv->data.dual_emac)
1216 cpsw_add_default_vlan(priv);
1217 else
1218 cpsw_ale_add_vlan(priv->ale, priv->data.default_vlan,
1219 ALE_ALL_PORTS << priv->host_port,
1220 ALE_ALL_PORTS << priv->host_port, 0, 0);
1216 1221
1217 if (!cpsw_common_res_usage_state(priv)) { 1222 if (!cpsw_common_res_usage_state(priv)) {
1218 /* setup tx dma to fixed prio and zero offset */ 1223 /* setup tx dma to fixed prio and zero offset */
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 14389f841d43..4c70360967c2 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -2191,7 +2191,6 @@ static void tile_net_setup(struct net_device *dev)
2191static void tile_net_dev_init(const char *name, const uint8_t *mac) 2191static void tile_net_dev_init(const char *name, const uint8_t *mac)
2192{ 2192{
2193 int ret; 2193 int ret;
2194 int i;
2195 struct net_device *dev; 2194 struct net_device *dev;
2196 struct tile_net_priv *priv; 2195 struct tile_net_priv *priv;
2197 2196
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index c041f63a6d30..4ed38eaecea8 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -189,7 +189,7 @@ static int netvsc_destroy_buf(struct netvsc_device *net_device)
189 "unable to teardown send buffer's gpadl\n"); 189 "unable to teardown send buffer's gpadl\n");
190 return ret; 190 return ret;
191 } 191 }
192 net_device->recv_buf_gpadl_handle = 0; 192 net_device->send_buf_gpadl_handle = 0;
193 } 193 }
194 if (net_device->send_buf) { 194 if (net_device->send_buf) {
195 /* Free up the receive buffer */ 195 /* Free up the receive buffer */
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 4517b149ed07..50899416f668 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -1137,6 +1137,8 @@ static int at86rf230_probe(struct spi_device *spi)
1137 dev->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK; 1137 dev->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK;
1138 1138
1139 irq_type = irq_get_trigger_type(spi->irq); 1139 irq_type = irq_get_trigger_type(spi->irq);
1140 if (!irq_type)
1141 irq_type = IRQF_TRIGGER_RISING;
1140 if (irq_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) { 1142 if (irq_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
1141 irq_worker = at86rf230_irqwork; 1143 irq_worker = at86rf230_irqwork;
1142 irq_handler = at86rf230_isr; 1144 irq_handler = at86rf230_isr;
@@ -1168,7 +1170,8 @@ static int at86rf230_probe(struct spi_device *spi)
1168 if (rc) 1170 if (rc)
1169 goto err_hw_init; 1171 goto err_hw_init;
1170 1172
1171 rc = devm_request_irq(&spi->dev, spi->irq, irq_handler, IRQF_SHARED, 1173 rc = devm_request_irq(&spi->dev, spi->irq, irq_handler,
1174 IRQF_SHARED | irq_type,
1172 dev_name(&spi->dev), lp); 1175 dev_name(&spi->dev), lp);
1173 if (rc) 1176 if (rc)
1174 goto err_hw_init; 1177 goto err_hw_init;
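The at86rf230 hunk above falls back to IRQF_TRIGGER_RISING when irq_get_trigger_type() reports no trigger configured, and then ORs that trigger into the flags passed to devm_request_irq(). A hedged sketch of the same setup; request_device_irq() and my_isr() are invented:

    /*
     * Sketch of the IRQ setup above: default the trigger when firmware or
     * the device tree did not specify one, then OR it into the request
     * flags. request_device_irq() and my_isr() are invented.
     */
    #include <linux/device.h>
    #include <linux/interrupt.h>
    #include <linux/irq.h>

    static irqreturn_t my_isr(int irq, void *data)
    {
            return IRQ_HANDLED;
    }

    static int request_device_irq(struct device *dev, int irq, void *priv)
    {
            unsigned int type = irq_get_trigger_type(irq);

            if (!type)                      /* nothing configured for this line */
                    type = IRQF_TRIGGER_RISING;

            return devm_request_irq(dev, irq, my_isr, IRQF_SHARED | type,
                                    dev_name(dev), priv);
    }
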
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 6c622aedbae1..fdc1b418fa6a 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -16,9 +16,13 @@
16#include <linux/string.h> 16#include <linux/string.h>
17#include <linux/netdevice.h> 17#include <linux/netdevice.h>
18#include <linux/etherdevice.h> 18#include <linux/etherdevice.h>
19#include <linux/of_gpio.h>
20#include <linux/gpio/consumer.h>
19 21
20#define AT803X_INTR_ENABLE 0x12 22#define AT803X_INTR_ENABLE 0x12
21#define AT803X_INTR_STATUS 0x13 23#define AT803X_INTR_STATUS 0x13
24#define AT803X_SMART_SPEED 0x14
25#define AT803X_LED_CONTROL 0x18
22#define AT803X_WOL_ENABLE 0x01 26#define AT803X_WOL_ENABLE 0x01
23#define AT803X_DEVICE_ADDR 0x03 27#define AT803X_DEVICE_ADDR 0x03
24#define AT803X_LOC_MAC_ADDR_0_15_OFFSET 0x804C 28#define AT803X_LOC_MAC_ADDR_0_15_OFFSET 0x804C
@@ -35,10 +39,52 @@
35#define AT803X_DEBUG_SYSTEM_MODE_CTRL 0x05 39#define AT803X_DEBUG_SYSTEM_MODE_CTRL 0x05
36#define AT803X_DEBUG_RGMII_TX_CLK_DLY BIT(8) 40#define AT803X_DEBUG_RGMII_TX_CLK_DLY BIT(8)
37 41
42#define ATH8030_PHY_ID 0x004dd076
43#define ATH8031_PHY_ID 0x004dd074
44#define ATH8035_PHY_ID 0x004dd072
45
38MODULE_DESCRIPTION("Atheros 803x PHY driver"); 46MODULE_DESCRIPTION("Atheros 803x PHY driver");
39MODULE_AUTHOR("Matus Ujhelyi"); 47MODULE_AUTHOR("Matus Ujhelyi");
40MODULE_LICENSE("GPL"); 48MODULE_LICENSE("GPL");
41 49
50struct at803x_priv {
51 bool phy_reset:1;
52 struct gpio_desc *gpiod_reset;
53};
54
55struct at803x_context {
56 u16 bmcr;
57 u16 advertise;
58 u16 control1000;
59 u16 int_enable;
60 u16 smart_speed;
61 u16 led_control;
62};
63
64/* save relevant PHY registers to private copy */
65static void at803x_context_save(struct phy_device *phydev,
66 struct at803x_context *context)
67{
68 context->bmcr = phy_read(phydev, MII_BMCR);
69 context->advertise = phy_read(phydev, MII_ADVERTISE);
70 context->control1000 = phy_read(phydev, MII_CTRL1000);
71 context->int_enable = phy_read(phydev, AT803X_INTR_ENABLE);
72 context->smart_speed = phy_read(phydev, AT803X_SMART_SPEED);
73 context->led_control = phy_read(phydev, AT803X_LED_CONTROL);
74}
75
76/* restore relevant PHY registers from private copy */
77static void at803x_context_restore(struct phy_device *phydev,
78 const struct at803x_context *context)
79{
80 phy_write(phydev, MII_BMCR, context->bmcr);
81 phy_write(phydev, MII_ADVERTISE, context->advertise);
82 phy_write(phydev, MII_CTRL1000, context->control1000);
83 phy_write(phydev, AT803X_INTR_ENABLE, context->int_enable);
84 phy_write(phydev, AT803X_SMART_SPEED, context->smart_speed);
85 phy_write(phydev, AT803X_LED_CONTROL, context->led_control);
86}
87
42static int at803x_set_wol(struct phy_device *phydev, 88static int at803x_set_wol(struct phy_device *phydev,
43 struct ethtool_wolinfo *wol) 89 struct ethtool_wolinfo *wol)
44{ 90{
@@ -142,6 +188,26 @@ static int at803x_resume(struct phy_device *phydev)
142 return 0; 188 return 0;
143} 189}
144 190
191static int at803x_probe(struct phy_device *phydev)
192{
193 struct device *dev = &phydev->dev;
194 struct at803x_priv *priv;
195
196 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
197 if (!priv)
198 return -ENOMEM;
199
200 priv->gpiod_reset = devm_gpiod_get(dev, "reset");
201 if (IS_ERR(priv->gpiod_reset))
202 priv->gpiod_reset = NULL;
203 else
204 gpiod_direction_output(priv->gpiod_reset, 1);
205
206 phydev->priv = priv;
207
208 return 0;
209}
210
145static int at803x_config_init(struct phy_device *phydev) 211static int at803x_config_init(struct phy_device *phydev)
146{ 212{
147 int ret; 213 int ret;
@@ -189,58 +255,99 @@ static int at803x_config_intr(struct phy_device *phydev)
189 return err; 255 return err;
190} 256}
191 257
258static void at803x_link_change_notify(struct phy_device *phydev)
259{
260 struct at803x_priv *priv = phydev->priv;
261
262 /*
263 * Conduct a hardware reset for AT8030 every time a link loss is
264 * signalled. This is necessary to circumvent a hardware bug that
265 * occurs when the cable is unplugged while TX packets are pending
266 * in the FIFO. In such cases, the FIFO enters an error mode it
267 * cannot recover from by software.
268 */
269 if (phydev->drv->phy_id == ATH8030_PHY_ID) {
270 if (phydev->state == PHY_NOLINK) {
271 if (priv->gpiod_reset && !priv->phy_reset) {
272 struct at803x_context context;
273
274 at803x_context_save(phydev, &context);
275
276 gpiod_set_value(priv->gpiod_reset, 0);
277 msleep(1);
278 gpiod_set_value(priv->gpiod_reset, 1);
279 msleep(1);
280
281 at803x_context_restore(phydev, &context);
282
283 dev_dbg(&phydev->dev, "%s(): phy was reset\n",
284 __func__);
285 priv->phy_reset = true;
286 }
287 } else {
288 priv->phy_reset = false;
289 }
290 }
291}
292
192static struct phy_driver at803x_driver[] = { 293static struct phy_driver at803x_driver[] = {
193{ 294{
194 /* ATHEROS 8035 */ 295 /* ATHEROS 8035 */
195 .phy_id = 0x004dd072, 296 .phy_id = ATH8035_PHY_ID,
196 .name = "Atheros 8035 ethernet", 297 .name = "Atheros 8035 ethernet",
197 .phy_id_mask = 0xffffffef, 298 .phy_id_mask = 0xffffffef,
198 .config_init = at803x_config_init, 299 .probe = at803x_probe,
199 .set_wol = at803x_set_wol, 300 .config_init = at803x_config_init,
200 .get_wol = at803x_get_wol, 301 .link_change_notify = at803x_link_change_notify,
201 .suspend = at803x_suspend, 302 .set_wol = at803x_set_wol,
202 .resume = at803x_resume, 303 .get_wol = at803x_get_wol,
203 .features = PHY_GBIT_FEATURES, 304 .suspend = at803x_suspend,
204 .flags = PHY_HAS_INTERRUPT, 305 .resume = at803x_resume,
205 .config_aneg = genphy_config_aneg, 306 .features = PHY_GBIT_FEATURES,
206 .read_status = genphy_read_status, 307 .flags = PHY_HAS_INTERRUPT,
207 .driver = { 308 .config_aneg = genphy_config_aneg,
309 .read_status = genphy_read_status,
310 .driver = {
208 .owner = THIS_MODULE, 311 .owner = THIS_MODULE,
209 }, 312 },
210}, { 313}, {
211 /* ATHEROS 8030 */ 314 /* ATHEROS 8030 */
212 .phy_id = 0x004dd076, 315 .phy_id = ATH8030_PHY_ID,
213 .name = "Atheros 8030 ethernet", 316 .name = "Atheros 8030 ethernet",
214 .phy_id_mask = 0xffffffef, 317 .phy_id_mask = 0xffffffef,
215 .config_init = at803x_config_init, 318 .probe = at803x_probe,
216 .set_wol = at803x_set_wol, 319 .config_init = at803x_config_init,
217 .get_wol = at803x_get_wol, 320 .link_change_notify = at803x_link_change_notify,
218 .suspend = at803x_suspend, 321 .set_wol = at803x_set_wol,
219 .resume = at803x_resume, 322 .get_wol = at803x_get_wol,
220 .features = PHY_GBIT_FEATURES, 323 .suspend = at803x_suspend,
221 .flags = PHY_HAS_INTERRUPT, 324 .resume = at803x_resume,
222 .config_aneg = genphy_config_aneg, 325 .features = PHY_GBIT_FEATURES,
223 .read_status = genphy_read_status, 326 .flags = PHY_HAS_INTERRUPT,
224 .driver = { 327 .config_aneg = genphy_config_aneg,
328 .read_status = genphy_read_status,
329 .driver = {
225 .owner = THIS_MODULE, 330 .owner = THIS_MODULE,
226 }, 331 },
227}, { 332}, {
228 /* ATHEROS 8031 */ 333 /* ATHEROS 8031 */
229 .phy_id = 0x004dd074, 334 .phy_id = ATH8031_PHY_ID,
230 .name = "Atheros 8031 ethernet", 335 .name = "Atheros 8031 ethernet",
231 .phy_id_mask = 0xffffffef, 336 .phy_id_mask = 0xffffffef,
232 .config_init = at803x_config_init, 337 .probe = at803x_probe,
233 .set_wol = at803x_set_wol, 338 .config_init = at803x_config_init,
234 .get_wol = at803x_get_wol, 339 .link_change_notify = at803x_link_change_notify,
235 .suspend = at803x_suspend, 340 .set_wol = at803x_set_wol,
236 .resume = at803x_resume, 341 .get_wol = at803x_get_wol,
237 .features = PHY_GBIT_FEATURES, 342 .suspend = at803x_suspend,
238 .flags = PHY_HAS_INTERRUPT, 343 .resume = at803x_resume,
239 .config_aneg = genphy_config_aneg, 344 .features = PHY_GBIT_FEATURES,
240 .read_status = genphy_read_status, 345 .flags = PHY_HAS_INTERRUPT,
241 .ack_interrupt = &at803x_ack_interrupt, 346 .config_aneg = genphy_config_aneg,
242 .config_intr = &at803x_config_intr, 347 .read_status = genphy_read_status,
243 .driver = { 348 .ack_interrupt = &at803x_ack_interrupt,
349 .config_intr = &at803x_config_intr,
350 .driver = {
244 .owner = THIS_MODULE, 351 .owner = THIS_MODULE,
245 }, 352 },
246} }; 353} };
@@ -260,9 +367,9 @@ module_init(atheros_init);
260module_exit(atheros_exit); 367module_exit(atheros_exit);
261 368
262static struct mdio_device_id __maybe_unused atheros_tbl[] = { 369static struct mdio_device_id __maybe_unused atheros_tbl[] = {
263 { 0x004dd076, 0xffffffef }, 370 { ATH8030_PHY_ID, 0xffffffef },
264 { 0x004dd074, 0xffffffef }, 371 { ATH8031_PHY_ID, 0xffffffef },
265 { 0x004dd072, 0xffffffef }, 372 { ATH8035_PHY_ID, 0xffffffef },
266 { } 373 { }
267}; 374};
268 375
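The at803x rework above snapshots a handful of PHY registers, pulses the reset GPIO through the gpiod consumer API, and writes the snapshot back, so the AT8030 can be recovered from its stuck-FIFO state without losing its link configuration. A rough sketch of that save, reset, restore sequence; the rd/wr callbacks, register list and reg_snapshot type are invented, and only the gpiod_set_value()/msleep() calls mirror the hunk:

    /*
     * Sketch of the recovery sequence above: snapshot the registers that
     * matter, pulse the reset GPIO, write the snapshot back. The rd/wr
     * callbacks, register list and reg_snapshot type are invented; only
     * the gpiod_set_value()/msleep() calls mirror the hunk. n must not
     * exceed the snapshot size.
     */
    #include <linux/delay.h>
    #include <linux/gpio/consumer.h>
    #include <linux/types.h>

    struct reg_snapshot {
            u16 val[8];
    };

    static void reset_with_context(struct gpio_desc *reset,
                                   int (*rd)(void *phy, int reg),
                                   void (*wr)(void *phy, int reg, u16 val),
                                   void *phy, const int *regs, int n,
                                   struct reg_snapshot *s)
    {
            int i;

            for (i = 0; i < n; i++)         /* save */
                    s->val[i] = rd(phy, regs[i]);

            gpiod_set_value(reset, 0);      /* assert reset (active low here) */
            msleep(1);
            gpiod_set_value(reset, 1);      /* release reset */
            msleep(1);

            for (i = 0; i < n; i++)         /* restore */
                    wr(phy, regs[i], s->val[i]);
    }
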
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 3bc079a67a3d..f7c61812ea4a 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -720,6 +720,9 @@ void phy_state_machine(struct work_struct *work)
720 720
721 mutex_lock(&phydev->lock); 721 mutex_lock(&phydev->lock);
722 722
723 if (phydev->drv->link_change_notify)
724 phydev->drv->link_change_notify(phydev);
725
723 switch (phydev->state) { 726 switch (phydev->state) {
724 case PHY_DOWN: 727 case PHY_DOWN:
725 case PHY_STARTING: 728 case PHY_STARTING:
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index ad4a94e9ff57..87526443841f 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -83,6 +83,7 @@
83#include <linux/delay.h> 83#include <linux/delay.h>
84#include <linux/init.h> 84#include <linux/init.h>
85#include <linux/slab.h> 85#include <linux/slab.h>
86#include <linux/workqueue.h>
86#include "slip.h" 87#include "slip.h"
87#ifdef CONFIG_INET 88#ifdef CONFIG_INET
88#include <linux/ip.h> 89#include <linux/ip.h>
@@ -416,36 +417,46 @@ static void sl_encaps(struct slip *sl, unsigned char *icp, int len)
416#endif 417#endif
417} 418}
418 419
419/* 420/* Write out any remaining transmit buffer. Scheduled when tty is writable */
420 * Called by the driver when there's room for more data. If we have 421static void slip_transmit(struct work_struct *work)
421 * more packets to send, we send them here.
422 */
423static void slip_write_wakeup(struct tty_struct *tty)
424{ 422{
423 struct slip *sl = container_of(work, struct slip, tx_work);
425 int actual; 424 int actual;
426 struct slip *sl = tty->disc_data;
427 425
426 spin_lock_bh(&sl->lock);
428 /* First make sure we're connected. */ 427 /* First make sure we're connected. */
429 if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev)) 428 if (!sl->tty || sl->magic != SLIP_MAGIC || !netif_running(sl->dev)) {
429 spin_unlock_bh(&sl->lock);
430 return; 430 return;
431 }
431 432
432 spin_lock_bh(&sl->lock);
433 if (sl->xleft <= 0) { 433 if (sl->xleft <= 0) {
434 /* Now serial buffer is almost free & we can start 434 /* Now serial buffer is almost free & we can start
435 * transmission of another packet */ 435 * transmission of another packet */
436 sl->dev->stats.tx_packets++; 436 sl->dev->stats.tx_packets++;
437 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); 437 clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
438 spin_unlock_bh(&sl->lock); 438 spin_unlock_bh(&sl->lock);
439 sl_unlock(sl); 439 sl_unlock(sl);
440 return; 440 return;
441 } 441 }
442 442
443 actual = tty->ops->write(tty, sl->xhead, sl->xleft); 443 actual = sl->tty->ops->write(sl->tty, sl->xhead, sl->xleft);
444 sl->xleft -= actual; 444 sl->xleft -= actual;
445 sl->xhead += actual; 445 sl->xhead += actual;
446 spin_unlock_bh(&sl->lock); 446 spin_unlock_bh(&sl->lock);
447} 447}
448 448
449/*
450 * Called by the driver when there's room for more data.
451 * Schedule the transmit.
452 */
453static void slip_write_wakeup(struct tty_struct *tty)
454{
455 struct slip *sl = tty->disc_data;
456
457 schedule_work(&sl->tx_work);
458}
459
449static void sl_tx_timeout(struct net_device *dev) 460static void sl_tx_timeout(struct net_device *dev)
450{ 461{
451 struct slip *sl = netdev_priv(dev); 462 struct slip *sl = netdev_priv(dev);
@@ -749,6 +760,7 @@ static struct slip *sl_alloc(dev_t line)
749 sl->magic = SLIP_MAGIC; 760 sl->magic = SLIP_MAGIC;
750 sl->dev = dev; 761 sl->dev = dev;
751 spin_lock_init(&sl->lock); 762 spin_lock_init(&sl->lock);
763 INIT_WORK(&sl->tx_work, slip_transmit);
752 sl->mode = SL_MODE_DEFAULT; 764 sl->mode = SL_MODE_DEFAULT;
753#ifdef CONFIG_SLIP_SMART 765#ifdef CONFIG_SLIP_SMART
754 /* initialize timer_list struct */ 766 /* initialize timer_list struct */
@@ -872,8 +884,12 @@ static void slip_close(struct tty_struct *tty)
872 if (!sl || sl->magic != SLIP_MAGIC || sl->tty != tty) 884 if (!sl || sl->magic != SLIP_MAGIC || sl->tty != tty)
873 return; 885 return;
874 886
887 spin_lock_bh(&sl->lock);
875 tty->disc_data = NULL; 888 tty->disc_data = NULL;
876 sl->tty = NULL; 889 sl->tty = NULL;
890 spin_unlock_bh(&sl->lock);
891
892 flush_work(&sl->tx_work);
877 893
878 /* VSV = very important to remove timers */ 894 /* VSV = very important to remove timers */
879#ifdef CONFIG_SLIP_SMART 895#ifdef CONFIG_SLIP_SMART
diff --git a/drivers/net/slip/slip.h b/drivers/net/slip/slip.h
index 67673cf1266b..cf32aadf508f 100644
--- a/drivers/net/slip/slip.h
+++ b/drivers/net/slip/slip.h
@@ -53,6 +53,7 @@ struct slip {
53 struct tty_struct *tty; /* ptr to TTY structure */ 53 struct tty_struct *tty; /* ptr to TTY structure */
54 struct net_device *dev; /* easy for intr handling */ 54 struct net_device *dev; /* easy for intr handling */
55 spinlock_t lock; 55 spinlock_t lock;
56 struct work_struct tx_work; /* Flushes transmit buffer */
56 57
57#ifdef SL_INCLUDE_CSLIP 58#ifdef SL_INCLUDE_CSLIP
58 struct slcompress *slcomp; /* for header compression */ 59 struct slcompress *slcomp; /* for header compression */
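Note on the slip hunks: the transmit flush is moved out of the tty write-wakeup callback, which can run in atomic context, into a work item, and slip_close() flushes that work only after clearing sl->tty under the lock, so a queued work item cannot race with teardown. The deferral pattern in isolation looks roughly like the sketch below; struct slip_like and the example names are stand-ins, and the locking mirrors the diff. The work item is assumed to have been set up with INIT_WORK() at allocation time, as sl_alloc() does above.

#include <linux/spinlock.h>
#include <linux/tty.h>
#include <linux/workqueue.h>

struct slip_like {
	struct tty_struct *tty;
	spinlock_t lock;
	struct work_struct tx_work;	/* INIT_WORK(..., slip_like_transmit) at alloc */
	/* ... transmit buffer state ... */
};

static void slip_like_transmit(struct work_struct *work)
{
	struct slip_like *sl = container_of(work, struct slip_like, tx_work);

	spin_lock_bh(&sl->lock);
	if (!sl->tty) {			/* closed while the work was queued */
		spin_unlock_bh(&sl->lock);
		return;
	}
	/* push remaining bytes with sl->tty->ops->write(...) here */
	spin_unlock_bh(&sl->lock);
}

static void slip_like_write_wakeup(struct tty_struct *tty)
{
	struct slip_like *sl = tty->disc_data;

	schedule_work(&sl->tx_work);	/* cheap and safe in atomic context */
}

static void slip_like_close(struct slip_like *sl, struct tty_struct *tty)
{
	spin_lock_bh(&sl->lock);
	tty->disc_data = NULL;
	sl->tty = NULL;			/* the transmit work sees this and bails */
	spin_unlock_bh(&sl->lock);

	flush_work(&sl->tx_work);	/* no tx_work runs past this point */
}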
diff --git a/drivers/net/usb/huawei_cdc_ncm.c b/drivers/net/usb/huawei_cdc_ncm.c
index f9822bc75425..5d95a13dbe2a 100644
--- a/drivers/net/usb/huawei_cdc_ncm.c
+++ b/drivers/net/usb/huawei_cdc_ncm.c
@@ -84,12 +84,13 @@ static int huawei_cdc_ncm_bind(struct usbnet *usbnet_dev,
84 ctx = drvstate->ctx; 84 ctx = drvstate->ctx;
85 85
86 if (usbnet_dev->status) 86 if (usbnet_dev->status)
87 /* CDC-WMC r1.1 requires wMaxCommand to be "at least 256 87 /* The wMaxCommand buffer must be big enough to hold
88 * decimal (0x100)" 88 * any message from the modem. Experience has shown
89 * that some replies are more than 256 bytes long
89 */ 90 */
90 subdriver = usb_cdc_wdm_register(ctx->control, 91 subdriver = usb_cdc_wdm_register(ctx->control,
91 &usbnet_dev->status->desc, 92 &usbnet_dev->status->desc,
92 256, /* wMaxCommand */ 93 1024, /* wMaxCommand */
93 huawei_cdc_ncm_wdm_manage_power); 94 huawei_cdc_ncm_wdm_manage_power);
94 if (IS_ERR(subdriver)) { 95 if (IS_ERR(subdriver)) {
95 ret = PTR_ERR(subdriver); 96 ret = PTR_ERR(subdriver);
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 97394345e5dd..b76f7dcde0db 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -2589,8 +2589,8 @@ vmxnet3_open(struct net_device *netdev)
2589 for (i = 0; i < adapter->num_tx_queues; i++) 2589 for (i = 0; i < adapter->num_tx_queues; i++)
2590 spin_lock_init(&adapter->tx_queue[i].tx_lock); 2590 spin_lock_init(&adapter->tx_queue[i].tx_lock);
2591 2591
2592 err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE, 2592 err = vmxnet3_create_queues(adapter, adapter->tx_ring_size,
2593 VMXNET3_DEF_RX_RING_SIZE, 2593 adapter->rx_ring_size,
2594 VMXNET3_DEF_RX_RING_SIZE); 2594 VMXNET3_DEF_RX_RING_SIZE);
2595 if (err) 2595 if (err)
2596 goto queue_err; 2596 goto queue_err;
@@ -2968,6 +2968,9 @@ vmxnet3_probe_device(struct pci_dev *pdev,
2968 adapter->netdev = netdev; 2968 adapter->netdev = netdev;
2969 adapter->pdev = pdev; 2969 adapter->pdev = pdev;
2970 2970
2971 adapter->tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
2972 adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
2973
2971 spin_lock_init(&adapter->cmd_lock); 2974 spin_lock_init(&adapter->cmd_lock);
2972 adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter, 2975 adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
2973 sizeof(struct vmxnet3_adapter), 2976 sizeof(struct vmxnet3_adapter),
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 40c1c7b0d9e0..b725fd9e7803 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -449,8 +449,8 @@ vmxnet3_get_ringparam(struct net_device *netdev,
449 param->rx_mini_max_pending = 0; 449 param->rx_mini_max_pending = 0;
450 param->rx_jumbo_max_pending = 0; 450 param->rx_jumbo_max_pending = 0;
451 451
452 param->rx_pending = adapter->rx_queue[0].rx_ring[0].size; 452 param->rx_pending = adapter->rx_ring_size;
453 param->tx_pending = adapter->tx_queue[0].tx_ring.size; 453 param->tx_pending = adapter->tx_ring_size;
454 param->rx_mini_pending = 0; 454 param->rx_mini_pending = 0;
455 param->rx_jumbo_pending = 0; 455 param->rx_jumbo_pending = 0;
456} 456}
@@ -529,9 +529,11 @@ vmxnet3_set_ringparam(struct net_device *netdev,
529 * size */ 529 * size */
530 netdev_err(netdev, "failed to apply new sizes, " 530 netdev_err(netdev, "failed to apply new sizes, "
531 "try the default ones\n"); 531 "try the default ones\n");
532 new_rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
533 new_tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
532 err = vmxnet3_create_queues(adapter, 534 err = vmxnet3_create_queues(adapter,
533 VMXNET3_DEF_TX_RING_SIZE, 535 new_tx_ring_size,
534 VMXNET3_DEF_RX_RING_SIZE, 536 new_rx_ring_size,
535 VMXNET3_DEF_RX_RING_SIZE); 537 VMXNET3_DEF_RX_RING_SIZE);
536 if (err) { 538 if (err) {
537 netdev_err(netdev, "failed to create queues " 539 netdev_err(netdev, "failed to create queues "
@@ -545,6 +547,8 @@ vmxnet3_set_ringparam(struct net_device *netdev,
545 netdev_err(netdev, "failed to re-activate, error %d." 547 netdev_err(netdev, "failed to re-activate, error %d."
546 " Closing it\n", err); 548 " Closing it\n", err);
547 } 549 }
550 adapter->tx_ring_size = new_tx_ring_size;
551 adapter->rx_ring_size = new_rx_ring_size;
548 552
549out: 553out:
550 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); 554 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 190569d02450..29ee77f2c97f 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -349,6 +349,11 @@ struct vmxnet3_adapter {
349 u32 link_speed; /* in mbps */ 349 u32 link_speed; /* in mbps */
350 350
351 u64 tx_timeout_count; 351 u64 tx_timeout_count;
352
353 /* Ring sizes */
354 u32 tx_ring_size;
355 u32 rx_ring_size;
356
352 struct work_struct work; 357 struct work_struct work;
353 358
354 unsigned long state; /* VMXNET3_STATE_BIT_xxx */ 359 unsigned long state; /* VMXNET3_STATE_BIT_xxx */
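Note on the vmxnet3 hunks: the TX/RX ring sizes become adapter state (tx_ring_size/rx_ring_size), so a value set via ethtool -G is reported back by ethtool -g and reused when the device is reopened. A condensed sketch of the set_ringparam flow, assuming the driver's vmxnet3_create_queues() helper as it appears above; the example_ name is hypothetical and error handling is trimmed.

static int example_set_ringparam(struct vmxnet3_adapter *adapter,
				 u32 new_tx, u32 new_rx)
{
	int err;

	/* Re-create the queues with the requested sizes; on failure
	 * fall back to the defaults, as the hunk above does.
	 */
	err = vmxnet3_create_queues(adapter, new_tx, new_rx,
				    VMXNET3_DEF_RX_RING_SIZE);
	if (err) {
		new_tx = VMXNET3_DEF_TX_RING_SIZE;
		new_rx = VMXNET3_DEF_RX_RING_SIZE;
		err = vmxnet3_create_queues(adapter, new_tx, new_rx,
					    VMXNET3_DEF_RX_RING_SIZE);
	}

	/* Whatever we ended up with is what get_ringparam() reports
	 * and what vmxnet3_open() will use next time.
	 */
	adapter->tx_ring_size = new_tx;
	adapter->rx_ring_size = new_rx;
	return err;
}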
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index e3f67b8d3f80..40fd9b7b1426 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -36,7 +36,7 @@ config B43_SSB
36choice 36choice
37 prompt "Supported bus types" 37 prompt "Supported bus types"
38 depends on B43 38 depends on B43
39 default B43_BCMA_AND_SSB 39 default B43_BUSES_BCMA_AND_SSB
40 40
41config B43_BUSES_BCMA_AND_SSB 41config B43_BUSES_BCMA_AND_SSB
42 bool "BCMA and SSB" 42 bool "BCMA and SSB"
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 32538ac5f7e4..0d6a0bb1f876 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -5221,6 +5221,7 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
5221 /* We don't support 5 GHz on some PHYs yet */ 5221 /* We don't support 5 GHz on some PHYs yet */
5222 switch (dev->phy.type) { 5222 switch (dev->phy.type) {
5223 case B43_PHYTYPE_A: 5223 case B43_PHYTYPE_A:
5224 case B43_PHYTYPE_G:
5224 case B43_PHYTYPE_N: 5225 case B43_PHYTYPE_N:
5225 case B43_PHYTYPE_LP: 5226 case B43_PHYTYPE_LP:
5226 case B43_PHYTYPE_HT: 5227 case B43_PHYTYPE_HT:
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 4f38f19b8e3d..6e6ef3fc2247 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -811,9 +811,13 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
811 break; 811 break;
812 case B43_PHYTYPE_G: 812 case B43_PHYTYPE_G:
813 status.band = IEEE80211_BAND_2GHZ; 813 status.band = IEEE80211_BAND_2GHZ;
814 /* chanid is the radio channel cookie value as used 814 /* Somewhere between 478.104 and 508.1084 firmware for G-PHY
815 * to tune the radio. */ 815 * has been modified to be compatible with N-PHY and others.
816 status.freq = chanid + 2400; 816 */
817 if (dev->fw.rev >= 508)
818 status.freq = ieee80211_channel_to_frequency(chanid, status.band);
819 else
820 status.freq = chanid + 2400;
817 break; 821 break;
818 case B43_PHYTYPE_N: 822 case B43_PHYTYPE_N:
819 case B43_PHYTYPE_LP: 823 case B43_PHYTYPE_LP:
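Note on the b43 RX fix: G-PHY firmware revision 508 and later reports a real 2.4 GHz channel number rather than a MHz offset from 2400. Reduced to the conversion itself, assuming the driver's struct b43_wldev with its fw.rev field as used above; ieee80211_channel_to_frequency() is the existing cfg80211 helper and the example_ name is hypothetical.

#include <net/cfg80211.h>

static int example_g_phy_rx_freq(struct b43_wldev *dev, u16 chanid)
{
	/* Firmware >= 508 reports a channel number (1..14); older
	 * firmware reports an offset in MHz from 2400, e.g. 12 -> 2412.
	 */
	if (dev->fw.rev >= 508)
		return ieee80211_channel_to_frequency(chanid,
						      IEEE80211_BAND_2GHZ);
	return chanid + 2400;
}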
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index 574d4b597468..2cc9b6fca490 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -50,7 +50,7 @@ mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb,
50 return -1; 50 return -1;
51 } 51 }
52 mapping.len = size; 52 mapping.len = size;
53 memcpy(skb->cb, &mapping, sizeof(mapping)); 53 mwifiex_store_mapping(skb, &mapping);
54 return 0; 54 return 0;
55} 55}
56 56
@@ -60,7 +60,7 @@ static void mwifiex_unmap_pci_memory(struct mwifiex_adapter *adapter,
60 struct pcie_service_card *card = adapter->card; 60 struct pcie_service_card *card = adapter->card;
61 struct mwifiex_dma_mapping mapping; 61 struct mwifiex_dma_mapping mapping;
62 62
63 MWIFIEX_SKB_PACB(skb, &mapping); 63 mwifiex_get_mapping(skb, &mapping);
64 pci_unmap_single(card->dev, mapping.addr, mapping.len, flags); 64 pci_unmap_single(card->dev, mapping.addr, mapping.len, flags);
65} 65}
66 66
diff --git a/drivers/net/wireless/mwifiex/util.h b/drivers/net/wireless/mwifiex/util.h
index ddae57021397..caadb3737b9e 100644
--- a/drivers/net/wireless/mwifiex/util.h
+++ b/drivers/net/wireless/mwifiex/util.h
@@ -20,32 +20,55 @@
20#ifndef _MWIFIEX_UTIL_H_ 20#ifndef _MWIFIEX_UTIL_H_
21#define _MWIFIEX_UTIL_H_ 21#define _MWIFIEX_UTIL_H_
22 22
23struct mwifiex_dma_mapping {
24 dma_addr_t addr;
25 size_t len;
26};
27
28struct mwifiex_cb {
29 struct mwifiex_dma_mapping dma_mapping;
30 union {
31 struct mwifiex_rxinfo rx_info;
32 struct mwifiex_txinfo tx_info;
33 };
34};
35
23static inline struct mwifiex_rxinfo *MWIFIEX_SKB_RXCB(struct sk_buff *skb) 36static inline struct mwifiex_rxinfo *MWIFIEX_SKB_RXCB(struct sk_buff *skb)
24{ 37{
25 return (struct mwifiex_rxinfo *)(skb->cb + sizeof(dma_addr_t)); 38 struct mwifiex_cb *cb = (struct mwifiex_cb *)skb->cb;
39
40 BUILD_BUG_ON(sizeof(struct mwifiex_cb) > sizeof(skb->cb));
41 return &cb->rx_info;
26} 42}
27 43
28static inline struct mwifiex_txinfo *MWIFIEX_SKB_TXCB(struct sk_buff *skb) 44static inline struct mwifiex_txinfo *MWIFIEX_SKB_TXCB(struct sk_buff *skb)
29{ 45{
30 return (struct mwifiex_txinfo *)(skb->cb + sizeof(dma_addr_t)); 46 struct mwifiex_cb *cb = (struct mwifiex_cb *)skb->cb;
47
48 return &cb->tx_info;
31} 49}
32 50
33struct mwifiex_dma_mapping { 51static inline void mwifiex_store_mapping(struct sk_buff *skb,
34 dma_addr_t addr; 52 struct mwifiex_dma_mapping *mapping)
35 size_t len; 53{
36}; 54 struct mwifiex_cb *cb = (struct mwifiex_cb *)skb->cb;
55
56 memcpy(&cb->dma_mapping, mapping, sizeof(*mapping));
57}
37 58
38static inline void MWIFIEX_SKB_PACB(struct sk_buff *skb, 59static inline void mwifiex_get_mapping(struct sk_buff *skb,
39 struct mwifiex_dma_mapping *mapping) 60 struct mwifiex_dma_mapping *mapping)
40{ 61{
41 memcpy(mapping, skb->cb, sizeof(*mapping)); 62 struct mwifiex_cb *cb = (struct mwifiex_cb *)skb->cb;
63
64 memcpy(mapping, &cb->dma_mapping, sizeof(*mapping));
42} 65}
43 66
44static inline dma_addr_t MWIFIEX_SKB_DMA_ADDR(struct sk_buff *skb) 67static inline dma_addr_t MWIFIEX_SKB_DMA_ADDR(struct sk_buff *skb)
45{ 68{
46 struct mwifiex_dma_mapping mapping; 69 struct mwifiex_dma_mapping mapping;
47 70
48 MWIFIEX_SKB_PACB(skb, &mapping); 71 mwifiex_get_mapping(skb, &mapping);
49 72
50 return mapping.addr; 73 return mapping.addr;
51} 74}
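Note on the mwifiex rework: ad-hoc pointer arithmetic in skb->cb is replaced by a single typed overlay, struct mwifiex_cb, with a BUILD_BUG_ON that fails the build if the overlay ever outgrows the 48-byte control buffer. The same idiom with hypothetical example_ names:

#include <linux/skbuff.h>

struct example_cb {
	dma_addr_t	dma_addr;	/* per-packet DMA state */
	size_t		dma_len;
	u32		flags;
};

static inline struct example_cb *example_skb_cb(struct sk_buff *skb)
{
	/* skb->cb is 48 bytes; fail the build, not the run, if the
	 * private struct ever outgrows it.
	 */
	BUILD_BUG_ON(sizeof(struct example_cb) > sizeof(skb->cb));
	return (struct example_cb *)skb->cb;
}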
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 2f1cd929c6f6..a511cccc9f01 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -1681,8 +1681,13 @@ static int rt2500pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
1681 /* 1681 /*
1682 * Detect if this device has an hardware controlled radio. 1682 * Detect if this device has an hardware controlled radio.
1683 */ 1683 */
1684 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO)) 1684 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO)) {
1685 __set_bit(CAPABILITY_HW_BUTTON, &rt2x00dev->cap_flags); 1685 __set_bit(CAPABILITY_HW_BUTTON, &rt2x00dev->cap_flags);
1686 /*
1687 * On this device RFKILL initialized during probe does not work.
1688 */
1689 __set_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags);
1690 }
1686 1691
1687 /* 1692 /*
1688 * Check if the BBP tuning should be enabled. 1693 * Check if the BBP tuning should be enabled.
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index a49c3d73ea2c..e11dab2216c6 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -229,6 +229,27 @@ static enum hrtimer_restart rt2800usb_tx_sta_fifo_timeout(struct hrtimer *timer)
229/* 229/*
230 * Firmware functions 230 * Firmware functions
231 */ 231 */
232static int rt2800usb_autorun_detect(struct rt2x00_dev *rt2x00dev)
233{
234 __le32 reg;
235 u32 fw_mode;
236
237 /* cannot use rt2x00usb_register_read here as it uses different
238 * mode (MULTI_READ vs. DEVICE_MODE) and does not pass the
239 * magic value USB_MODE_AUTORUN (0x11) to the device, thus the
240 * returned value would be invalid.
241 */
242 rt2x00usb_vendor_request(rt2x00dev, USB_DEVICE_MODE,
243 USB_VENDOR_REQUEST_IN, 0, USB_MODE_AUTORUN,
244 &reg, sizeof(reg), REGISTER_TIMEOUT_FIRMWARE);
245 fw_mode = le32_to_cpu(reg);
246
247 if ((fw_mode & 0x00000003) == 2)
248 return 1;
249
250 return 0;
251}
252
232static char *rt2800usb_get_firmware_name(struct rt2x00_dev *rt2x00dev) 253static char *rt2800usb_get_firmware_name(struct rt2x00_dev *rt2x00dev)
233{ 254{
234 return FIRMWARE_RT2870; 255 return FIRMWARE_RT2870;
@@ -257,8 +278,13 @@ static int rt2800usb_write_firmware(struct rt2x00_dev *rt2x00dev,
257 /* 278 /*
258 * Write firmware to device. 279 * Write firmware to device.
259 */ 280 */
260 rt2x00usb_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE, 281 if (rt2800usb_autorun_detect(rt2x00dev)) {
261 data + offset, length); 282 rt2x00_info(rt2x00dev,
283 "Firmware loading not required - NIC in AutoRun mode\n");
284 } else {
285 rt2x00usb_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE,
286 data + offset, length);
287 }
262 288
263 rt2x00usb_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0); 289 rt2x00usb_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0);
264 rt2x00usb_register_write(rt2x00dev, H2M_MAILBOX_STATUS, ~0); 290 rt2x00usb_register_write(rt2x00dev, H2M_MAILBOX_STATUS, ~0);
@@ -735,11 +761,18 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
735/* 761/*
736 * Device probe functions. 762 * Device probe functions.
737 */ 763 */
764static int rt2800usb_efuse_detect(struct rt2x00_dev *rt2x00dev)
765{
766 if (rt2800usb_autorun_detect(rt2x00dev))
767 return 1;
768 return rt2800_efuse_detect(rt2x00dev);
769}
770
738static int rt2800usb_read_eeprom(struct rt2x00_dev *rt2x00dev) 771static int rt2800usb_read_eeprom(struct rt2x00_dev *rt2x00dev)
739{ 772{
740 int retval; 773 int retval;
741 774
742 if (rt2800_efuse_detect(rt2x00dev)) 775 if (rt2800usb_efuse_detect(rt2x00dev))
743 retval = rt2800_read_eeprom_efuse(rt2x00dev); 776 retval = rt2800_read_eeprom_efuse(rt2x00dev);
744 else 777 else
745 retval = rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom, 778 retval = rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom,
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 010b76505243..d13f25cd70d5 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -693,6 +693,7 @@ enum rt2x00_capability_flags {
693 REQUIRE_SW_SEQNO, 693 REQUIRE_SW_SEQNO,
694 REQUIRE_HT_TX_DESC, 694 REQUIRE_HT_TX_DESC,
695 REQUIRE_PS_AUTOWAKE, 695 REQUIRE_PS_AUTOWAKE,
696 REQUIRE_DELAYED_RFKILL,
696 697
697 /* 698 /*
698 * Capabilities 699 * Capabilities
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 2bde6729f5e6..4fa43a2eeb73 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -1126,9 +1126,10 @@ static void rt2x00lib_uninitialize(struct rt2x00_dev *rt2x00dev)
1126 return; 1126 return;
1127 1127
1128 /* 1128 /*
1129 * Unregister extra components. 1129 * Stop rfkill polling.
1130 */ 1130 */
1131 rt2x00rfkill_unregister(rt2x00dev); 1131 if (test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags))
1132 rt2x00rfkill_unregister(rt2x00dev);
1132 1133
1133 /* 1134 /*
1134 * Allow the HW to uninitialize. 1135 * Allow the HW to uninitialize.
@@ -1166,6 +1167,12 @@ static int rt2x00lib_initialize(struct rt2x00_dev *rt2x00dev)
1166 1167
1167 set_bit(DEVICE_STATE_INITIALIZED, &rt2x00dev->flags); 1168 set_bit(DEVICE_STATE_INITIALIZED, &rt2x00dev->flags);
1168 1169
1170 /*
1171 * Start rfkill polling.
1172 */
1173 if (test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags))
1174 rt2x00rfkill_register(rt2x00dev);
1175
1169 return 0; 1176 return 0;
1170} 1177}
1171 1178
@@ -1375,7 +1382,12 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
1375 rt2x00link_register(rt2x00dev); 1382 rt2x00link_register(rt2x00dev);
1376 rt2x00leds_register(rt2x00dev); 1383 rt2x00leds_register(rt2x00dev);
1377 rt2x00debug_register(rt2x00dev); 1384 rt2x00debug_register(rt2x00dev);
1378 rt2x00rfkill_register(rt2x00dev); 1385
1386 /*
1387 * Start rfkill polling.
1388 */
1389 if (!test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags))
1390 rt2x00rfkill_register(rt2x00dev);
1379 1391
1380 return 0; 1392 return 0;
1381 1393
@@ -1391,6 +1403,12 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
1391 clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags); 1403 clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
1392 1404
1393 /* 1405 /*
1406 * Stop rfkill polling.
1407 */
1408 if (!test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags))
1409 rt2x00rfkill_unregister(rt2x00dev);
1410
1411 /*
1394 * Disable radio. 1412 * Disable radio.
1395 */ 1413 */
1396 rt2x00lib_disable_radio(rt2x00dev); 1414 rt2x00lib_disable_radio(rt2x00dev);
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index 212ac4842c16..004dff9b962d 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -487,6 +487,8 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
487 crypto.cipher = rt2x00crypto_key_to_cipher(key); 487 crypto.cipher = rt2x00crypto_key_to_cipher(key);
488 if (crypto.cipher == CIPHER_NONE) 488 if (crypto.cipher == CIPHER_NONE)
489 return -EOPNOTSUPP; 489 return -EOPNOTSUPP;
490 if (crypto.cipher == CIPHER_TKIP && rt2x00_is_usb(rt2x00dev))
491 return -EOPNOTSUPP;
490 492
491 crypto.cmd = cmd; 493 crypto.cmd = cmd;
492 494
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.h b/drivers/net/wireless/rt2x00/rt2x00usb.h
index e7bcf62347d5..831b65f93feb 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.h
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.h
@@ -93,6 +93,7 @@ enum rt2x00usb_mode_offset {
93 USB_MODE_SLEEP = 7, /* RT73USB */ 93 USB_MODE_SLEEP = 7, /* RT73USB */
94 USB_MODE_FIRMWARE = 8, /* RT73USB */ 94 USB_MODE_FIRMWARE = 8, /* RT73USB */
95 USB_MODE_WAKEUP = 9, /* RT73USB */ 95 USB_MODE_WAKEUP = 9, /* RT73USB */
96 USB_MODE_AUTORUN = 17, /* RT2800USB */
96}; 97};
97 98
98/** 99/**
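Note on the rt2x00 hunks: the new REQUIRE_DELAYED_RFKILL capability makes devices that set it (here rt2500pci with a hardware radio button) register and unregister rfkill in lib_initialize()/lib_uninitialize() instead of probe()/remove(), because rfkill polling set up at probe time does not work on them. The decision reduces to the sketch below, using the cap_flags bit and rt2x00rfkill_register() from the diff; example_start_rfkill() itself is hypothetical.

static void example_start_rfkill(struct rt2x00_dev *rt2x00dev, bool at_probe)
{
	bool delayed = test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags);

	/* Most hardware registers rfkill at probe time; chips that set
	 * REQUIRE_DELAYED_RFKILL only start polling once the device has
	 * been initialized, and stop it again on uninitialize.
	 */
	if (at_probe != delayed)
		rt2x00rfkill_register(rt2x00dev);
}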
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 4dd7c4a1923b..2532ce85d718 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -222,6 +222,7 @@ struct xenvif {
222 222
223 /* Queues */ 223 /* Queues */
224 struct xenvif_queue *queues; 224 struct xenvif_queue *queues;
225 unsigned int num_queues; /* active queues, resource allocated */
225 226
226 /* Miscellaneous private stuff. */ 227 /* Miscellaneous private stuff. */
227 struct net_device *dev; 228 struct net_device *dev;
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 852da34b8961..9e97c7ca0ddd 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -137,32 +137,11 @@ static void xenvif_wake_queue_callback(unsigned long data)
137 } 137 }
138} 138}
139 139
140static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
141 void *accel_priv, select_queue_fallback_t fallback)
142{
143 unsigned int num_queues = dev->real_num_tx_queues;
144 u32 hash;
145 u16 queue_index;
146
147 /* First, check if there is only one queue to optimise the
148 * single-queue or old frontend scenario.
149 */
150 if (num_queues == 1) {
151 queue_index = 0;
152 } else {
153 /* Use skb_get_hash to obtain an L4 hash if available */
154 hash = skb_get_hash(skb);
155 queue_index = hash % num_queues;
156 }
157
158 return queue_index;
159}
160
161static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) 140static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
162{ 141{
163 struct xenvif *vif = netdev_priv(dev); 142 struct xenvif *vif = netdev_priv(dev);
164 struct xenvif_queue *queue = NULL; 143 struct xenvif_queue *queue = NULL;
165 unsigned int num_queues = dev->real_num_tx_queues; 144 unsigned int num_queues = vif->num_queues;
166 u16 index; 145 u16 index;
167 int min_slots_needed; 146 int min_slots_needed;
168 147
@@ -225,7 +204,7 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
225{ 204{
226 struct xenvif *vif = netdev_priv(dev); 205 struct xenvif *vif = netdev_priv(dev);
227 struct xenvif_queue *queue = NULL; 206 struct xenvif_queue *queue = NULL;
228 unsigned int num_queues = dev->real_num_tx_queues; 207 unsigned int num_queues = vif->num_queues;
229 unsigned long rx_bytes = 0; 208 unsigned long rx_bytes = 0;
230 unsigned long rx_packets = 0; 209 unsigned long rx_packets = 0;
231 unsigned long tx_bytes = 0; 210 unsigned long tx_bytes = 0;
@@ -256,7 +235,7 @@ out:
256static void xenvif_up(struct xenvif *vif) 235static void xenvif_up(struct xenvif *vif)
257{ 236{
258 struct xenvif_queue *queue = NULL; 237 struct xenvif_queue *queue = NULL;
259 unsigned int num_queues = vif->dev->real_num_tx_queues; 238 unsigned int num_queues = vif->num_queues;
260 unsigned int queue_index; 239 unsigned int queue_index;
261 240
262 for (queue_index = 0; queue_index < num_queues; ++queue_index) { 241 for (queue_index = 0; queue_index < num_queues; ++queue_index) {
@@ -272,7 +251,7 @@ static void xenvif_up(struct xenvif *vif)
272static void xenvif_down(struct xenvif *vif) 251static void xenvif_down(struct xenvif *vif)
273{ 252{
274 struct xenvif_queue *queue = NULL; 253 struct xenvif_queue *queue = NULL;
275 unsigned int num_queues = vif->dev->real_num_tx_queues; 254 unsigned int num_queues = vif->num_queues;
276 unsigned int queue_index; 255 unsigned int queue_index;
277 256
278 for (queue_index = 0; queue_index < num_queues; ++queue_index) { 257 for (queue_index = 0; queue_index < num_queues; ++queue_index) {
@@ -379,7 +358,7 @@ static void xenvif_get_ethtool_stats(struct net_device *dev,
379 struct ethtool_stats *stats, u64 * data) 358 struct ethtool_stats *stats, u64 * data)
380{ 359{
381 struct xenvif *vif = netdev_priv(dev); 360 struct xenvif *vif = netdev_priv(dev);
382 unsigned int num_queues = dev->real_num_tx_queues; 361 unsigned int num_queues = vif->num_queues;
383 int i; 362 int i;
384 unsigned int queue_index; 363 unsigned int queue_index;
385 struct xenvif_stats *vif_stats; 364 struct xenvif_stats *vif_stats;
@@ -424,7 +403,6 @@ static const struct net_device_ops xenvif_netdev_ops = {
424 .ndo_fix_features = xenvif_fix_features, 403 .ndo_fix_features = xenvif_fix_features,
425 .ndo_set_mac_address = eth_mac_addr, 404 .ndo_set_mac_address = eth_mac_addr,
426 .ndo_validate_addr = eth_validate_addr, 405 .ndo_validate_addr = eth_validate_addr,
427 .ndo_select_queue = xenvif_select_queue,
428}; 406};
429 407
430struct xenvif *xenvif_alloc(struct device *parent, domid_t domid, 408struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
@@ -438,7 +416,7 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
438 snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle); 416 snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
439 /* Allocate a netdev with the max. supported number of queues. 417 /* Allocate a netdev with the max. supported number of queues.
440 * When the guest selects the desired number, it will be updated 418 * When the guest selects the desired number, it will be updated
441 * via netif_set_real_num_tx_queues(). 419 * via netif_set_real_num_*_queues().
442 */ 420 */
443 dev = alloc_netdev_mq(sizeof(struct xenvif), name, ether_setup, 421 dev = alloc_netdev_mq(sizeof(struct xenvif), name, ether_setup,
444 xenvif_max_queues); 422 xenvif_max_queues);
@@ -458,11 +436,9 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
458 vif->dev = dev; 436 vif->dev = dev;
459 vif->disabled = false; 437 vif->disabled = false;
460 438
461 /* Start out with no queues. The call below does not require 439 /* Start out with no queues. */
462 * rtnl_lock() as it happens before register_netdev().
463 */
464 vif->queues = NULL; 440 vif->queues = NULL;
465 netif_set_real_num_tx_queues(dev, 0); 441 vif->num_queues = 0;
466 442
467 dev->netdev_ops = &xenvif_netdev_ops; 443 dev->netdev_ops = &xenvif_netdev_ops;
468 dev->hw_features = NETIF_F_SG | 444 dev->hw_features = NETIF_F_SG |
@@ -677,7 +653,7 @@ static void xenvif_wait_unmap_timeout(struct xenvif_queue *queue,
677void xenvif_disconnect(struct xenvif *vif) 653void xenvif_disconnect(struct xenvif *vif)
678{ 654{
679 struct xenvif_queue *queue = NULL; 655 struct xenvif_queue *queue = NULL;
680 unsigned int num_queues = vif->dev->real_num_tx_queues; 656 unsigned int num_queues = vif->num_queues;
681 unsigned int queue_index; 657 unsigned int queue_index;
682 658
683 if (netif_carrier_ok(vif->dev)) 659 if (netif_carrier_ok(vif->dev))
@@ -724,7 +700,7 @@ void xenvif_deinit_queue(struct xenvif_queue *queue)
724void xenvif_free(struct xenvif *vif) 700void xenvif_free(struct xenvif *vif)
725{ 701{
726 struct xenvif_queue *queue = NULL; 702 struct xenvif_queue *queue = NULL;
727 unsigned int num_queues = vif->dev->real_num_tx_queues; 703 unsigned int num_queues = vif->num_queues;
728 unsigned int queue_index; 704 unsigned int queue_index;
729 /* Here we want to avoid timeout messages if an skb can be legitimately 705 /* Here we want to avoid timeout messages if an skb can be legitimately
730 * stuck somewhere else. Realistically this could be an another vif's 706 * stuck somewhere else. Realistically this could be an another vif's
@@ -748,12 +724,9 @@ void xenvif_free(struct xenvif *vif)
748 xenvif_deinit_queue(queue); 724 xenvif_deinit_queue(queue);
749 } 725 }
750 726
751 /* Free the array of queues. The call below does not require
752 * rtnl_lock() because it happens after unregister_netdev().
753 */
754 netif_set_real_num_tx_queues(vif->dev, 0);
755 vfree(vif->queues); 727 vfree(vif->queues);
756 vif->queues = NULL; 728 vif->queues = NULL;
729 vif->num_queues = 0;
757 730
758 free_netdev(vif->dev); 731 free_netdev(vif->dev);
759 732
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 96c63dc2509e..3d85acd84bad 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -527,9 +527,7 @@ static void connect(struct backend_info *be)
527 /* Use the number of queues requested by the frontend */ 527 /* Use the number of queues requested by the frontend */
528 be->vif->queues = vzalloc(requested_num_queues * 528 be->vif->queues = vzalloc(requested_num_queues *
529 sizeof(struct xenvif_queue)); 529 sizeof(struct xenvif_queue));
530 rtnl_lock(); 530 be->vif->num_queues = requested_num_queues;
531 netif_set_real_num_tx_queues(be->vif->dev, requested_num_queues);
532 rtnl_unlock();
533 531
534 for (queue_index = 0; queue_index < requested_num_queues; ++queue_index) { 532 for (queue_index = 0; queue_index < requested_num_queues; ++queue_index) {
535 queue = &be->vif->queues[queue_index]; 533 queue = &be->vif->queues[queue_index];
@@ -546,9 +544,7 @@ static void connect(struct backend_info *be)
546 * earlier queues can be destroyed using the regular 544 * earlier queues can be destroyed using the regular
547 * disconnect logic. 545 * disconnect logic.
548 */ 546 */
549 rtnl_lock(); 547 be->vif->num_queues = queue_index;
550 netif_set_real_num_tx_queues(be->vif->dev, queue_index);
551 rtnl_unlock();
552 goto err; 548 goto err;
553 } 549 }
554 550
@@ -561,13 +557,19 @@ static void connect(struct backend_info *be)
561 * and also clean up any previously initialised queues. 557 * and also clean up any previously initialised queues.
562 */ 558 */
563 xenvif_deinit_queue(queue); 559 xenvif_deinit_queue(queue);
564 rtnl_lock(); 560 be->vif->num_queues = queue_index;
565 netif_set_real_num_tx_queues(be->vif->dev, queue_index);
566 rtnl_unlock();
567 goto err; 561 goto err;
568 } 562 }
569 } 563 }
570 564
565 /* Initialisation completed, tell core driver the number of
566 * active queues.
567 */
568 rtnl_lock();
569 netif_set_real_num_tx_queues(be->vif->dev, requested_num_queues);
570 netif_set_real_num_rx_queues(be->vif->dev, requested_num_queues);
571 rtnl_unlock();
572
571 xenvif_carrier_on(be->vif); 573 xenvif_carrier_on(be->vif);
572 574
573 unregister_hotplug_status_watch(be); 575 unregister_hotplug_status_watch(be);
@@ -582,13 +584,11 @@ static void connect(struct backend_info *be)
582 return; 584 return;
583 585
584err: 586err:
585 if (be->vif->dev->real_num_tx_queues > 0) 587 if (be->vif->num_queues > 0)
586 xenvif_disconnect(be->vif); /* Clean up existing queues */ 588 xenvif_disconnect(be->vif); /* Clean up existing queues */
587 vfree(be->vif->queues); 589 vfree(be->vif->queues);
588 be->vif->queues = NULL; 590 be->vif->queues = NULL;
589 rtnl_lock(); 591 be->vif->num_queues = 0;
590 netif_set_real_num_tx_queues(be->vif->dev, 0);
591 rtnl_unlock();
592 return; 592 return;
593} 593}
594 594
@@ -596,7 +596,7 @@ err:
596static int connect_rings(struct backend_info *be, struct xenvif_queue *queue) 596static int connect_rings(struct backend_info *be, struct xenvif_queue *queue)
597{ 597{
598 struct xenbus_device *dev = be->dev; 598 struct xenbus_device *dev = be->dev;
599 unsigned int num_queues = queue->vif->dev->real_num_tx_queues; 599 unsigned int num_queues = queue->vif->num_queues;
600 unsigned long tx_ring_ref, rx_ring_ref; 600 unsigned long tx_ring_ref, rx_ring_ref;
601 unsigned int tx_evtchn, rx_evtchn; 601 unsigned int tx_evtchn, rx_evtchn;
602 int err; 602 int err;
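Note on the xen-netback hunks: the number of allocated queues is now tracked in vif->num_queues, and the count is only published to the stack via netif_set_real_num_*_queues(), under rtnl_lock(), once every queue is fully initialised; partial failures just shrink num_queues so xenvif_disconnect() can tear down what exists. A condensed sketch of the connect path, assuming a per-queue init helper like the driver's xenvif_init_queue(); example_connect() is hypothetical.

static int example_connect(struct xenvif *vif, unsigned int requested)
{
	unsigned int i;
	int err;

	vif->queues = vzalloc(requested * sizeof(struct xenvif_queue));
	if (!vif->queues)
		return -ENOMEM;
	vif->num_queues = requested;		/* backend bookkeeping only */

	for (i = 0; i < requested; i++) {
		err = xenvif_init_queue(&vif->queues[i]);
		if (err) {
			vif->num_queues = i;	/* disconnect tears these down */
			return err;
		}
	}

	/* Only now let the core select any of the queues. */
	rtnl_lock();
	netif_set_real_num_tx_queues(vif->dev, requested);
	netif_set_real_num_rx_queues(vif->dev, requested);
	rtnl_unlock();
	return 0;
}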
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 5a7872ac3566..2ccb4a02368b 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1287,7 +1287,7 @@ static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
1287 1287
1288 if (likely(netif_carrier_ok(dev) && 1288 if (likely(netif_carrier_ok(dev) &&
1289 RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))) 1289 RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
1290 napi_schedule(&queue->napi); 1290 napi_schedule(&queue->napi);
1291 1291
1292 return IRQ_HANDLED; 1292 return IRQ_HANDLED;
1293} 1293}
@@ -1437,10 +1437,11 @@ static void xennet_end_access(int ref, void *page)
1437static void xennet_disconnect_backend(struct netfront_info *info) 1437static void xennet_disconnect_backend(struct netfront_info *info)
1438{ 1438{
1439 unsigned int i = 0; 1439 unsigned int i = 0;
1440 struct netfront_queue *queue = NULL;
1441 unsigned int num_queues = info->netdev->real_num_tx_queues; 1440 unsigned int num_queues = info->netdev->real_num_tx_queues;
1442 1441
1443 for (i = 0; i < num_queues; ++i) { 1442 for (i = 0; i < num_queues; ++i) {
1443 struct netfront_queue *queue = &info->queues[i];
1444
1444 /* Stop old i/f to prevent errors whilst we rebuild the state. */ 1445 /* Stop old i/f to prevent errors whilst we rebuild the state. */
1445 spin_lock_bh(&queue->rx_lock); 1446 spin_lock_bh(&queue->rx_lock);
1446 spin_lock_irq(&queue->tx_lock); 1447 spin_lock_irq(&queue->tx_lock);
@@ -1698,8 +1699,6 @@ static int xennet_init_queue(struct netfront_queue *queue)
1698 goto exit_free_tx; 1699 goto exit_free_tx;
1699 } 1700 }
1700 1701
1701 netif_napi_add(queue->info->netdev, &queue->napi, xennet_poll, 64);
1702
1703 return 0; 1702 return 0;
1704 1703
1705 exit_free_tx: 1704 exit_free_tx:
@@ -1790,6 +1789,70 @@ error:
1790 return err; 1789 return err;
1791} 1790}
1792 1791
1792static void xennet_destroy_queues(struct netfront_info *info)
1793{
1794 unsigned int i;
1795
1796 rtnl_lock();
1797
1798 for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
1799 struct netfront_queue *queue = &info->queues[i];
1800
1801 if (netif_running(info->netdev))
1802 napi_disable(&queue->napi);
1803 netif_napi_del(&queue->napi);
1804 }
1805
1806 rtnl_unlock();
1807
1808 kfree(info->queues);
1809 info->queues = NULL;
1810}
1811
1812static int xennet_create_queues(struct netfront_info *info,
1813 unsigned int num_queues)
1814{
1815 unsigned int i;
1816 int ret;
1817
1818 info->queues = kcalloc(num_queues, sizeof(struct netfront_queue),
1819 GFP_KERNEL);
1820 if (!info->queues)
1821 return -ENOMEM;
1822
1823 rtnl_lock();
1824
1825 for (i = 0; i < num_queues; i++) {
1826 struct netfront_queue *queue = &info->queues[i];
1827
1828 queue->id = i;
1829 queue->info = info;
1830
1831 ret = xennet_init_queue(queue);
1832 if (ret < 0) {
1833 dev_warn(&info->netdev->dev, "only created %d queues\n",
1834 num_queues);
1835 num_queues = i;
1836 break;
1837 }
1838
1839 netif_napi_add(queue->info->netdev, &queue->napi,
1840 xennet_poll, 64);
1841 if (netif_running(info->netdev))
1842 napi_enable(&queue->napi);
1843 }
1844
1845 netif_set_real_num_tx_queues(info->netdev, num_queues);
1846
1847 rtnl_unlock();
1848
1849 if (num_queues == 0) {
1850 dev_err(&info->netdev->dev, "no queues\n");
1851 return -EINVAL;
1852 }
1853 return 0;
1854}
1855
1793/* Common code used when first setting up, and when resuming. */ 1856/* Common code used when first setting up, and when resuming. */
1794static int talk_to_netback(struct xenbus_device *dev, 1857static int talk_to_netback(struct xenbus_device *dev,
1795 struct netfront_info *info) 1858 struct netfront_info *info)
@@ -1826,42 +1889,20 @@ static int talk_to_netback(struct xenbus_device *dev,
1826 goto out; 1889 goto out;
1827 } 1890 }
1828 1891
1829 /* Allocate array of queues */ 1892 if (info->queues)
1830 info->queues = kcalloc(num_queues, sizeof(struct netfront_queue), GFP_KERNEL); 1893 xennet_destroy_queues(info);
1831 if (!info->queues) { 1894
1832 err = -ENOMEM; 1895 err = xennet_create_queues(info, num_queues);
1833 goto out; 1896 if (err < 0)
1834 } 1897 goto destroy_ring;
1835 rtnl_lock();
1836 netif_set_real_num_tx_queues(info->netdev, num_queues);
1837 rtnl_unlock();
1838 1898
1839 /* Create shared ring, alloc event channel -- for each queue */ 1899 /* Create shared ring, alloc event channel -- for each queue */
1840 for (i = 0; i < num_queues; ++i) { 1900 for (i = 0; i < num_queues; ++i) {
1841 queue = &info->queues[i]; 1901 queue = &info->queues[i];
1842 queue->id = i;
1843 queue->info = info;
1844 err = xennet_init_queue(queue);
1845 if (err) {
1846 /* xennet_init_queue() cleans up after itself on failure,
1847 * but we still have to clean up any previously initialised
1848 * queues. If i > 0, set num_queues to i, then goto
1849 * destroy_ring, which calls xennet_disconnect_backend()
1850 * to tidy up.
1851 */
1852 if (i > 0) {
1853 rtnl_lock();
1854 netif_set_real_num_tx_queues(info->netdev, i);
1855 rtnl_unlock();
1856 goto destroy_ring;
1857 } else {
1858 goto out;
1859 }
1860 }
1861 err = setup_netfront(dev, queue, feature_split_evtchn); 1902 err = setup_netfront(dev, queue, feature_split_evtchn);
1862 if (err) { 1903 if (err) {
1863 /* As for xennet_init_queue(), setup_netfront() will tidy 1904 /* setup_netfront() will tidy up the current
1864 * up the current queue on error, but we need to clean up 1905 * queue on error, but we need to clean up
1865 * those already allocated. 1906 * those already allocated.
1866 */ 1907 */
1867 if (i > 0) { 1908 if (i > 0) {
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index fb4a59830648..a3bf2122a8d5 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -323,11 +323,13 @@ int of_phy_register_fixed_link(struct device_node *np)
323 fixed_link_node = of_get_child_by_name(np, "fixed-link"); 323 fixed_link_node = of_get_child_by_name(np, "fixed-link");
324 if (fixed_link_node) { 324 if (fixed_link_node) {
325 status.link = 1; 325 status.link = 1;
326 status.duplex = of_property_read_bool(np, "full-duplex"); 326 status.duplex = of_property_read_bool(fixed_link_node,
327 "full-duplex");
327 if (of_property_read_u32(fixed_link_node, "speed", &status.speed)) 328 if (of_property_read_u32(fixed_link_node, "speed", &status.speed))
328 return -EINVAL; 329 return -EINVAL;
329 status.pause = of_property_read_bool(np, "pause"); 330 status.pause = of_property_read_bool(fixed_link_node, "pause");
330 status.asym_pause = of_property_read_bool(np, "asym-pause"); 331 status.asym_pause = of_property_read_bool(fixed_link_node,
332 "asym-pause");
331 of_node_put(fixed_link_node); 333 of_node_put(fixed_link_node);
332 return fixed_phy_register(PHY_POLL, &status, np); 334 return fixed_phy_register(PHY_POLL, &status, np);
333 } 335 }
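Note on the of_mdio fix: "full-duplex", "pause" and "asym-pause" are read from the fixed-link child node itself, where the binding places them, instead of from the parent Ethernet node. The corrected lookup in isolation, wrapped in a hypothetical example_ helper:

#include <linux/of.h>
#include <linux/phy_fixed.h>

static int example_parse_fixed_link(struct device_node *np,
				    struct fixed_phy_status *status)
{
	struct device_node *fl = of_get_child_by_name(np, "fixed-link");
	u32 speed;

	if (!fl)
		return -ENODEV;

	status->link = 1;
	/* All of these live on the fixed-link node, not on np itself. */
	status->duplex = of_property_read_bool(fl, "full-duplex");
	status->pause = of_property_read_bool(fl, "pause");
	status->asym_pause = of_property_read_bool(fl, "asym-pause");
	if (of_property_read_u32(fl, "speed", &speed)) {
		of_node_put(fl);
		return -EINVAL;
	}
	status->speed = speed;

	of_node_put(fl);
	return 0;
}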
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index 6aea373547f6..ee3de3421f2d 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -74,7 +74,7 @@ config DP83640_PHY
74 74
75config PTP_1588_CLOCK_PCH 75config PTP_1588_CLOCK_PCH
76 tristate "Intel PCH EG20T as PTP clock" 76 tristate "Intel PCH EG20T as PTP clock"
77 depends on X86 || COMPILE_TEST 77 depends on X86_32 || COMPILE_TEST
78 depends on HAS_IOMEM && NET 78 depends on HAS_IOMEM && NET
79 select PTP_1588_CLOCK 79 select PTP_1588_CLOCK
80 help 80 help
diff --git a/drivers/regulator/bcm590xx-regulator.c b/drivers/regulator/bcm590xx-regulator.c
index 57544e254a78..58ece59367ae 100644
--- a/drivers/regulator/bcm590xx-regulator.c
+++ b/drivers/regulator/bcm590xx-regulator.c
@@ -119,6 +119,10 @@ static const unsigned int ldo_c_table[] = {
119 2900000, 3000000, 3300000, 119 2900000, 3000000, 3300000,
120}; 120};
121 121
122static const unsigned int ldo_vbus[] = {
123 5000000,
124};
125
122/* DCDC group CSR: supported voltages in microvolts */ 126/* DCDC group CSR: supported voltages in microvolts */
123static const struct regulator_linear_range dcdc_csr_ranges[] = { 127static const struct regulator_linear_range dcdc_csr_ranges[] = {
124 REGULATOR_LINEAR_RANGE(860000, 2, 50, 10000), 128 REGULATOR_LINEAR_RANGE(860000, 2, 50, 10000),
@@ -192,6 +196,7 @@ static struct bcm590xx_info bcm590xx_regs[] = {
192 BCM590XX_REG_TABLE(gpldo4, ldo_a_table), 196 BCM590XX_REG_TABLE(gpldo4, ldo_a_table),
193 BCM590XX_REG_TABLE(gpldo5, ldo_a_table), 197 BCM590XX_REG_TABLE(gpldo5, ldo_a_table),
194 BCM590XX_REG_TABLE(gpldo6, ldo_a_table), 198 BCM590XX_REG_TABLE(gpldo6, ldo_a_table),
199 BCM590XX_REG_TABLE(vbus, ldo_vbus),
195}; 200};
196 201
197struct bcm590xx_reg { 202struct bcm590xx_reg {
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index b982f0ff4e01..93b4ad842901 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -325,6 +325,10 @@ static int palmas_set_mode_smps(struct regulator_dev *dev, unsigned int mode)
325 if (rail_enable) 325 if (rail_enable)
326 palmas_smps_write(pmic->palmas, 326 palmas_smps_write(pmic->palmas,
327 palmas_regs_info[id].ctrl_addr, reg); 327 palmas_regs_info[id].ctrl_addr, reg);
328
329 /* Switch the enable value to ensure this is used for enable */
330 pmic->desc[id].enable_val = pmic->current_reg_mode[id];
331
328 return 0; 332 return 0;
329} 333}
330 334
@@ -964,6 +968,14 @@ static int palmas_regulators_probe(struct platform_device *pdev)
964 return ret; 968 return ret;
965 pmic->current_reg_mode[id] = reg & 969 pmic->current_reg_mode[id] = reg &
966 PALMAS_SMPS12_CTRL_MODE_ACTIVE_MASK; 970 PALMAS_SMPS12_CTRL_MODE_ACTIVE_MASK;
971
972 pmic->desc[id].enable_reg =
973 PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE,
974 palmas_regs_info[id].ctrl_addr);
975 pmic->desc[id].enable_mask =
976 PALMAS_SMPS12_CTRL_MODE_ACTIVE_MASK;
977 /* set_mode overrides this value */
978 pmic->desc[id].enable_val = SMPS_CTRL_MODE_ON;
967 } 979 }
968 980
969 pmic->desc[id].type = REGULATOR_VOLTAGE; 981 pmic->desc[id].type = REGULATOR_VOLTAGE;
diff --git a/drivers/regulator/tps65218-regulator.c b/drivers/regulator/tps65218-regulator.c
index 69b4b7750410..9effe48c605e 100644
--- a/drivers/regulator/tps65218-regulator.c
+++ b/drivers/regulator/tps65218-regulator.c
@@ -209,7 +209,7 @@ static const struct regulator_desc regulators[] = {
209 1, -1, -1, TPS65218_REG_ENABLE1, 209 1, -1, -1, TPS65218_REG_ENABLE1,
210 TPS65218_ENABLE1_DC6_EN, NULL, NULL, 0, 0), 210 TPS65218_ENABLE1_DC6_EN, NULL, NULL, 0, 0),
211 TPS65218_REGULATOR("LDO1", TPS65218_LDO_1, tps65218_ldo1_dcdc34_ops, 64, 211 TPS65218_REGULATOR("LDO1", TPS65218_LDO_1, tps65218_ldo1_dcdc34_ops, 64,
212 TPS65218_REG_CONTROL_DCDC4, 212 TPS65218_REG_CONTROL_LDO1,
213 TPS65218_CONTROL_LDO1_MASK, TPS65218_REG_ENABLE2, 213 TPS65218_CONTROL_LDO1_MASK, TPS65218_REG_ENABLE2,
214 TPS65218_ENABLE2_LDO1_EN, NULL, ldo1_dcdc3_ranges, 214 TPS65218_ENABLE2_LDO1_EN, NULL, ldo1_dcdc3_ranges,
215 2, 0), 215 2, 0),
@@ -240,6 +240,7 @@ static int tps65218_regulator_probe(struct platform_device *pdev)
240 config.init_data = init_data; 240 config.init_data = init_data;
241 config.driver_data = tps; 241 config.driver_data = tps;
242 config.regmap = tps->regmap; 242 config.regmap = tps->regmap;
243 config.of_node = pdev->dev.of_node;
243 244
244 rdev = devm_regulator_register(&pdev->dev, &regulators[id], &config); 245 rdev = devm_regulator_register(&pdev->dev, &regulators[id], &config);
245 if (IS_ERR(rdev)) { 246 if (IS_ERR(rdev)) {
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index a98df7eeb42d..fe792106bdc5 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -118,6 +118,7 @@ static void lpss_ssp_setup(struct driver_data *drv_data)
118 */ 118 */
119 orig = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL); 119 orig = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
120 120
121 /* Test SPI_CS_CONTROL_SW_MODE bit enabling */
121 value = orig | SPI_CS_CONTROL_SW_MODE; 122 value = orig | SPI_CS_CONTROL_SW_MODE;
122 writel(value, drv_data->ioaddr + offset + SPI_CS_CONTROL); 123 writel(value, drv_data->ioaddr + offset + SPI_CS_CONTROL);
123 value = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL); 124 value = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
@@ -126,10 +127,13 @@ static void lpss_ssp_setup(struct driver_data *drv_data)
126 goto detection_done; 127 goto detection_done;
127 } 128 }
128 129
129 value &= ~SPI_CS_CONTROL_SW_MODE; 130 orig = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
131
132 /* Test SPI_CS_CONTROL_SW_MODE bit disabling */
133 value = orig & ~SPI_CS_CONTROL_SW_MODE;
130 writel(value, drv_data->ioaddr + offset + SPI_CS_CONTROL); 134 writel(value, drv_data->ioaddr + offset + SPI_CS_CONTROL);
131 value = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL); 135 value = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
132 if (value != orig) { 136 if (value != (orig & ~SPI_CS_CONTROL_SW_MODE)) {
133 offset = 0x800; 137 offset = 0x800;
134 goto detection_done; 138 goto detection_done;
135 } 139 }
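Note on the spi-pxa2xx hunk: the original probe compared the register against the value captured while SW_MODE was still set, so the "bit clears again" test could never succeed and the offset detection was unreliable. The corrected probe takes a fresh baseline and compares against baseline-with-bit-cleared. A generic sketch of that bit-toggle detection; the example_ name and the restore step are illustrative, not the driver's exact sequence.

#include <linux/io.h>

static bool example_bit_toggles(void __iomem *reg, u32 bit)
{
	u32 first, with_set, value;

	first = readl(reg);

	writel(first | bit, reg);		/* does the bit stick when set? */
	if ((readl(reg) & bit) == 0) {
		writel(first, reg);
		return false;
	}

	with_set = readl(reg);			/* fresh baseline, bit now set */
	writel(with_set & ~bit, reg);		/* does it stick when cleared? */
	value = readl(reg);

	writel(first, reg);			/* restore the original contents */
	return value == (with_set & ~bit);
}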
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index fc1de86d3c8a..c08da380cb23 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -424,31 +424,6 @@ static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
424 return 0; 424 return 0;
425} 425}
426 426
427static void spi_qup_set_cs(struct spi_device *spi, bool enable)
428{
429 struct spi_qup *controller = spi_master_get_devdata(spi->master);
430
431 u32 iocontol, mask;
432
433 iocontol = readl_relaxed(controller->base + SPI_IO_CONTROL);
434
435 /* Disable auto CS toggle and use manual */
436 iocontol &= ~SPI_IO_C_MX_CS_MODE;
437 iocontol |= SPI_IO_C_FORCE_CS;
438
439 iocontol &= ~SPI_IO_C_CS_SELECT_MASK;
440 iocontol |= SPI_IO_C_CS_SELECT(spi->chip_select);
441
442 mask = SPI_IO_C_CS_N_POLARITY_0 << spi->chip_select;
443
444 if (enable)
445 iocontol |= mask;
446 else
447 iocontol &= ~mask;
448
449 writel_relaxed(iocontol, controller->base + SPI_IO_CONTROL);
450}
451
452static int spi_qup_transfer_one(struct spi_master *master, 427static int spi_qup_transfer_one(struct spi_master *master,
453 struct spi_device *spi, 428 struct spi_device *spi,
454 struct spi_transfer *xfer) 429 struct spi_transfer *xfer)
@@ -571,12 +546,16 @@ static int spi_qup_probe(struct platform_device *pdev)
571 return -ENOMEM; 546 return -ENOMEM;
572 } 547 }
573 548
549 /* use num-cs unless not present or out of range */
550 if (of_property_read_u16(dev->of_node, "num-cs",
551 &master->num_chipselect) ||
552 (master->num_chipselect > SPI_NUM_CHIPSELECTS))
553 master->num_chipselect = SPI_NUM_CHIPSELECTS;
554
574 master->bus_num = pdev->id; 555 master->bus_num = pdev->id;
575 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP; 556 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
576 master->num_chipselect = SPI_NUM_CHIPSELECTS;
577 master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32); 557 master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
578 master->max_speed_hz = max_freq; 558 master->max_speed_hz = max_freq;
579 master->set_cs = spi_qup_set_cs;
580 master->transfer_one = spi_qup_transfer_one; 559 master->transfer_one = spi_qup_transfer_one;
581 master->dev.of_node = pdev->dev.of_node; 560 master->dev.of_node = pdev->dev.of_node;
582 master->auto_runtime_pm = true; 561 master->auto_runtime_pm = true;
@@ -640,16 +619,19 @@ static int spi_qup_probe(struct platform_device *pdev)
640 if (ret) 619 if (ret)
641 goto error; 620 goto error;
642 621
643 ret = devm_spi_register_master(dev, master);
644 if (ret)
645 goto error;
646
647 pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC); 622 pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
648 pm_runtime_use_autosuspend(dev); 623 pm_runtime_use_autosuspend(dev);
649 pm_runtime_set_active(dev); 624 pm_runtime_set_active(dev);
650 pm_runtime_enable(dev); 625 pm_runtime_enable(dev);
626
627 ret = devm_spi_register_master(dev, master);
628 if (ret)
629 goto disable_pm;
630
651 return 0; 631 return 0;
652 632
633disable_pm:
634 pm_runtime_disable(&pdev->dev);
653error: 635error:
654 clk_disable_unprepare(cclk); 636 clk_disable_unprepare(cclk);
655 clk_disable_unprepare(iclk); 637 clk_disable_unprepare(iclk);
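Note on the spi-qup hunks: two independent fixes are visible above. num_chipselect now honours an optional "num-cs" DT property, clamped to the controller limit, and the master is registered only after runtime PM is enabled, with a disable_pm unwind label. The property-with-fallback idiom on its own, with a hypothetical example_ name and limit:

#include <linux/of.h>

#define EXAMPLE_NUM_CHIPSELECTS	4	/* hypothetical controller maximum */

static u16 example_num_chipselect(struct device_node *np)
{
	u16 num_cs;

	/* A missing property and an out-of-range value both fall back
	 * to the hardware maximum, as the probe change above does.
	 */
	if (of_property_read_u16(np, "num-cs", &num_cs) ||
	    num_cs > EXAMPLE_NUM_CHIPSELECTS)
		num_cs = EXAMPLE_NUM_CHIPSELECTS;

	return num_cs;
}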
diff --git a/drivers/spi/spi-sh-sci.c b/drivers/spi/spi-sh-sci.c
index 1f56ef651d1a..b83dd733684c 100644
--- a/drivers/spi/spi-sh-sci.c
+++ b/drivers/spi/spi-sh-sci.c
@@ -175,9 +175,9 @@ static int sh_sci_spi_remove(struct platform_device *dev)
175{ 175{
176 struct sh_sci_spi *sp = platform_get_drvdata(dev); 176 struct sh_sci_spi *sp = platform_get_drvdata(dev);
177 177
178 iounmap(sp->membase);
179 setbits(sp, PIN_INIT, 0);
180 spi_bitbang_stop(&sp->bitbang); 178 spi_bitbang_stop(&sp->bitbang);
179 setbits(sp, PIN_INIT, 0);
180 iounmap(sp->membase);
181 spi_master_put(sp->bitbang.master); 181 spi_master_put(sp->bitbang.master);
182 return 0; 182 return 0;
183} 183}
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 5663f4d19d02..1f4c794f5fcc 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1309,7 +1309,7 @@ iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf,
1309 if (cmd->data_direction != DMA_TO_DEVICE) { 1309 if (cmd->data_direction != DMA_TO_DEVICE) {
1310 pr_err("Command ITT: 0x%08x received DataOUT for a" 1310 pr_err("Command ITT: 0x%08x received DataOUT for a"
1311 " NON-WRITE command.\n", cmd->init_task_tag); 1311 " NON-WRITE command.\n", cmd->init_task_tag);
1312 return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf); 1312 return iscsit_dump_data_payload(conn, payload_length, 1);
1313 } 1313 }
1314 se_cmd = &cmd->se_cmd; 1314 se_cmd = &cmd->se_cmd;
1315 iscsit_mod_dataout_timer(cmd); 1315 iscsit_mod_dataout_timer(cmd);
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index 19b842c3e0b3..ab4915c0d933 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -174,7 +174,6 @@ static int chap_server_compute_md5(
174 char *nr_out_ptr, 174 char *nr_out_ptr,
175 unsigned int *nr_out_len) 175 unsigned int *nr_out_len)
176{ 176{
177 char *endptr;
178 unsigned long id; 177 unsigned long id;
179 unsigned char id_as_uchar; 178 unsigned char id_as_uchar;
180 unsigned char digest[MD5_SIGNATURE_SIZE]; 179 unsigned char digest[MD5_SIGNATURE_SIZE];
@@ -320,9 +319,14 @@ static int chap_server_compute_md5(
320 } 319 }
321 320
322 if (type == HEX) 321 if (type == HEX)
323 id = simple_strtoul(&identifier[2], &endptr, 0); 322 ret = kstrtoul(&identifier[2], 0, &id);
324 else 323 else
325 id = simple_strtoul(identifier, &endptr, 0); 324 ret = kstrtoul(identifier, 0, &id);
325
326 if (ret < 0) {
327 pr_err("kstrtoul() failed for CHAP identifier: %d\n", ret);
328 goto out;
329 }
326 if (id > 255) { 330 if (id > 255) {
327 pr_err("chap identifier: %lu greater than 255\n", id); 331 pr_err("chap identifier: %lu greater than 255\n", id);
328 goto out; 332 goto out;
@@ -351,6 +355,10 @@ static int chap_server_compute_md5(
351 pr_err("Unable to convert incoming challenge\n"); 355 pr_err("Unable to convert incoming challenge\n");
352 goto out; 356 goto out;
353 } 357 }
358 if (challenge_len > 1024) {
359 pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n");
360 goto out;
361 }
354 /* 362 /*
355 * During mutual authentication, the CHAP_C generated by the 363 * During mutual authentication, the CHAP_C generated by the
356 * initiator must not match the original CHAP_C generated by 364 * initiator must not match the original CHAP_C generated by
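Note on the CHAP change: simple_strtoul(), which cannot report malformed input, is replaced by kstrtoul(), which returns -EINVAL/-ERANGE, and the decoded CHAP_C is additionally capped at 1024 bytes. The parsing idiom alone, as a hypothetical helper that leaves the HEX/decimal detection done by the driver out of scope:

#include <linux/kernel.h>

static int example_parse_chap_id(const char *identifier, unsigned char *out)
{
	unsigned long id;
	int ret;

	/* Base 0 auto-detects "0x.." hex vs. plain decimal; unlike
	 * simple_strtoul(), kstrtoul() reports malformed input.
	 */
	ret = kstrtoul(identifier, 0, &id);
	if (ret < 0)
		return ret;			/* -EINVAL or -ERANGE */

	if (id > 255)
		return -EINVAL;			/* CHAP identifier is one octet */

	*out = (unsigned char)id;
	return 0;
}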
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index fecb69535a15..5e71ac609418 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -1216,7 +1216,7 @@ old_sess_out:
1216static int __iscsi_target_login_thread(struct iscsi_np *np) 1216static int __iscsi_target_login_thread(struct iscsi_np *np)
1217{ 1217{
1218 u8 *buffer, zero_tsih = 0; 1218 u8 *buffer, zero_tsih = 0;
1219 int ret = 0, rc, stop; 1219 int ret = 0, rc;
1220 struct iscsi_conn *conn = NULL; 1220 struct iscsi_conn *conn = NULL;
1221 struct iscsi_login *login; 1221 struct iscsi_login *login;
1222 struct iscsi_portal_group *tpg = NULL; 1222 struct iscsi_portal_group *tpg = NULL;
@@ -1230,6 +1230,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
1230 if (np->np_thread_state == ISCSI_NP_THREAD_RESET) { 1230 if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
1231 np->np_thread_state = ISCSI_NP_THREAD_ACTIVE; 1231 np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
1232 complete(&np->np_restart_comp); 1232 complete(&np->np_restart_comp);
1233 } else if (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN) {
1234 spin_unlock_bh(&np->np_thread_lock);
1235 goto exit;
1233 } else { 1236 } else {
1234 np->np_thread_state = ISCSI_NP_THREAD_ACTIVE; 1237 np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
1235 } 1238 }
@@ -1422,10 +1425,8 @@ old_sess_out:
1422 } 1425 }
1423 1426
1424out: 1427out:
1425 stop = kthread_should_stop(); 1428 return 1;
1426 /* Wait for another socket.. */ 1429
1427 if (!stop)
1428 return 1;
1429exit: 1430exit:
1430 iscsi_stop_login_thread_timer(np); 1431 iscsi_stop_login_thread_timer(np);
1431 spin_lock_bh(&np->np_thread_lock); 1432 spin_lock_bh(&np->np_thread_lock);
@@ -1442,7 +1443,7 @@ int iscsi_target_login_thread(void *arg)
1442 1443
1443 allow_signal(SIGINT); 1444 allow_signal(SIGINT);
1444 1445
1445 while (!kthread_should_stop()) { 1446 while (1) {
1446 ret = __iscsi_target_login_thread(np); 1447 ret = __iscsi_target_login_thread(np);
1447 /* 1448 /*
1448 * We break and exit here unless another sock_accept() call 1449 * We break and exit here unless another sock_accept() call
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 53e157cb8c54..fd90b28f1d94 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -1295,6 +1295,8 @@ int iscsit_tx_login_rsp(struct iscsi_conn *conn, u8 status_class, u8 status_deta
1295 login->login_failed = 1; 1295 login->login_failed = 1;
1296 iscsit_collect_login_stats(conn, status_class, status_detail); 1296 iscsit_collect_login_stats(conn, status_class, status_detail);
1297 1297
1298 memset(&login->rsp[0], 0, ISCSI_HDR_LEN);
1299
1298 hdr = (struct iscsi_login_rsp *)&login->rsp[0]; 1300 hdr = (struct iscsi_login_rsp *)&login->rsp[0];
1299 hdr->opcode = ISCSI_OP_LOGIN_RSP; 1301 hdr->opcode = ISCSI_OP_LOGIN_RSP;
1300 hdr->status_class = status_class; 1302 hdr->status_class = status_class;
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 6d2f37578b29..8c64b8776a96 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -239,6 +239,7 @@ static void tcm_loop_submission_work(struct work_struct *work)
239 return; 239 return;
240 240
241out_done: 241out_done:
242 kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
242 sc->scsi_done(sc); 243 sc->scsi_done(sc);
243 return; 244 return;
244} 245}
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 11d26fe65bfb..98da90167159 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -616,6 +616,7 @@ void core_dev_unexport(
616 dev->export_count--; 616 dev->export_count--;
617 spin_unlock(&hba->device_lock); 617 spin_unlock(&hba->device_lock);
618 618
619 lun->lun_sep = NULL;
619 lun->lun_se_dev = NULL; 620 lun->lun_se_dev = NULL;
620} 621}
621 622
diff --git a/drivers/tc/tc.c b/drivers/tc/tc.c
index a8aaf6ac2ae2..946562389ca8 100644
--- a/drivers/tc/tc.c
+++ b/drivers/tc/tc.c
@@ -129,7 +129,10 @@ static void __init tc_bus_add_devices(struct tc_bus *tbus)
129 129
130 tc_device_get_irq(tdev); 130 tc_device_get_irq(tdev);
131 131
132 device_register(&tdev->dev); 132 if (device_register(&tdev->dev)) {
133 put_device(&tdev->dev);
134 goto out_err;
135 }
133 list_add_tail(&tdev->node, &tbus->devices); 136 list_add_tail(&tdev->node, &tbus->devices);
134 137
135out_err: 138out_err:
@@ -148,7 +151,10 @@ static int __init tc_init(void)
148 151
149 INIT_LIST_HEAD(&tc_bus.devices); 152 INIT_LIST_HEAD(&tc_bus.devices);
150 dev_set_name(&tc_bus.dev, "tc"); 153 dev_set_name(&tc_bus.dev, "tc");
151 device_register(&tc_bus.dev); 154 if (device_register(&tc_bus.dev)) {
155 put_device(&tc_bus.dev);
156 return 0;
157 }
152 158
153 if (tc_bus.info.slot_size) { 159 if (tc_bus.info.slot_size) {
154 unsigned int tc_clock = tc_get_speed(&tc_bus) / 100000; 160 unsigned int tc_clock = tc_get_speed(&tc_bus) / 100000;
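
The two tc.c hunks above apply the standard driver-model rule: once device_register() has been called, even a failed call has taken a reference on the device, so the error path must drop it with put_device() rather than freeing the structure directly. A minimal sketch of that idiom (example_register is an illustrative name, assuming <linux/device.h>; it is not part of the patch):

	/* Sketch only: the device_register() error-handling idiom. */
	static int example_register(struct device *dev)
	{
		int err = device_register(dev);

		if (err) {
			/*
			 * device_register() already took a reference; drop it
			 * with put_device() so ->release() frees the device.
			 * Never kfree() it directly after a failed register.
			 */
			put_device(dev);
			return err;
		}
		return 0;
	}
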
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index c41aca4dfc43..72000a6d5af0 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -991,7 +991,7 @@ static const struct of_device_id msm_uartdm_table[] = {
991 { } 991 { }
992}; 992};
993 993
994static int __init msm_serial_probe(struct platform_device *pdev) 994static int msm_serial_probe(struct platform_device *pdev)
995{ 995{
996 struct msm_port *msm_port; 996 struct msm_port *msm_port;
997 struct resource *resource; 997 struct resource *resource;
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 971a760af4a1..8dae2f724a35 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -700,14 +700,6 @@ static void handle_rx_net(struct vhost_work *work)
700 handle_rx(net); 700 handle_rx(net);
701} 701}
702 702
703static void vhost_net_free(void *addr)
704{
705 if (is_vmalloc_addr(addr))
706 vfree(addr);
707 else
708 kfree(addr);
709}
710
711static int vhost_net_open(struct inode *inode, struct file *f) 703static int vhost_net_open(struct inode *inode, struct file *f)
712{ 704{
713 struct vhost_net *n; 705 struct vhost_net *n;
@@ -723,7 +715,7 @@ static int vhost_net_open(struct inode *inode, struct file *f)
723 } 715 }
724 vqs = kmalloc(VHOST_NET_VQ_MAX * sizeof(*vqs), GFP_KERNEL); 716 vqs = kmalloc(VHOST_NET_VQ_MAX * sizeof(*vqs), GFP_KERNEL);
725 if (!vqs) { 717 if (!vqs) {
726 vhost_net_free(n); 718 kvfree(n);
727 return -ENOMEM; 719 return -ENOMEM;
728 } 720 }
729 721
@@ -840,7 +832,7 @@ static int vhost_net_release(struct inode *inode, struct file *f)
840 * since jobs can re-queue themselves. */ 832 * since jobs can re-queue themselves. */
841 vhost_net_flush(n); 833 vhost_net_flush(n);
842 kfree(n->dev.vqs); 834 kfree(n->dev.vqs);
843 vhost_net_free(n); 835 kvfree(n);
844 return 0; 836 return 0;
845} 837}
846 838
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 4f4ffa4c604e..69906cacd04f 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1503,14 +1503,6 @@ static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
1503 return 0; 1503 return 0;
1504} 1504}
1505 1505
1506static void vhost_scsi_free(struct vhost_scsi *vs)
1507{
1508 if (is_vmalloc_addr(vs))
1509 vfree(vs);
1510 else
1511 kfree(vs);
1512}
1513
1514static int vhost_scsi_open(struct inode *inode, struct file *f) 1506static int vhost_scsi_open(struct inode *inode, struct file *f)
1515{ 1507{
1516 struct vhost_scsi *vs; 1508 struct vhost_scsi *vs;
@@ -1550,7 +1542,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
1550 return 0; 1542 return 0;
1551 1543
1552err_vqs: 1544err_vqs:
1553 vhost_scsi_free(vs); 1545 kvfree(vs);
1554err_vs: 1546err_vs:
1555 return r; 1547 return r;
1556} 1548}
@@ -1569,7 +1561,7 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
1569 /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */ 1561 /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
1570 vhost_scsi_flush(vs); 1562 vhost_scsi_flush(vs);
1571 kfree(vs->dev.vqs); 1563 kfree(vs->dev.vqs);
1572 vhost_scsi_free(vs); 1564 kvfree(vs);
1573 return 0; 1565 return 0;
1574} 1566}
1575 1567
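
Both vhost conversions above replace a hand-rolled "vfree or kfree" helper with kvfree(), which performs the same is_vmalloc_addr() test on the pointer. Roughly, the removed helpers and kvfree() both amount to the following (kvfree_equivalent is an illustrative name):

	/* Sketch: what the removed vhost_net_free()/vhost_scsi_free() did,
	 * and what kvfree() does for kmalloc()- or vmalloc()-backed memory. */
	static void kvfree_equivalent(const void *addr)
	{
		if (is_vmalloc_addr(addr))
			vfree(addr);
		else
			kfree(addr);
	}
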
diff --git a/fs/aio.c b/fs/aio.c
index 4f078c054b41..955947ef3e02 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1021,6 +1021,7 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
1021 1021
1022 /* everything turned out well, dispose of the aiocb. */ 1022 /* everything turned out well, dispose of the aiocb. */
1023 kiocb_free(iocb); 1023 kiocb_free(iocb);
1024 put_reqs_available(ctx, 1);
1024 1025
1025 /* 1026 /*
1026 * We have to order our ring_info tail store above and test 1027 * We have to order our ring_info tail store above and test
@@ -1062,6 +1063,9 @@ static long aio_read_events_ring(struct kioctx *ctx,
1062 if (head == tail) 1063 if (head == tail)
1063 goto out; 1064 goto out;
1064 1065
1066 head %= ctx->nr_events;
1067 tail %= ctx->nr_events;
1068
1065 while (ret < nr) { 1069 while (ret < nr) {
1066 long avail; 1070 long avail;
1067 struct io_event *ev; 1071 struct io_event *ev;
@@ -1100,8 +1104,6 @@ static long aio_read_events_ring(struct kioctx *ctx,
1100 flush_dcache_page(ctx->ring_pages[0]); 1104 flush_dcache_page(ctx->ring_pages[0]);
1101 1105
1102 pr_debug("%li h%u t%u\n", ret, head, tail); 1106 pr_debug("%li h%u t%u\n", ret, head, tail);
1103
1104 put_reqs_available(ctx, ret);
1105out: 1107out:
1106 mutex_unlock(&ctx->ring_lock); 1108 mutex_unlock(&ctx->ring_lock);
1107 1109
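
The aio_read_events_ring() hunk reduces head and tail modulo ctx->nr_events before walking the ring, so each pass of the copy loop can be bounded by the point where the indices wrap. A condensed sketch of that bound, assuming the indices are already reduced (contiguous_events is an illustrative name, not from the patch):

	/* Sketch: number of ring entries that can be consumed in one
	 * contiguous run, given indices already reduced modulo nr_events. */
	static long contiguous_events(unsigned head, unsigned tail,
				      unsigned nr_events)
	{
		if (head == tail)
			return 0;			/* ring is empty */
		return (head <= tail ? tail : nr_events) - head;
	}
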
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
index 0227b45ef00a..15e9505aa35f 100644
--- a/fs/cifs/cifs_unicode.c
+++ b/fs/cifs/cifs_unicode.c
@@ -290,7 +290,8 @@ int
290cifsConvertToUTF16(__le16 *target, const char *source, int srclen, 290cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
291 const struct nls_table *cp, int mapChars) 291 const struct nls_table *cp, int mapChars)
292{ 292{
293 int i, j, charlen; 293 int i, charlen;
294 int j = 0;
294 char src_char; 295 char src_char;
295 __le16 dst_char; 296 __le16 dst_char;
296 wchar_t tmp; 297 wchar_t tmp;
@@ -298,12 +299,11 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
298 if (!mapChars) 299 if (!mapChars)
299 return cifs_strtoUTF16(target, source, PATH_MAX, cp); 300 return cifs_strtoUTF16(target, source, PATH_MAX, cp);
300 301
301 for (i = 0, j = 0; i < srclen; j++) { 302 for (i = 0; i < srclen; j++) {
302 src_char = source[i]; 303 src_char = source[i];
303 charlen = 1; 304 charlen = 1;
304 switch (src_char) { 305 switch (src_char) {
305 case 0: 306 case 0:
306 put_unaligned(0, &target[j]);
307 goto ctoUTF16_out; 307 goto ctoUTF16_out;
308 case ':': 308 case ':':
309 dst_char = cpu_to_le16(UNI_COLON); 309 dst_char = cpu_to_le16(UNI_COLON);
@@ -350,6 +350,7 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
350 } 350 }
351 351
352ctoUTF16_out: 352ctoUTF16_out:
353 put_unaligned(0, &target[j]); /* Null terminate target unicode string */
353 return j; 354 return j;
354} 355}
355 356
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 2c90d07c0b3a..888398067420 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -725,6 +725,19 @@ out_nls:
725 goto out; 725 goto out;
726} 726}
727 727
728static ssize_t
729cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
730{
731 ssize_t rc;
732 struct inode *inode = file_inode(iocb->ki_filp);
733
734 rc = cifs_revalidate_mapping(inode);
735 if (rc)
736 return rc;
737
738 return generic_file_read_iter(iocb, iter);
739}
740
728static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) 741static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
729{ 742{
730 struct inode *inode = file_inode(iocb->ki_filp); 743 struct inode *inode = file_inode(iocb->ki_filp);
@@ -881,7 +894,7 @@ const struct inode_operations cifs_symlink_inode_ops = {
881const struct file_operations cifs_file_ops = { 894const struct file_operations cifs_file_ops = {
882 .read = new_sync_read, 895 .read = new_sync_read,
883 .write = new_sync_write, 896 .write = new_sync_write,
884 .read_iter = generic_file_read_iter, 897 .read_iter = cifs_loose_read_iter,
885 .write_iter = cifs_file_write_iter, 898 .write_iter = cifs_file_write_iter,
886 .open = cifs_open, 899 .open = cifs_open,
887 .release = cifs_close, 900 .release = cifs_close,
@@ -939,7 +952,7 @@ const struct file_operations cifs_file_direct_ops = {
939const struct file_operations cifs_file_nobrl_ops = { 952const struct file_operations cifs_file_nobrl_ops = {
940 .read = new_sync_read, 953 .read = new_sync_read,
941 .write = new_sync_write, 954 .write = new_sync_write,
942 .read_iter = generic_file_read_iter, 955 .read_iter = cifs_loose_read_iter,
943 .write_iter = cifs_file_write_iter, 956 .write_iter = cifs_file_write_iter,
944 .open = cifs_open, 957 .open = cifs_open,
945 .release = cifs_close, 958 .release = cifs_close,
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index 264ece71bdb2..68559fd557fb 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -374,7 +374,7 @@ cifs_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
374 oparms.cifs_sb = cifs_sb; 374 oparms.cifs_sb = cifs_sb;
375 oparms.desired_access = GENERIC_WRITE; 375 oparms.desired_access = GENERIC_WRITE;
376 oparms.create_options = create_options; 376 oparms.create_options = create_options;
377 oparms.disposition = FILE_OPEN; 377 oparms.disposition = FILE_CREATE;
378 oparms.path = path; 378 oparms.path = path;
379 oparms.fid = &fid; 379 oparms.fid = &fid;
380 oparms.reconnect = false; 380 oparms.reconnect = false;
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index c496f8a74639..9927913c97c2 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -147,6 +147,17 @@ int nfs_sync_mapping(struct address_space *mapping)
147 return ret; 147 return ret;
148} 148}
149 149
150static void nfs_set_cache_invalid(struct inode *inode, unsigned long flags)
151{
152 struct nfs_inode *nfsi = NFS_I(inode);
153
154 if (inode->i_mapping->nrpages == 0)
155 flags &= ~NFS_INO_INVALID_DATA;
156 nfsi->cache_validity |= flags;
157 if (flags & NFS_INO_INVALID_DATA)
158 nfs_fscache_invalidate(inode);
159}
160
150/* 161/*
151 * Invalidate the local caches 162 * Invalidate the local caches
152 */ 163 */
@@ -162,17 +173,16 @@ static void nfs_zap_caches_locked(struct inode *inode)
162 173
163 memset(NFS_I(inode)->cookieverf, 0, sizeof(NFS_I(inode)->cookieverf)); 174 memset(NFS_I(inode)->cookieverf, 0, sizeof(NFS_I(inode)->cookieverf));
164 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) { 175 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
165 nfs_fscache_invalidate(inode); 176 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR
166 nfsi->cache_validity |= NFS_INO_INVALID_ATTR
167 | NFS_INO_INVALID_DATA 177 | NFS_INO_INVALID_DATA
168 | NFS_INO_INVALID_ACCESS 178 | NFS_INO_INVALID_ACCESS
169 | NFS_INO_INVALID_ACL 179 | NFS_INO_INVALID_ACL
170 | NFS_INO_REVAL_PAGECACHE; 180 | NFS_INO_REVAL_PAGECACHE);
171 } else 181 } else
172 nfsi->cache_validity |= NFS_INO_INVALID_ATTR 182 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR
173 | NFS_INO_INVALID_ACCESS 183 | NFS_INO_INVALID_ACCESS
174 | NFS_INO_INVALID_ACL 184 | NFS_INO_INVALID_ACL
175 | NFS_INO_REVAL_PAGECACHE; 185 | NFS_INO_REVAL_PAGECACHE);
176 nfs_zap_label_cache_locked(nfsi); 186 nfs_zap_label_cache_locked(nfsi);
177} 187}
178 188
@@ -187,8 +197,7 @@ void nfs_zap_mapping(struct inode *inode, struct address_space *mapping)
187{ 197{
188 if (mapping->nrpages != 0) { 198 if (mapping->nrpages != 0) {
189 spin_lock(&inode->i_lock); 199 spin_lock(&inode->i_lock);
190 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_DATA; 200 nfs_set_cache_invalid(inode, NFS_INO_INVALID_DATA);
191 nfs_fscache_invalidate(inode);
192 spin_unlock(&inode->i_lock); 201 spin_unlock(&inode->i_lock);
193 } 202 }
194} 203}
@@ -209,7 +218,7 @@ EXPORT_SYMBOL_GPL(nfs_zap_acl_cache);
209void nfs_invalidate_atime(struct inode *inode) 218void nfs_invalidate_atime(struct inode *inode)
210{ 219{
211 spin_lock(&inode->i_lock); 220 spin_lock(&inode->i_lock);
212 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATIME; 221 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATIME);
213 spin_unlock(&inode->i_lock); 222 spin_unlock(&inode->i_lock);
214} 223}
215EXPORT_SYMBOL_GPL(nfs_invalidate_atime); 224EXPORT_SYMBOL_GPL(nfs_invalidate_atime);
@@ -369,7 +378,7 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st
369 inode->i_mode = fattr->mode; 378 inode->i_mode = fattr->mode;
370 if ((fattr->valid & NFS_ATTR_FATTR_MODE) == 0 379 if ((fattr->valid & NFS_ATTR_FATTR_MODE) == 0
371 && nfs_server_capable(inode, NFS_CAP_MODE)) 380 && nfs_server_capable(inode, NFS_CAP_MODE))
372 nfsi->cache_validity |= NFS_INO_INVALID_ATTR; 381 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
373 /* Why so? Because we want revalidate for devices/FIFOs, and 382 /* Why so? Because we want revalidate for devices/FIFOs, and
374 * that's precisely what we have in nfs_file_inode_operations. 383 * that's precisely what we have in nfs_file_inode_operations.
375 */ 384 */
@@ -415,36 +424,36 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st
415 if (fattr->valid & NFS_ATTR_FATTR_ATIME) 424 if (fattr->valid & NFS_ATTR_FATTR_ATIME)
416 inode->i_atime = fattr->atime; 425 inode->i_atime = fattr->atime;
417 else if (nfs_server_capable(inode, NFS_CAP_ATIME)) 426 else if (nfs_server_capable(inode, NFS_CAP_ATIME))
418 nfsi->cache_validity |= NFS_INO_INVALID_ATTR; 427 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
419 if (fattr->valid & NFS_ATTR_FATTR_MTIME) 428 if (fattr->valid & NFS_ATTR_FATTR_MTIME)
420 inode->i_mtime = fattr->mtime; 429 inode->i_mtime = fattr->mtime;
421 else if (nfs_server_capable(inode, NFS_CAP_MTIME)) 430 else if (nfs_server_capable(inode, NFS_CAP_MTIME))
422 nfsi->cache_validity |= NFS_INO_INVALID_ATTR; 431 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
423 if (fattr->valid & NFS_ATTR_FATTR_CTIME) 432 if (fattr->valid & NFS_ATTR_FATTR_CTIME)
424 inode->i_ctime = fattr->ctime; 433 inode->i_ctime = fattr->ctime;
425 else if (nfs_server_capable(inode, NFS_CAP_CTIME)) 434 else if (nfs_server_capable(inode, NFS_CAP_CTIME))
426 nfsi->cache_validity |= NFS_INO_INVALID_ATTR; 435 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
427 if (fattr->valid & NFS_ATTR_FATTR_CHANGE) 436 if (fattr->valid & NFS_ATTR_FATTR_CHANGE)
428 inode->i_version = fattr->change_attr; 437 inode->i_version = fattr->change_attr;
429 else if (nfs_server_capable(inode, NFS_CAP_CHANGE_ATTR)) 438 else if (nfs_server_capable(inode, NFS_CAP_CHANGE_ATTR))
430 nfsi->cache_validity |= NFS_INO_INVALID_ATTR; 439 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
431 if (fattr->valid & NFS_ATTR_FATTR_SIZE) 440 if (fattr->valid & NFS_ATTR_FATTR_SIZE)
432 inode->i_size = nfs_size_to_loff_t(fattr->size); 441 inode->i_size = nfs_size_to_loff_t(fattr->size);
433 else 442 else
434 nfsi->cache_validity |= NFS_INO_INVALID_ATTR 443 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR
435 | NFS_INO_REVAL_PAGECACHE; 444 | NFS_INO_REVAL_PAGECACHE);
436 if (fattr->valid & NFS_ATTR_FATTR_NLINK) 445 if (fattr->valid & NFS_ATTR_FATTR_NLINK)
437 set_nlink(inode, fattr->nlink); 446 set_nlink(inode, fattr->nlink);
438 else if (nfs_server_capable(inode, NFS_CAP_NLINK)) 447 else if (nfs_server_capable(inode, NFS_CAP_NLINK))
439 nfsi->cache_validity |= NFS_INO_INVALID_ATTR; 448 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
440 if (fattr->valid & NFS_ATTR_FATTR_OWNER) 449 if (fattr->valid & NFS_ATTR_FATTR_OWNER)
441 inode->i_uid = fattr->uid; 450 inode->i_uid = fattr->uid;
442 else if (nfs_server_capable(inode, NFS_CAP_OWNER)) 451 else if (nfs_server_capable(inode, NFS_CAP_OWNER))
443 nfsi->cache_validity |= NFS_INO_INVALID_ATTR; 452 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
444 if (fattr->valid & NFS_ATTR_FATTR_GROUP) 453 if (fattr->valid & NFS_ATTR_FATTR_GROUP)
445 inode->i_gid = fattr->gid; 454 inode->i_gid = fattr->gid;
446 else if (nfs_server_capable(inode, NFS_CAP_OWNER_GROUP)) 455 else if (nfs_server_capable(inode, NFS_CAP_OWNER_GROUP))
447 nfsi->cache_validity |= NFS_INO_INVALID_ATTR; 456 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
448 if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED) 457 if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED)
449 inode->i_blocks = fattr->du.nfs2.blocks; 458 inode->i_blocks = fattr->du.nfs2.blocks;
450 if (fattr->valid & NFS_ATTR_FATTR_SPACE_USED) { 459 if (fattr->valid & NFS_ATTR_FATTR_SPACE_USED) {
@@ -550,6 +559,9 @@ static int nfs_vmtruncate(struct inode * inode, loff_t offset)
550 559
551 spin_lock(&inode->i_lock); 560 spin_lock(&inode->i_lock);
552 i_size_write(inode, offset); 561 i_size_write(inode, offset);
562 /* Optimisation */
563 if (offset == 0)
564 NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_DATA;
553 spin_unlock(&inode->i_lock); 565 spin_unlock(&inode->i_lock);
554 566
555 truncate_pagecache(inode, offset); 567 truncate_pagecache(inode, offset);
@@ -578,7 +590,8 @@ void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr)
578 inode->i_uid = attr->ia_uid; 590 inode->i_uid = attr->ia_uid;
579 if ((attr->ia_valid & ATTR_GID) != 0) 591 if ((attr->ia_valid & ATTR_GID) != 0)
580 inode->i_gid = attr->ia_gid; 592 inode->i_gid = attr->ia_gid;
581 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL; 593 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ACCESS
594 | NFS_INO_INVALID_ACL);
582 spin_unlock(&inode->i_lock); 595 spin_unlock(&inode->i_lock);
583 } 596 }
584 if ((attr->ia_valid & ATTR_SIZE) != 0) { 597 if ((attr->ia_valid & ATTR_SIZE) != 0) {
@@ -1101,7 +1114,7 @@ static unsigned long nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr
1101 && inode->i_version == fattr->pre_change_attr) { 1114 && inode->i_version == fattr->pre_change_attr) {
1102 inode->i_version = fattr->change_attr; 1115 inode->i_version = fattr->change_attr;
1103 if (S_ISDIR(inode->i_mode)) 1116 if (S_ISDIR(inode->i_mode))
1104 nfsi->cache_validity |= NFS_INO_INVALID_DATA; 1117 nfs_set_cache_invalid(inode, NFS_INO_INVALID_DATA);
1105 ret |= NFS_INO_INVALID_ATTR; 1118 ret |= NFS_INO_INVALID_ATTR;
1106 } 1119 }
1107 /* If we have atomic WCC data, we may update some attributes */ 1120 /* If we have atomic WCC data, we may update some attributes */
@@ -1117,7 +1130,7 @@ static unsigned long nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr
1117 && timespec_equal(&inode->i_mtime, &fattr->pre_mtime)) { 1130 && timespec_equal(&inode->i_mtime, &fattr->pre_mtime)) {
1118 memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime)); 1131 memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
1119 if (S_ISDIR(inode->i_mode)) 1132 if (S_ISDIR(inode->i_mode))
1120 nfsi->cache_validity |= NFS_INO_INVALID_DATA; 1133 nfs_set_cache_invalid(inode, NFS_INO_INVALID_DATA);
1121 ret |= NFS_INO_INVALID_ATTR; 1134 ret |= NFS_INO_INVALID_ATTR;
1122 } 1135 }
1123 if ((fattr->valid & NFS_ATTR_FATTR_PRESIZE) 1136 if ((fattr->valid & NFS_ATTR_FATTR_PRESIZE)
@@ -1128,9 +1141,6 @@ static unsigned long nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr
1128 ret |= NFS_INO_INVALID_ATTR; 1141 ret |= NFS_INO_INVALID_ATTR;
1129 } 1142 }
1130 1143
1131 if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
1132 nfs_fscache_invalidate(inode);
1133
1134 return ret; 1144 return ret;
1135} 1145}
1136 1146
@@ -1189,7 +1199,7 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
1189 invalid |= NFS_INO_INVALID_ATIME; 1199 invalid |= NFS_INO_INVALID_ATIME;
1190 1200
1191 if (invalid != 0) 1201 if (invalid != 0)
1192 nfsi->cache_validity |= invalid; 1202 nfs_set_cache_invalid(inode, invalid);
1193 1203
1194 nfsi->read_cache_jiffies = fattr->time_start; 1204 nfsi->read_cache_jiffies = fattr->time_start;
1195 return 0; 1205 return 0;
@@ -1402,13 +1412,11 @@ EXPORT_SYMBOL_GPL(nfs_refresh_inode);
1402 1412
1403static int nfs_post_op_update_inode_locked(struct inode *inode, struct nfs_fattr *fattr) 1413static int nfs_post_op_update_inode_locked(struct inode *inode, struct nfs_fattr *fattr)
1404{ 1414{
1405 struct nfs_inode *nfsi = NFS_I(inode); 1415 unsigned long invalid = NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
1406 1416
1407 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE; 1417 if (S_ISDIR(inode->i_mode))
1408 if (S_ISDIR(inode->i_mode)) { 1418 invalid |= NFS_INO_INVALID_DATA;
1409 nfsi->cache_validity |= NFS_INO_INVALID_DATA; 1419 nfs_set_cache_invalid(inode, invalid);
1410 nfs_fscache_invalidate(inode);
1411 }
1412 if ((fattr->valid & NFS_ATTR_FATTR) == 0) 1420 if ((fattr->valid & NFS_ATTR_FATTR) == 0)
1413 return 0; 1421 return 0;
1414 return nfs_refresh_inode_locked(inode, fattr); 1422 return nfs_refresh_inode_locked(inode, fattr);
@@ -1601,6 +1609,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
1601 if ((nfsi->npages == 0) || new_isize > cur_isize) { 1609 if ((nfsi->npages == 0) || new_isize > cur_isize) {
1602 i_size_write(inode, new_isize); 1610 i_size_write(inode, new_isize);
1603 invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA; 1611 invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
1612 invalid &= ~NFS_INO_REVAL_PAGECACHE;
1604 } 1613 }
1605 dprintk("NFS: isize change on server for file %s/%ld " 1614 dprintk("NFS: isize change on server for file %s/%ld "
1606 "(%Ld to %Ld)\n", 1615 "(%Ld to %Ld)\n",
@@ -1702,10 +1711,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
1702 invalid &= ~NFS_INO_INVALID_DATA; 1711 invalid &= ~NFS_INO_INVALID_DATA;
1703 if (!NFS_PROTO(inode)->have_delegation(inode, FMODE_READ) || 1712 if (!NFS_PROTO(inode)->have_delegation(inode, FMODE_READ) ||
1704 (save_cache_validity & NFS_INO_REVAL_FORCED)) 1713 (save_cache_validity & NFS_INO_REVAL_FORCED))
1705 nfsi->cache_validity |= invalid; 1714 nfs_set_cache_invalid(inode, invalid);
1706
1707 if (invalid & NFS_INO_INVALID_DATA)
1708 nfs_fscache_invalidate(inode);
1709 1715
1710 return 0; 1716 return 0;
1711 out_err: 1717 out_err:
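
Every call site converted above follows the same pattern: the validity flags are merged under inode->i_lock by nfs_set_cache_invalid(), which clears NFS_INO_INVALID_DATA when there are no pages to invalidate and kicks fscache when data really is being invalidated. A condensed sketch of that calling pattern, mirroring nfs_zap_mapping() (example_invalidate_data is an illustrative name):

	/* Sketch of the calling convention the patch establishes. */
	static void example_invalidate_data(struct inode *inode)
	{
		spin_lock(&inode->i_lock);
		nfs_set_cache_invalid(inode, NFS_INO_INVALID_DATA);
		spin_unlock(&inode->i_lock);
	}
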
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index f63cb87cd730..ba2affa51941 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -230,7 +230,7 @@ int nfs_atomic_open(struct inode *, struct dentry *, struct file *,
230extern struct file_system_type nfs4_fs_type; 230extern struct file_system_type nfs4_fs_type;
231 231
232/* nfs4namespace.c */ 232/* nfs4namespace.c */
233struct rpc_clnt *nfs4_create_sec_client(struct rpc_clnt *, struct inode *, struct qstr *); 233struct rpc_clnt *nfs4_negotiate_security(struct rpc_clnt *, struct inode *, struct qstr *);
234struct vfsmount *nfs4_submount(struct nfs_server *, struct dentry *, 234struct vfsmount *nfs4_submount(struct nfs_server *, struct dentry *,
235 struct nfs_fh *, struct nfs_fattr *); 235 struct nfs_fh *, struct nfs_fattr *);
236int nfs4_replace_transport(struct nfs_server *server, 236int nfs4_replace_transport(struct nfs_server *server,
diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c
index 3d5dbf80d46a..3d83cb1fdc70 100644
--- a/fs/nfs/nfs4namespace.c
+++ b/fs/nfs/nfs4namespace.c
@@ -139,16 +139,22 @@ static size_t nfs_parse_server_name(char *string, size_t len,
139 * @server: NFS server struct 139 * @server: NFS server struct
140 * @flavors: List of security tuples returned by SECINFO procedure 140 * @flavors: List of security tuples returned by SECINFO procedure
141 * 141 *
142 * Return the pseudoflavor of the first security mechanism in 142 * Return an rpc client that uses the first security mechanism in
143 * "flavors" that is locally supported. Return RPC_AUTH_UNIX if 143 * "flavors" that is locally supported. The "flavors" array
144 * no matching flavor is found in the array. The "flavors" array
145 * is searched in the order returned from the server, per RFC 3530 144 * is searched in the order returned from the server, per RFC 3530
146 * recommendation. 145 * recommendation and each flavor is checked for membership in the
146 * sec= mount option list if it exists.
147 *
148 * Return -EPERM if no matching flavor is found in the array.
149 *
150 * Please call rpc_shutdown_client() when you are done with this rpc client.
151 *
147 */ 152 */
148static rpc_authflavor_t nfs_find_best_sec(struct nfs_server *server, 153static struct rpc_clnt *nfs_find_best_sec(struct rpc_clnt *clnt,
154 struct nfs_server *server,
149 struct nfs4_secinfo_flavors *flavors) 155 struct nfs4_secinfo_flavors *flavors)
150{ 156{
151 rpc_authflavor_t pseudoflavor; 157 rpc_authflavor_t pflavor;
152 struct nfs4_secinfo4 *secinfo; 158 struct nfs4_secinfo4 *secinfo;
153 unsigned int i; 159 unsigned int i;
154 160
@@ -159,62 +165,73 @@ static rpc_authflavor_t nfs_find_best_sec(struct nfs_server *server,
159 case RPC_AUTH_NULL: 165 case RPC_AUTH_NULL:
160 case RPC_AUTH_UNIX: 166 case RPC_AUTH_UNIX:
161 case RPC_AUTH_GSS: 167 case RPC_AUTH_GSS:
162 pseudoflavor = rpcauth_get_pseudoflavor(secinfo->flavor, 168 pflavor = rpcauth_get_pseudoflavor(secinfo->flavor,
163 &secinfo->flavor_info); 169 &secinfo->flavor_info);
164 /* make sure pseudoflavor matches sec= mount opt */ 170 /* does the pseudoflavor match a sec= mount opt? */
165 if (pseudoflavor != RPC_AUTH_MAXFLAVOR && 171 if (pflavor != RPC_AUTH_MAXFLAVOR &&
166 nfs_auth_info_match(&server->auth_info, 172 nfs_auth_info_match(&server->auth_info, pflavor)) {
167 pseudoflavor)) 173 struct rpc_clnt *new;
168 return pseudoflavor; 174 struct rpc_cred *cred;
169 break; 175
176 /* Cloning creates an rpc_auth for the flavor */
177 new = rpc_clone_client_set_auth(clnt, pflavor);
178 if (IS_ERR(new))
179 continue;
180 /**
181 * Check that the user actually can use the
182 * flavor. This is mostly for RPC_AUTH_GSS
183 * where cr_init obtains a gss context
184 */
185 cred = rpcauth_lookupcred(new->cl_auth, 0);
186 if (IS_ERR(cred)) {
187 rpc_shutdown_client(new);
188 continue;
189 }
190 put_rpccred(cred);
191 return new;
192 }
170 } 193 }
171 } 194 }
172 195 return ERR_PTR(-EPERM);
173 /* if there were any sec= options then nothing matched */
174 if (server->auth_info.flavor_len > 0)
175 return -EPERM;
176
177 return RPC_AUTH_UNIX;
178} 196}
179 197
180static rpc_authflavor_t nfs4_negotiate_security(struct inode *inode, struct qstr *name) 198/**
199 * nfs4_negotiate_security - in response to an NFS4ERR_WRONGSEC on lookup,
200 * return an rpc_clnt that uses the best available security flavor with
201 * respect to the secinfo flavor list and the sec= mount options.
202 *
203 * @clnt: RPC client to clone
204 * @inode: directory inode
205 * @name: lookup name
206 *
207 * Please call rpc_shutdown_client() when you are done with this rpc client.
208 */
209struct rpc_clnt *
210nfs4_negotiate_security(struct rpc_clnt *clnt, struct inode *inode,
211 struct qstr *name)
181{ 212{
182 struct page *page; 213 struct page *page;
183 struct nfs4_secinfo_flavors *flavors; 214 struct nfs4_secinfo_flavors *flavors;
184 rpc_authflavor_t flavor; 215 struct rpc_clnt *new;
185 int err; 216 int err;
186 217
187 page = alloc_page(GFP_KERNEL); 218 page = alloc_page(GFP_KERNEL);
188 if (!page) 219 if (!page)
189 return -ENOMEM; 220 return ERR_PTR(-ENOMEM);
221
190 flavors = page_address(page); 222 flavors = page_address(page);
191 223
192 err = nfs4_proc_secinfo(inode, name, flavors); 224 err = nfs4_proc_secinfo(inode, name, flavors);
193 if (err < 0) { 225 if (err < 0) {
194 flavor = err; 226 new = ERR_PTR(err);
195 goto out; 227 goto out;
196 } 228 }
197 229
198 flavor = nfs_find_best_sec(NFS_SERVER(inode), flavors); 230 new = nfs_find_best_sec(clnt, NFS_SERVER(inode), flavors);
199 231
200out: 232out:
201 put_page(page); 233 put_page(page);
202 return flavor; 234 return new;
203}
204
205/*
206 * Please call rpc_shutdown_client() when you are done with this client.
207 */
208struct rpc_clnt *nfs4_create_sec_client(struct rpc_clnt *clnt, struct inode *inode,
209 struct qstr *name)
210{
211 rpc_authflavor_t flavor;
212
213 flavor = nfs4_negotiate_security(inode, name);
214 if ((int)flavor < 0)
215 return ERR_PTR((int)flavor);
216
217 return rpc_clone_client_set_auth(clnt, flavor);
218} 235}
219 236
220static struct vfsmount *try_location(struct nfs_clone_mount *mountdata, 237static struct vfsmount *try_location(struct nfs_clone_mount *mountdata,
@@ -397,11 +414,6 @@ struct vfsmount *nfs4_submount(struct nfs_server *server, struct dentry *dentry,
397 414
398 if (client->cl_auth->au_flavor != flavor) 415 if (client->cl_auth->au_flavor != flavor)
399 flavor = client->cl_auth->au_flavor; 416 flavor = client->cl_auth->au_flavor;
400 else {
401 rpc_authflavor_t new = nfs4_negotiate_security(dir, name);
402 if ((int)new >= 0)
403 flavor = new;
404 }
405 mnt = nfs_do_submount(dentry, fh, fattr, flavor); 417 mnt = nfs_do_submount(dentry, fh, fattr, flavor);
406out: 418out:
407 rpc_shutdown_client(client); 419 rpc_shutdown_client(client);
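
nfs4_negotiate_security() now hands back a cloned rpc_clnt rather than a bare pseudoflavor, and per its comment the caller owns that clone. A sketch of the expected lifecycle (example_lookup_with_wrongsec is an illustrative name; the real caller in nfs4_proc_lookup_common below keeps the clone to retry the lookup):

	/* Sketch only: negotiate a flavor, use the clone, then release it. */
	static int example_lookup_with_wrongsec(struct rpc_clnt *clnt,
						struct inode *dir,
						struct qstr *name)
	{
		struct rpc_clnt *new = nfs4_negotiate_security(clnt, dir, name);
		int err = 0;

		if (IS_ERR(new))
			return PTR_ERR(new);

		/* ... retry the operation over "new" here ... */

		rpc_shutdown_client(new);	/* caller must release the clone */
		return err;
	}
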
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 285ad5334018..4bf3d97cc5a0 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -3247,7 +3247,7 @@ static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
3247 err = -EPERM; 3247 err = -EPERM;
3248 if (client != *clnt) 3248 if (client != *clnt)
3249 goto out; 3249 goto out;
3250 client = nfs4_create_sec_client(client, dir, name); 3250 client = nfs4_negotiate_security(client, dir, name);
3251 if (IS_ERR(client)) 3251 if (IS_ERR(client))
3252 return PTR_ERR(client); 3252 return PTR_ERR(client);
3253 3253
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 3ee5af4e738e..98ff061ccaf3 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -934,12 +934,14 @@ static bool nfs_write_pageuptodate(struct page *page, struct inode *inode)
934 934
935 if (nfs_have_delegated_attributes(inode)) 935 if (nfs_have_delegated_attributes(inode))
936 goto out; 936 goto out;
937 if (nfsi->cache_validity & (NFS_INO_INVALID_DATA|NFS_INO_REVAL_PAGECACHE)) 937 if (nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE)
938 return false; 938 return false;
939 smp_rmb(); 939 smp_rmb();
940 if (test_bit(NFS_INO_INVALIDATING, &nfsi->flags)) 940 if (test_bit(NFS_INO_INVALIDATING, &nfsi->flags))
941 return false; 941 return false;
942out: 942out:
943 if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
944 return false;
943 return PageUptodate(page) != 0; 945 return PageUptodate(page) != 0;
944} 946}
945 947
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index a106b3f2b22a..fae17c640df3 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -331,6 +331,7 @@ struct dlm_lock_resource
331 u16 state; 331 u16 state;
332 char lvb[DLM_LVB_LEN]; 332 char lvb[DLM_LVB_LEN];
333 unsigned int inflight_locks; 333 unsigned int inflight_locks;
334 unsigned int inflight_assert_workers;
334 unsigned long refmap[BITS_TO_LONGS(O2NM_MAX_NODES)]; 335 unsigned long refmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
335}; 336};
336 337
@@ -910,6 +911,9 @@ void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
910void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm, 911void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
911 struct dlm_lock_resource *res); 912 struct dlm_lock_resource *res);
912 913
914void __dlm_lockres_grab_inflight_worker(struct dlm_ctxt *dlm,
915 struct dlm_lock_resource *res);
916
913void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock); 917void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
914void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock); 918void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
915void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock); 919void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 3087a21d32f9..82abf0cc9a12 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -581,6 +581,7 @@ static void dlm_init_lockres(struct dlm_ctxt *dlm,
581 atomic_set(&res->asts_reserved, 0); 581 atomic_set(&res->asts_reserved, 0);
582 res->migration_pending = 0; 582 res->migration_pending = 0;
583 res->inflight_locks = 0; 583 res->inflight_locks = 0;
584 res->inflight_assert_workers = 0;
584 585
585 res->dlm = dlm; 586 res->dlm = dlm;
586 587
@@ -683,6 +684,43 @@ void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
683 wake_up(&res->wq); 684 wake_up(&res->wq);
684} 685}
685 686
687void __dlm_lockres_grab_inflight_worker(struct dlm_ctxt *dlm,
688 struct dlm_lock_resource *res)
689{
690 assert_spin_locked(&res->spinlock);
691 res->inflight_assert_workers++;
692 mlog(0, "%s:%.*s: inflight assert worker++: now %u\n",
693 dlm->name, res->lockname.len, res->lockname.name,
694 res->inflight_assert_workers);
695}
696
697static void dlm_lockres_grab_inflight_worker(struct dlm_ctxt *dlm,
698 struct dlm_lock_resource *res)
699{
700 spin_lock(&res->spinlock);
701 __dlm_lockres_grab_inflight_worker(dlm, res);
702 spin_unlock(&res->spinlock);
703}
704
705static void __dlm_lockres_drop_inflight_worker(struct dlm_ctxt *dlm,
706 struct dlm_lock_resource *res)
707{
708 assert_spin_locked(&res->spinlock);
709 BUG_ON(res->inflight_assert_workers == 0);
710 res->inflight_assert_workers--;
711 mlog(0, "%s:%.*s: inflight assert worker--: now %u\n",
712 dlm->name, res->lockname.len, res->lockname.name,
713 res->inflight_assert_workers);
714}
715
716static void dlm_lockres_drop_inflight_worker(struct dlm_ctxt *dlm,
717 struct dlm_lock_resource *res)
718{
719 spin_lock(&res->spinlock);
720 __dlm_lockres_drop_inflight_worker(dlm, res);
721 spin_unlock(&res->spinlock);
722}
723
686/* 724/*
687 * lookup a lock resource by name. 725 * lookup a lock resource by name.
688 * may already exist in the hashtable. 726 * may already exist in the hashtable.
@@ -1603,7 +1641,8 @@ send_response:
1603 mlog(ML_ERROR, "failed to dispatch assert master work\n"); 1641 mlog(ML_ERROR, "failed to dispatch assert master work\n");
1604 response = DLM_MASTER_RESP_ERROR; 1642 response = DLM_MASTER_RESP_ERROR;
1605 dlm_lockres_put(res); 1643 dlm_lockres_put(res);
1606 } 1644 } else
1645 dlm_lockres_grab_inflight_worker(dlm, res);
1607 } else { 1646 } else {
1608 if (res) 1647 if (res)
1609 dlm_lockres_put(res); 1648 dlm_lockres_put(res);
@@ -2118,6 +2157,8 @@ static void dlm_assert_master_worker(struct dlm_work_item *item, void *data)
2118 dlm_lockres_release_ast(dlm, res); 2157 dlm_lockres_release_ast(dlm, res);
2119 2158
2120put: 2159put:
2160 dlm_lockres_drop_inflight_worker(dlm, res);
2161
2121 dlm_lockres_put(res); 2162 dlm_lockres_put(res);
2122 2163
2123 mlog(0, "finished with dlm_assert_master_worker\n"); 2164 mlog(0, "finished with dlm_assert_master_worker\n");
@@ -3088,11 +3129,15 @@ static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
3088 /* remove it so that only one mle will be found */ 3129 /* remove it so that only one mle will be found */
3089 __dlm_unlink_mle(dlm, tmp); 3130 __dlm_unlink_mle(dlm, tmp);
3090 __dlm_mle_detach_hb_events(dlm, tmp); 3131 __dlm_mle_detach_hb_events(dlm, tmp);
3091 ret = DLM_MIGRATE_RESPONSE_MASTERY_REF; 3132 if (tmp->type == DLM_MLE_MASTER) {
3092 mlog(0, "%s:%.*s: master=%u, newmaster=%u, " 3133 ret = DLM_MIGRATE_RESPONSE_MASTERY_REF;
3093 "telling master to get ref for cleared out mle " 3134 mlog(0, "%s:%.*s: master=%u, newmaster=%u, "
3094 "during migration\n", dlm->name, namelen, name, 3135 "telling master to get ref "
3095 master, new_master); 3136 "for cleared out mle during "
3137 "migration\n", dlm->name,
3138 namelen, name, master,
3139 new_master);
3140 }
3096 } 3141 }
3097 spin_unlock(&tmp->spinlock); 3142 spin_unlock(&tmp->spinlock);
3098 } 3143 }
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 5de019437ea5..45067faf5695 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -1708,7 +1708,8 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
1708 mlog_errno(-ENOMEM); 1708 mlog_errno(-ENOMEM);
1709 /* retry!? */ 1709 /* retry!? */
1710 BUG(); 1710 BUG();
1711 } 1711 } else
1712 __dlm_lockres_grab_inflight_worker(dlm, res);
1712 } else /* put.. incase we are not the master */ 1713 } else /* put.. incase we are not the master */
1713 dlm_lockres_put(res); 1714 dlm_lockres_put(res);
1714 spin_unlock(&res->spinlock); 1715 spin_unlock(&res->spinlock);
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c
index 9db869de829d..69aac6f088ad 100644
--- a/fs/ocfs2/dlm/dlmthread.c
+++ b/fs/ocfs2/dlm/dlmthread.c
@@ -259,12 +259,15 @@ static void dlm_run_purge_list(struct dlm_ctxt *dlm,
259 * refs on it. */ 259 * refs on it. */
260 unused = __dlm_lockres_unused(lockres); 260 unused = __dlm_lockres_unused(lockres);
261 if (!unused || 261 if (!unused ||
262 (lockres->state & DLM_LOCK_RES_MIGRATING)) { 262 (lockres->state & DLM_LOCK_RES_MIGRATING) ||
263 (lockres->inflight_assert_workers != 0)) {
263 mlog(0, "%s: res %.*s is in use or being remastered, " 264 mlog(0, "%s: res %.*s is in use or being remastered, "
264 "used %d, state %d\n", dlm->name, 265 "used %d, state %d, assert master workers %u\n",
265 lockres->lockname.len, lockres->lockname.name, 266 dlm->name, lockres->lockname.len,
266 !unused, lockres->state); 267 lockres->lockname.name,
267 list_move_tail(&dlm->purge_list, &lockres->purge); 268 !unused, lockres->state,
269 lockres->inflight_assert_workers);
270 list_move_tail(&lockres->purge, &dlm->purge_list);
268 spin_unlock(&lockres->spinlock); 271 spin_unlock(&lockres->spinlock);
269 continue; 272 continue;
270 } 273 }
diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c
index 5698b52cf5c9..2e3c9dbab68c 100644
--- a/fs/ocfs2/dlm/dlmunlock.c
+++ b/fs/ocfs2/dlm/dlmunlock.c
@@ -191,7 +191,9 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
191 DLM_UNLOCK_CLEAR_CONVERT_TYPE); 191 DLM_UNLOCK_CLEAR_CONVERT_TYPE);
192 } else if (status == DLM_RECOVERING || 192 } else if (status == DLM_RECOVERING ||
193 status == DLM_MIGRATING || 193 status == DLM_MIGRATING ||
194 status == DLM_FORWARD) { 194 status == DLM_FORWARD ||
195 status == DLM_NOLOCKMGR
196 ) {
195 /* must clear the actions because this unlock 197 /* must clear the actions because this unlock
196 * is about to be retried. cannot free or do 198 * is about to be retried. cannot free or do
197 * any list manipulation. */ 199 * any list manipulation. */
@@ -200,7 +202,8 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
200 res->lockname.name, 202 res->lockname.name,
201 status==DLM_RECOVERING?"recovering": 203 status==DLM_RECOVERING?"recovering":
202 (status==DLM_MIGRATING?"migrating": 204 (status==DLM_MIGRATING?"migrating":
203 "forward")); 205 (status == DLM_FORWARD ? "forward" :
206 "nolockmanager")));
204 actions = 0; 207 actions = 0;
205 } 208 }
206 if (flags & LKM_CANCEL) 209 if (flags & LKM_CANCEL)
@@ -364,7 +367,10 @@ static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm,
364 * updated state to the recovery master. this thread 367 * updated state to the recovery master. this thread
365 * just needs to finish out the operation and call 368 * just needs to finish out the operation and call
366 * the unlockast. */ 369 * the unlockast. */
367 ret = DLM_NORMAL; 370 if (dlm_is_node_dead(dlm, owner))
371 ret = DLM_NORMAL;
372 else
373 ret = DLM_NOLOCKMGR;
368 } else { 374 } else {
369 /* something bad. this will BUG in ocfs2 */ 375 /* something bad. this will BUG in ocfs2 */
370 ret = dlm_err_to_dlm_status(tmpret); 376 ret = dlm_err_to_dlm_status(tmpret);
@@ -638,7 +644,9 @@ retry:
638 644
639 if (status == DLM_RECOVERING || 645 if (status == DLM_RECOVERING ||
640 status == DLM_MIGRATING || 646 status == DLM_MIGRATING ||
641 status == DLM_FORWARD) { 647 status == DLM_FORWARD ||
648 status == DLM_NOLOCKMGR) {
649
642 /* We want to go away for a tiny bit to allow recovery 650 /* We want to go away for a tiny bit to allow recovery
643 * / migration to complete on this resource. I don't 651 * / migration to complete on this resource. I don't
644 * know of any wait queue we could sleep on as this 652 * know of any wait queue we could sleep on as this
@@ -650,7 +658,7 @@ retry:
650 msleep(50); 658 msleep(50);
651 659
652 mlog(0, "retrying unlock due to pending recovery/" 660 mlog(0, "retrying unlock due to pending recovery/"
653 "migration/in-progress\n"); 661 "migration/in-progress/reconnect\n");
654 goto retry; 662 goto retry;
655 } 663 }
656 664
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 2060fc398445..8add6f1030d7 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -205,6 +205,21 @@ static struct inode *ocfs2_get_init_inode(struct inode *dir, umode_t mode)
205 return inode; 205 return inode;
206} 206}
207 207
208static void ocfs2_cleanup_add_entry_failure(struct ocfs2_super *osb,
209 struct dentry *dentry, struct inode *inode)
210{
211 struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
212
213 ocfs2_simple_drop_lockres(osb, &dl->dl_lockres);
214 ocfs2_lock_res_free(&dl->dl_lockres);
215 BUG_ON(dl->dl_count != 1);
216 spin_lock(&dentry_attach_lock);
217 dentry->d_fsdata = NULL;
218 spin_unlock(&dentry_attach_lock);
219 kfree(dl);
220 iput(inode);
221}
222
208static int ocfs2_mknod(struct inode *dir, 223static int ocfs2_mknod(struct inode *dir,
209 struct dentry *dentry, 224 struct dentry *dentry,
210 umode_t mode, 225 umode_t mode,
@@ -231,6 +246,7 @@ static int ocfs2_mknod(struct inode *dir,
231 sigset_t oldset; 246 sigset_t oldset;
232 int did_block_signals = 0; 247 int did_block_signals = 0;
233 struct posix_acl *default_acl = NULL, *acl = NULL; 248 struct posix_acl *default_acl = NULL, *acl = NULL;
249 struct ocfs2_dentry_lock *dl = NULL;
234 250
235 trace_ocfs2_mknod(dir, dentry, dentry->d_name.len, dentry->d_name.name, 251 trace_ocfs2_mknod(dir, dentry, dentry->d_name.len, dentry->d_name.name,
236 (unsigned long long)OCFS2_I(dir)->ip_blkno, 252 (unsigned long long)OCFS2_I(dir)->ip_blkno,
@@ -423,6 +439,8 @@ static int ocfs2_mknod(struct inode *dir,
423 goto leave; 439 goto leave;
424 } 440 }
425 441
442 dl = dentry->d_fsdata;
443
426 status = ocfs2_add_entry(handle, dentry, inode, 444 status = ocfs2_add_entry(handle, dentry, inode,
427 OCFS2_I(inode)->ip_blkno, parent_fe_bh, 445 OCFS2_I(inode)->ip_blkno, parent_fe_bh,
428 &lookup); 446 &lookup);
@@ -469,6 +487,9 @@ leave:
469 * ocfs2_delete_inode will mutex_lock again. 487 * ocfs2_delete_inode will mutex_lock again.
470 */ 488 */
471 if ((status < 0) && inode) { 489 if ((status < 0) && inode) {
490 if (dl)
491 ocfs2_cleanup_add_entry_failure(osb, dentry, inode);
492
472 OCFS2_I(inode)->ip_flags |= OCFS2_INODE_SKIP_ORPHAN_DIR; 493 OCFS2_I(inode)->ip_flags |= OCFS2_INODE_SKIP_ORPHAN_DIR;
473 clear_nlink(inode); 494 clear_nlink(inode);
474 iput(inode); 495 iput(inode);
@@ -991,6 +1012,65 @@ leave:
991 return status; 1012 return status;
992} 1013}
993 1014
1015static int ocfs2_check_if_ancestor(struct ocfs2_super *osb,
1016 u64 src_inode_no, u64 dest_inode_no)
1017{
1018 int ret = 0, i = 0;
1019 u64 parent_inode_no = 0;
1020 u64 child_inode_no = src_inode_no;
1021 struct inode *child_inode;
1022
1023#define MAX_LOOKUP_TIMES 32
1024 while (1) {
1025 child_inode = ocfs2_iget(osb, child_inode_no, 0, 0);
1026 if (IS_ERR(child_inode)) {
1027 ret = PTR_ERR(child_inode);
1028 break;
1029 }
1030
1031 ret = ocfs2_inode_lock(child_inode, NULL, 0);
1032 if (ret < 0) {
1033 iput(child_inode);
1034 if (ret != -ENOENT)
1035 mlog_errno(ret);
1036 break;
1037 }
1038
1039 ret = ocfs2_lookup_ino_from_name(child_inode, "..", 2,
1040 &parent_inode_no);
1041 ocfs2_inode_unlock(child_inode, 0);
1042 iput(child_inode);
1043 if (ret < 0) {
1044 ret = -ENOENT;
1045 break;
1046 }
1047
1048 if (parent_inode_no == dest_inode_no) {
1049 ret = 1;
1050 break;
1051 }
1052
1053 if (parent_inode_no == osb->root_inode->i_ino) {
1054 ret = 0;
1055 break;
1056 }
1057
1058 child_inode_no = parent_inode_no;
1059
1060 if (++i >= MAX_LOOKUP_TIMES) {
1061 mlog(ML_NOTICE, "max lookup times reached, filesystem "
1062 "may have nested directories, "
1063 "src inode: %llu, dest inode: %llu.\n",
1064 (unsigned long long)src_inode_no,
1065 (unsigned long long)dest_inode_no);
1066 ret = 0;
1067 break;
1068 }
1069 }
1070
1071 return ret;
1072}
1073
994/* 1074/*
995 * The only place this should be used is rename! 1075 * The only place this should be used is rename!
996 * if they have the same id, then the 1st one is the only one locked. 1076 * if they have the same id, then the 1st one is the only one locked.
@@ -1002,6 +1082,7 @@ static int ocfs2_double_lock(struct ocfs2_super *osb,
1002 struct inode *inode2) 1082 struct inode *inode2)
1003{ 1083{
1004 int status; 1084 int status;
1085 int inode1_is_ancestor, inode2_is_ancestor;
1005 struct ocfs2_inode_info *oi1 = OCFS2_I(inode1); 1086 struct ocfs2_inode_info *oi1 = OCFS2_I(inode1);
1006 struct ocfs2_inode_info *oi2 = OCFS2_I(inode2); 1087 struct ocfs2_inode_info *oi2 = OCFS2_I(inode2);
1007 struct buffer_head **tmpbh; 1088 struct buffer_head **tmpbh;
@@ -1015,9 +1096,26 @@ static int ocfs2_double_lock(struct ocfs2_super *osb,
1015 if (*bh2) 1096 if (*bh2)
1016 *bh2 = NULL; 1097 *bh2 = NULL;
1017 1098
1018 /* we always want to lock the one with the lower lockid first. */ 1099 /* we always want to lock the one with the lower lockid first.
1100 * and if they are nested, we lock ancestor first */
1019 if (oi1->ip_blkno != oi2->ip_blkno) { 1101 if (oi1->ip_blkno != oi2->ip_blkno) {
1020 if (oi1->ip_blkno < oi2->ip_blkno) { 1102 inode1_is_ancestor = ocfs2_check_if_ancestor(osb, oi2->ip_blkno,
1103 oi1->ip_blkno);
1104 if (inode1_is_ancestor < 0) {
1105 status = inode1_is_ancestor;
1106 goto bail;
1107 }
1108
1109 inode2_is_ancestor = ocfs2_check_if_ancestor(osb, oi1->ip_blkno,
1110 oi2->ip_blkno);
1111 if (inode2_is_ancestor < 0) {
1112 status = inode2_is_ancestor;
1113 goto bail;
1114 }
1115
1116 if ((inode1_is_ancestor == 1) ||
1117 (oi1->ip_blkno < oi2->ip_blkno &&
1118 inode2_is_ancestor == 0)) {
1021 /* switch id1 and id2 around */ 1119 /* switch id1 and id2 around */
1022 tmpbh = bh2; 1120 tmpbh = bh2;
1023 bh2 = bh1; 1121 bh2 = bh1;
@@ -1098,6 +1196,7 @@ static int ocfs2_rename(struct inode *old_dir,
1098 struct ocfs2_dir_lookup_result old_entry_lookup = { NULL, }; 1196 struct ocfs2_dir_lookup_result old_entry_lookup = { NULL, };
1099 struct ocfs2_dir_lookup_result orphan_insert = { NULL, }; 1197 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
1100 struct ocfs2_dir_lookup_result target_insert = { NULL, }; 1198 struct ocfs2_dir_lookup_result target_insert = { NULL, };
1199 bool should_add_orphan = false;
1101 1200
1102 /* At some point it might be nice to break this function up a 1201 /* At some point it might be nice to break this function up a
1103 * bit. */ 1202 * bit. */
@@ -1134,6 +1233,21 @@ static int ocfs2_rename(struct inode *old_dir,
1134 goto bail; 1233 goto bail;
1135 } 1234 }
1136 rename_lock = 1; 1235 rename_lock = 1;
1236
1237 /* here we cannot guarantee the inodes haven't just been
1238 * changed, so check if they are nested again */
1239 status = ocfs2_check_if_ancestor(osb, new_dir->i_ino,
1240 old_inode->i_ino);
1241 if (status < 0) {
1242 mlog_errno(status);
1243 goto bail;
1244 } else if (status == 1) {
1245 status = -EPERM;
1246 trace_ocfs2_rename_not_permitted(
1247 (unsigned long long)old_inode->i_ino,
1248 (unsigned long long)new_dir->i_ino);
1249 goto bail;
1250 }
1137 } 1251 }
1138 1252
1139 /* if old and new are the same, this'll just do one lock. */ 1253 /* if old and new are the same, this'll just do one lock. */
@@ -1304,6 +1418,7 @@ static int ocfs2_rename(struct inode *old_dir,
1304 mlog_errno(status); 1418 mlog_errno(status);
1305 goto bail; 1419 goto bail;
1306 } 1420 }
1421 should_add_orphan = true;
1307 } 1422 }
1308 } else { 1423 } else {
1309 BUG_ON(new_dentry->d_parent->d_inode != new_dir); 1424 BUG_ON(new_dentry->d_parent->d_inode != new_dir);
@@ -1348,17 +1463,6 @@ static int ocfs2_rename(struct inode *old_dir,
1348 goto bail; 1463 goto bail;
1349 } 1464 }
1350 1465
1351 if (S_ISDIR(new_inode->i_mode) ||
1352 (ocfs2_read_links_count(newfe) == 1)) {
1353 status = ocfs2_orphan_add(osb, handle, new_inode,
1354 newfe_bh, orphan_name,
1355 &orphan_insert, orphan_dir);
1356 if (status < 0) {
1357 mlog_errno(status);
1358 goto bail;
1359 }
1360 }
1361
1362 /* change the dirent to point to the correct inode */ 1466 /* change the dirent to point to the correct inode */
1363 status = ocfs2_update_entry(new_dir, handle, &target_lookup_res, 1467 status = ocfs2_update_entry(new_dir, handle, &target_lookup_res,
1364 old_inode); 1468 old_inode);
@@ -1373,6 +1477,15 @@ static int ocfs2_rename(struct inode *old_dir,
1373 else 1477 else
1374 ocfs2_add_links_count(newfe, -1); 1478 ocfs2_add_links_count(newfe, -1);
1375 ocfs2_journal_dirty(handle, newfe_bh); 1479 ocfs2_journal_dirty(handle, newfe_bh);
1480 if (should_add_orphan) {
1481 status = ocfs2_orphan_add(osb, handle, new_inode,
1482 newfe_bh, orphan_name,
1483 &orphan_insert, orphan_dir);
1484 if (status < 0) {
1485 mlog_errno(status);
1486 goto bail;
1487 }
1488 }
1376 } else { 1489 } else {
1377 /* if the name was not found in new_dir, add it now */ 1490 /* if the name was not found in new_dir, add it now */
1378 status = ocfs2_add_entry(handle, new_dentry, old_inode, 1491 status = ocfs2_add_entry(handle, new_dentry, old_inode,
@@ -1642,6 +1755,7 @@ static int ocfs2_symlink(struct inode *dir,
1642 struct ocfs2_dir_lookup_result lookup = { NULL, }; 1755 struct ocfs2_dir_lookup_result lookup = { NULL, };
1643 sigset_t oldset; 1756 sigset_t oldset;
1644 int did_block_signals = 0; 1757 int did_block_signals = 0;
1758 struct ocfs2_dentry_lock *dl = NULL;
1645 1759
1646 trace_ocfs2_symlink_begin(dir, dentry, symname, 1760 trace_ocfs2_symlink_begin(dir, dentry, symname,
1647 dentry->d_name.len, dentry->d_name.name); 1761 dentry->d_name.len, dentry->d_name.name);
@@ -1830,6 +1944,8 @@ static int ocfs2_symlink(struct inode *dir,
1830 goto bail; 1944 goto bail;
1831 } 1945 }
1832 1946
1947 dl = dentry->d_fsdata;
1948
1833 status = ocfs2_add_entry(handle, dentry, inode, 1949 status = ocfs2_add_entry(handle, dentry, inode,
1834 le64_to_cpu(fe->i_blkno), parent_fe_bh, 1950 le64_to_cpu(fe->i_blkno), parent_fe_bh,
1835 &lookup); 1951 &lookup);
@@ -1864,6 +1980,9 @@ bail:
1864 if (xattr_ac) 1980 if (xattr_ac)
1865 ocfs2_free_alloc_context(xattr_ac); 1981 ocfs2_free_alloc_context(xattr_ac);
1866 if ((status < 0) && inode) { 1982 if ((status < 0) && inode) {
1983 if (dl)
1984 ocfs2_cleanup_add_entry_failure(osb, dentry, inode);
1985
1867 OCFS2_I(inode)->ip_flags |= OCFS2_INODE_SKIP_ORPHAN_DIR; 1986 OCFS2_I(inode)->ip_flags |= OCFS2_INODE_SKIP_ORPHAN_DIR;
1868 clear_nlink(inode); 1987 clear_nlink(inode);
1869 iput(inode); 1988 iput(inode);
diff --git a/fs/ocfs2/ocfs2_trace.h b/fs/ocfs2/ocfs2_trace.h
index 1b60c62aa9d6..6cb019b7c6a8 100644
--- a/fs/ocfs2/ocfs2_trace.h
+++ b/fs/ocfs2/ocfs2_trace.h
@@ -2292,6 +2292,8 @@ TRACE_EVENT(ocfs2_rename,
2292 __entry->new_len, __get_str(new_name)) 2292 __entry->new_len, __get_str(new_name))
2293); 2293);
2294 2294
2295DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_rename_not_permitted);
2296
2295TRACE_EVENT(ocfs2_rename_target_exists, 2297TRACE_EVENT(ocfs2_rename_target_exists,
2296 TP_PROTO(int new_len, const char *new_name), 2298 TP_PROTO(int new_len, const char *new_name),
2297 TP_ARGS(new_len, new_name), 2299 TP_ARGS(new_len, new_name),
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 714e53b9cc66..636aab69ead5 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -4288,9 +4288,16 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
4288 goto out; 4288 goto out;
4289 } 4289 }
4290 4290
4291 error = ocfs2_rw_lock(inode, 1);
4292 if (error) {
4293 mlog_errno(error);
4294 goto out;
4295 }
4296
4291 error = ocfs2_inode_lock(inode, &old_bh, 1); 4297 error = ocfs2_inode_lock(inode, &old_bh, 1);
4292 if (error) { 4298 if (error) {
4293 mlog_errno(error); 4299 mlog_errno(error);
4300 ocfs2_rw_unlock(inode, 1);
4294 goto out; 4301 goto out;
4295 } 4302 }
4296 4303
@@ -4302,6 +4309,7 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
4302 up_write(&OCFS2_I(inode)->ip_xattr_sem); 4309 up_write(&OCFS2_I(inode)->ip_xattr_sem);
4303 4310
4304 ocfs2_inode_unlock(inode, 1); 4311 ocfs2_inode_unlock(inode, 1);
4312 ocfs2_rw_unlock(inode, 1);
4305 brelse(old_bh); 4313 brelse(old_bh);
4306 4314
4307 if (error) { 4315 if (error) {
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index c7a89cea5c5d..ddb662b32447 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1925,15 +1925,11 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
1925 1925
1926 ocfs2_shutdown_local_alloc(osb); 1926 ocfs2_shutdown_local_alloc(osb);
1927 1927
1928 ocfs2_truncate_log_shutdown(osb);
1929
1928 /* This will disable recovery and flush any recovery work. */ 1930 /* This will disable recovery and flush any recovery work. */
1929 ocfs2_recovery_exit(osb); 1931 ocfs2_recovery_exit(osb);
1930 1932
1931 /*
1932 * During dismount, when it recovers another node it will call
1933 * ocfs2_recover_orphans and queue delayed work osb_truncate_log_wq.
1934 */
1935 ocfs2_truncate_log_shutdown(osb);
1936
1937 ocfs2_journal_shutdown(osb); 1933 ocfs2_journal_shutdown(osb);
1938 1934
1939 ocfs2_sync_blockdev(sb); 1935 ocfs2_sync_blockdev(sb);
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 0572035673f3..a70d45647898 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -237,13 +237,21 @@
237#define INTEL_BDW_GT3D_IDS(info) \ 237#define INTEL_BDW_GT3D_IDS(info) \
238 _INTEL_BDW_D_IDS(3, info) 238 _INTEL_BDW_D_IDS(3, info)
239 239
240#define INTEL_BDW_RSVDM_IDS(info) \
241 _INTEL_BDW_M_IDS(4, info)
242
243#define INTEL_BDW_RSVDD_IDS(info) \
244 _INTEL_BDW_D_IDS(4, info)
245
240#define INTEL_BDW_M_IDS(info) \ 246#define INTEL_BDW_M_IDS(info) \
241 INTEL_BDW_GT12M_IDS(info), \ 247 INTEL_BDW_GT12M_IDS(info), \
242 INTEL_BDW_GT3M_IDS(info) 248 INTEL_BDW_GT3M_IDS(info), \
249 INTEL_BDW_RSVDM_IDS(info)
243 250
244#define INTEL_BDW_D_IDS(info) \ 251#define INTEL_BDW_D_IDS(info) \
245 INTEL_BDW_GT12D_IDS(info), \ 252 INTEL_BDW_GT12D_IDS(info), \
246 INTEL_BDW_GT3D_IDS(info) 253 INTEL_BDW_GT3D_IDS(info), \
254 INTEL_BDW_RSVDD_IDS(info)
247 255
248#define INTEL_CHV_IDS(info) \ 256#define INTEL_CHV_IDS(info) \
249 INTEL_VGA_DEVICE(0x22b0, info), \ 257 INTEL_VGA_DEVICE(0x22b0, info), \
diff --git a/include/dt-bindings/clock/imx6sl-clock.h b/include/dt-bindings/clock/imx6sl-clock.h
index 7cf5c9969336..b91dd462ba85 100644
--- a/include/dt-bindings/clock/imx6sl-clock.h
+++ b/include/dt-bindings/clock/imx6sl-clock.h
@@ -145,6 +145,7 @@
145#define IMX6SL_CLK_USDHC4 132 145#define IMX6SL_CLK_USDHC4 132
146#define IMX6SL_CLK_PLL4_AUDIO_DIV 133 146#define IMX6SL_CLK_PLL4_AUDIO_DIV 133
147#define IMX6SL_CLK_SPBA 134 147#define IMX6SL_CLK_SPBA 134
148#define IMX6SL_CLK_END 135 148#define IMX6SL_CLK_ENET 135
149#define IMX6SL_CLK_END 136
149 150
150#endif /* __DT_BINDINGS_CLOCK_IMX6SL_H */ 151#endif /* __DT_BINDINGS_CLOCK_IMX6SL_H */
diff --git a/include/dt-bindings/clock/stih415-clks.h b/include/dt-bindings/clock/stih415-clks.h
index 0d2c7397e028..d80caa68aebd 100644
--- a/include/dt-bindings/clock/stih415-clks.h
+++ b/include/dt-bindings/clock/stih415-clks.h
@@ -10,6 +10,7 @@
10#define CLK_ETH1_PHY 4 10#define CLK_ETH1_PHY 4
11 11
12/* CLOCKGEN A1 */ 12/* CLOCKGEN A1 */
13#define CLK_ICN_IF_2 0
13#define CLK_GMAC0_PHY 3 14#define CLK_GMAC0_PHY 3
14 15
15#endif 16#endif
diff --git a/include/dt-bindings/clock/stih416-clks.h b/include/dt-bindings/clock/stih416-clks.h
index 552c779eb6af..f9bdbd13568d 100644
--- a/include/dt-bindings/clock/stih416-clks.h
+++ b/include/dt-bindings/clock/stih416-clks.h
@@ -10,6 +10,7 @@
10#define CLK_ETH1_PHY 4 10#define CLK_ETH1_PHY 4
11 11
12/* CLOCKGEN A1 */ 12/* CLOCKGEN A1 */
13#define CLK_ICN_IF_2 0
13#define CLK_GMAC0_PHY 3 14#define CLK_GMAC0_PHY 3
14 15
15#endif 16#endif
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 5a645769f020..d2633ee099d9 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -186,6 +186,15 @@ static inline void *bio_data(struct bio *bio)
186#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \ 186#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
187 __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q))) 187 __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
188 188
189/*
190 * Check if adding a bio_vec after bprv with offset would create a gap in
191 * the SG list. Most drivers don't care about this, but some do.
192 */
193static inline bool bvec_gap_to_prev(struct bio_vec *bprv, unsigned int offset)
194{
195 return offset || ((bprv->bv_offset + bprv->bv_len) & (PAGE_SIZE - 1));
196}
197
189#define bio_io_error(bio) bio_endio((bio), -EIO) 198#define bio_io_error(bio) bio_endio((bio), -EIO)
190 199
191/* 200/*
@@ -644,10 +653,6 @@ struct biovec_slab {
644 653
645#if defined(CONFIG_BLK_DEV_INTEGRITY) 654#if defined(CONFIG_BLK_DEV_INTEGRITY)
646 655
647
648
649#define bip_vec_idx(bip, idx) (&(bip->bip_vec[(idx)]))
650
651#define bip_for_each_vec(bvl, bip, iter) \ 656#define bip_for_each_vec(bvl, bip, iter) \
652 for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter) 657 for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)
653 658
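For readers following the bio.h hunk above: bvec_gap_to_prev() reports a gap whenever the previous segment does not end on a page boundary or the next one does not start at offset zero; queues that set the new QUEUE_FLAG_SG_GAPS flag (see the blkdev.h hunk below) rely on this to refuse such merges. A minimal userspace sketch of the same check, using stand-in names (vec, PAGE_SZ) rather than the kernel types:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SZ 4096u

struct vec {
	unsigned int offset;	/* byte offset into the page */
	unsigned int len;	/* length of the segment */
};

/* A gap exists if the previous segment does not end on a page
 * boundary, or if the next segment does not start at offset 0. */
static bool gap_to_prev(const struct vec *prev, unsigned int next_offset)
{
	return next_offset || ((prev->offset + prev->len) & (PAGE_SZ - 1));
}

int main(void)
{
	struct vec full_page = { .offset = 0, .len = PAGE_SZ };
	struct vec partial   = { .offset = 0, .len = 512 };

	printf("full page, next at 0   -> gap? %d\n", gap_to_prev(&full_page, 0));	/* 0 */
	printf("partial page, next at 0 -> gap? %d\n", gap_to_prev(&partial, 0));	/* 1 */
	printf("full page, next at 8   -> gap? %d\n", gap_to_prev(&full_page, 8));	/* 1 */
	return 0;
}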
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 713f8b62b435..8699bcf5f099 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -512,6 +512,7 @@ struct request_queue {
512#define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */ 512#define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */
513#define QUEUE_FLAG_INIT_DONE 20 /* queue is initialized */ 513#define QUEUE_FLAG_INIT_DONE 20 /* queue is initialized */
514#define QUEUE_FLAG_NO_SG_MERGE 21 /* don't attempt to merge SG segments*/ 514#define QUEUE_FLAG_NO_SG_MERGE 21 /* don't attempt to merge SG segments*/
515#define QUEUE_FLAG_SG_GAPS 22 /* queue doesn't support SG gaps */
515 516
516#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ 517#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
517 (1 << QUEUE_FLAG_STACKABLE) | \ 518 (1 << QUEUE_FLAG_STACKABLE) | \
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index e2a6bd7fb133..45a91474487d 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -143,7 +143,7 @@ extern void elv_drain_elevator(struct request_queue *);
143 * io scheduler registration 143 * io scheduler registration
144 */ 144 */
145extern void __init load_default_elevator_module(void); 145extern void __init load_default_elevator_module(void);
146extern int __init elv_register(struct elevator_type *); 146extern int elv_register(struct elevator_type *);
147extern void elv_unregister(struct elevator_type *); 147extern void elv_unregister(struct elevator_type *);
148 148
149/* 149/*
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 6a45fb583ff1..447775ee2c4b 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -32,15 +32,24 @@ static inline void touch_nmi_watchdog(void)
32#ifdef arch_trigger_all_cpu_backtrace 32#ifdef arch_trigger_all_cpu_backtrace
33static inline bool trigger_all_cpu_backtrace(void) 33static inline bool trigger_all_cpu_backtrace(void)
34{ 34{
35 arch_trigger_all_cpu_backtrace(); 35 arch_trigger_all_cpu_backtrace(true);
36 36
37 return true; 37 return true;
38} 38}
39static inline bool trigger_allbutself_cpu_backtrace(void)
40{
41 arch_trigger_all_cpu_backtrace(false);
42 return true;
43}
39#else 44#else
40static inline bool trigger_all_cpu_backtrace(void) 45static inline bool trigger_all_cpu_backtrace(void)
41{ 46{
42 return false; 47 return false;
43} 48}
49static inline bool trigger_allbutself_cpu_backtrace(void)
50{
51 return false;
52}
44#endif 53#endif
45 54
46#ifdef CONFIG_LOCKUP_DETECTOR 55#ifdef CONFIG_LOCKUP_DETECTOR
@@ -48,6 +57,7 @@ int hw_nmi_is_cpu_stuck(struct pt_regs *);
48u64 hw_nmi_get_sample_period(int watchdog_thresh); 57u64 hw_nmi_get_sample_period(int watchdog_thresh);
49extern int watchdog_user_enabled; 58extern int watchdog_user_enabled;
50extern int watchdog_thresh; 59extern int watchdog_thresh;
60extern int sysctl_softlockup_all_cpu_backtrace;
51struct ctl_table; 61struct ctl_table;
52extern int proc_dowatchdog(struct ctl_table *, int , 62extern int proc_dowatchdog(struct ctl_table *, int ,
53 void __user *, size_t *, loff_t *); 63 void __user *, size_t *, loff_t *);
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 3c545b48aeab..8304959ad336 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -360,6 +360,9 @@ static inline void ClearPageCompound(struct page *page)
360 ClearPageHead(page); 360 ClearPageHead(page);
361} 361}
362#endif 362#endif
363
364#define PG_head_mask ((1L << PG_head))
365
363#else 366#else
364/* 367/*
365 * Reduce page flag use as much as possible by overlapping 368 * Reduce page flag use as much as possible by overlapping
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 864ddafad8cc..68041446c450 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -536,6 +536,15 @@ struct phy_driver {
536 /* See set_wol, but for checking whether Wake on LAN is enabled. */ 536 /* See set_wol, but for checking whether Wake on LAN is enabled. */
537 void (*get_wol)(struct phy_device *dev, struct ethtool_wolinfo *wol); 537 void (*get_wol)(struct phy_device *dev, struct ethtool_wolinfo *wol);
538 538
539 /*
540 * Called to inform a PHY device driver when the core is about to
541 * change the link state. This callback is supposed to be used as
542 * fixup hook for drivers that need to take action when the link
543 * state changes. Drivers are by no means allowed to mess with the
544 * PHY device structure in their implementations.
545 */
546 void (*link_change_notify)(struct phy_device *dev);
547
539 struct device_driver driver; 548 struct device_driver driver;
540}; 549};
541#define to_phy_driver(d) container_of(d, struct phy_driver, driver) 550#define to_phy_driver(d) container_of(d, struct phy_driver, driver)
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 8e98297f1388..ec538fc287a6 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -305,8 +305,6 @@ struct ucred {
305/* IPX options */ 305/* IPX options */
306#define IPX_TYPE 1 306#define IPX_TYPE 1
307 307
308extern int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
309 int offset, int len);
310extern int csum_partial_copy_fromiovecend(unsigned char *kdata, 308extern int csum_partial_copy_fromiovecend(unsigned char *kdata,
311 struct iovec *iov, 309 struct iovec *iov,
312 int offset, 310 int offset,
@@ -315,8 +313,6 @@ extern unsigned long iov_pages(const struct iovec *iov, int offset,
315 unsigned long nr_segs); 313 unsigned long nr_segs);
316 314
317extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode); 315extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode);
318extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
319 int offset, int len);
320extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr); 316extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr);
321extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data); 317extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
322 318
diff --git a/include/linux/uio.h b/include/linux/uio.h
index e2231e47cec1..09a7cffc224e 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -94,8 +94,20 @@ static inline size_t iov_iter_count(struct iov_iter *i)
94 return i->count; 94 return i->count;
95} 95}
96 96
97static inline void iov_iter_truncate(struct iov_iter *i, size_t count) 97/*
98 * Cap the iov_iter by given limit; note that the second argument is
99 * *not* the new size - it's upper limit for such. Passing it a value
100 * greater than the amount of data in iov_iter is fine - it'll just do
101 * nothing in that case.
102 */
103static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
98{ 104{
105 /*
106 * count doesn't have to fit in size_t - comparison extends both
107 * operands to u64 here and any value that would be truncated by
 108 * conversion in assignment is by definition greater than all
109 * values of size_t, including old i->count.
110 */
99 if (i->count > count) 111 if (i->count > count)
100 i->count = count; 112 i->count = count;
101} 113}
@@ -111,6 +123,9 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
111 123
112int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len); 124int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len);
113int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len); 125int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len);
114 126int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
127 int offset, int len);
128int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
129 int offset, int len);
115 130
116#endif 131#endif
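The uio.h hunk widens the iov_iter_truncate() limit to u64 so that a caller-supplied bound wider than size_t (for example a file size limit on a 32-bit kernel) cannot be silently truncated before the comparison. A small userspace sketch of the same idea, with local stand-in names rather than the kernel API:

#include <stdint.h>
#include <stdio.h>

struct iter {
	size_t count;		/* bytes remaining in the iterator */
};

static void iter_truncate(struct iter *i, uint64_t limit)
{
	/* Both operands are widened to u64; a limit that would not fit
	 * in size_t is by definition larger than any possible count. */
	if (i->count > limit)
		i->count = limit;
}

int main(void)
{
	struct iter i = { .count = 4096 };

	iter_truncate(&i, 100);			/* caps to 100 */
	printf("after cap to 100: %zu\n", i.count);

	iter_truncate(&i, UINT64_MAX);		/* no-op, even on 32-bit */
	printf("after huge limit: %zu\n", i.count);
	return 0;
}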
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 7ee6ce6564ae..713b0b88bd5a 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -503,9 +503,9 @@ enum nft_chain_flags {
503 * @net: net namespace that this chain belongs to 503 * @net: net namespace that this chain belongs to
504 * @table: table that this chain belongs to 504 * @table: table that this chain belongs to
505 * @handle: chain handle 505 * @handle: chain handle
506 * @flags: bitmask of enum nft_chain_flags
507 * @use: number of jump references to this chain 506 * @use: number of jump references to this chain
508 * @level: length of longest path to this chain 507 * @level: length of longest path to this chain
508 * @flags: bitmask of enum nft_chain_flags
509 * @name: name of the chain 509 * @name: name of the chain
510 */ 510 */
511struct nft_chain { 511struct nft_chain {
@@ -514,9 +514,9 @@ struct nft_chain {
514 struct net *net; 514 struct net *net;
515 struct nft_table *table; 515 struct nft_table *table;
516 u64 handle; 516 u64 handle;
517 u8 flags; 517 u32 use;
518 u16 use;
519 u16 level; 518 u16 level;
519 u8 flags;
520 char name[NFT_CHAIN_MAXNAMELEN]; 520 char name[NFT_CHAIN_MAXNAMELEN];
521}; 521};
522 522
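The nft_chain hunk widens @use to u32 and reorders the tail of the struct so the widest members come first; ordering fields by decreasing alignment avoids padding holes. A small demonstration of that layout effect with generic field names (not the netfilter struct itself):

#include <stdint.h>
#include <stdio.h>

struct before {			/* mixed ordering leaves holes */
	uint8_t  flags;
	uint64_t handle;
	uint16_t level;
	uint32_t use;
};

struct after {			/* widest first: no internal padding */
	uint64_t handle;
	uint32_t use;
	uint16_t level;
	uint8_t  flags;
};

int main(void)
{
	printf("before: %zu bytes\n", sizeof(struct before));	/* 24 on x86_64 */
	printf("after:  %zu bytes\n", sizeof(struct after));	/* 16 on x86_64 */
	return 0;
}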
diff --git a/include/net/sock.h b/include/net/sock.h
index 07b7fcd60d80..173cae485de1 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1730,8 +1730,8 @@ sk_dst_get(struct sock *sk)
1730 1730
1731 rcu_read_lock(); 1731 rcu_read_lock();
1732 dst = rcu_dereference(sk->sk_dst_cache); 1732 dst = rcu_dereference(sk->sk_dst_cache);
1733 if (dst) 1733 if (dst && !atomic_inc_not_zero(&dst->__refcnt))
1734 dst_hold(dst); 1734 dst = NULL;
1735 rcu_read_unlock(); 1735 rcu_read_unlock();
1736 return dst; 1736 return dst;
1737} 1737}
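The sk_dst_get() hunk only takes a reference when the refcount is still non-zero, so a dst whose last reference is already being dropped is not resurrected. The same "take a reference only if the object is still live" pattern, sketched with C11 atomics standing in for the kernel's atomic_inc_not_zero() (object and function names are illustrative only):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	atomic_int refcnt;
};

/* Increment refcnt unless it has already dropped to zero. */
static bool get_if_live(struct obj *o)
{
	int old = atomic_load(&o->refcnt);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&o->refcnt, &old, old + 1))
			return true;	/* reference taken */
		/* old was reloaded by the failed CAS; retry */
	}
	return false;			/* too late, caller must not use o */
}

int main(void)
{
	struct obj live  = { .refcnt = 1 };
	struct obj dying = { .refcnt = 0 };

	printf("live:  %s\n", get_if_live(&live) ? "got ref" : "missed");
	printf("dying: %s\n", get_if_live(&dying) ? "got ref" : "missed");
	return 0;
}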
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 0fd06fef9fac..26b4f2e13275 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -44,6 +44,12 @@
44#undef __field_ext 44#undef __field_ext
45#define __field_ext(type, item, filter_type) type item; 45#define __field_ext(type, item, filter_type) type item;
46 46
47#undef __field_struct
48#define __field_struct(type, item) type item;
49
50#undef __field_struct_ext
51#define __field_struct_ext(type, item, filter_type) type item;
52
47#undef __array 53#undef __array
48#define __array(type, item, len) type item[len]; 54#define __array(type, item, len) type item[len];
49 55
@@ -122,6 +128,12 @@
122#undef __field_ext 128#undef __field_ext
123#define __field_ext(type, item, filter_type) 129#define __field_ext(type, item, filter_type)
124 130
131#undef __field_struct
132#define __field_struct(type, item)
133
134#undef __field_struct_ext
135#define __field_struct_ext(type, item, filter_type)
136
125#undef __array 137#undef __array
126#define __array(type, item, len) 138#define __array(type, item, len)
127 139
@@ -315,9 +327,21 @@ static struct trace_event_functions ftrace_event_type_funcs_##call = { \
315 if (ret) \ 327 if (ret) \
316 return ret; 328 return ret;
317 329
330#undef __field_struct_ext
331#define __field_struct_ext(type, item, filter_type) \
332 ret = trace_define_field(event_call, #type, #item, \
333 offsetof(typeof(field), item), \
334 sizeof(field.item), \
335 0, filter_type); \
336 if (ret) \
337 return ret;
338
318#undef __field 339#undef __field
319#define __field(type, item) __field_ext(type, item, FILTER_OTHER) 340#define __field(type, item) __field_ext(type, item, FILTER_OTHER)
320 341
342#undef __field_struct
343#define __field_struct(type, item) __field_struct_ext(type, item, FILTER_OTHER)
344
321#undef __array 345#undef __array
322#define __array(type, item, len) \ 346#define __array(type, item, len) \
323 do { \ 347 do { \
@@ -379,6 +403,12 @@ ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
379#undef __field_ext 403#undef __field_ext
380#define __field_ext(type, item, filter_type) 404#define __field_ext(type, item, filter_type)
381 405
406#undef __field_struct
407#define __field_struct(type, item)
408
409#undef __field_struct_ext
410#define __field_struct_ext(type, item, filter_type)
411
382#undef __array 412#undef __array
383#define __array(type, item, len) 413#define __array(type, item, len)
384 414
@@ -550,6 +580,9 @@ static inline notrace int ftrace_get_offsets_##call( \
550#undef __field 580#undef __field
551#define __field(type, item) 581#define __field(type, item)
552 582
583#undef __field_struct
584#define __field_struct(type, item)
585
553#undef __array 586#undef __array
554#define __array(type, item, len) 587#define __array(type, item, len)
555 588
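The ftrace.h hunk adds __field_struct()/__field_struct_ext() to every expansion pass of the event header; the file works by expanding one field list several times under different macro definitions. A toy version of that multi-expansion pattern, with made-up names and the list passed as a macro argument rather than via the kernel's #undef/#define passes:

#include <stdio.h>
#include <stddef.h>

/* One definition of the record's fields, reused below. */
#define MY_FIELDS(F) \
	F(int,  pid)     \
	F(long, runtime)

/* Pass 1: emit the struct members. */
#define EMIT_MEMBER(type, name) type name;
struct my_event { MY_FIELDS(EMIT_MEMBER) };
#undef EMIT_MEMBER

/* Pass 2: emit a description of each field (name, size, offset). */
#define EMIT_DESC(type, name) \
	printf("  %-8s size=%zu offset=%zu\n", #name, \
	       sizeof(((struct my_event *)0)->name), \
	       offsetof(struct my_event, name));

int main(void)
{
	printf("struct my_event layout:\n");
	MY_FIELDS(EMIT_DESC)
	return 0;
}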
diff --git a/include/trace/syscall.h b/include/trace/syscall.h
index fed853f3d7aa..9674145e2f6a 100644
--- a/include/trace/syscall.h
+++ b/include/trace/syscall.h
@@ -4,6 +4,7 @@
4#include <linux/tracepoint.h> 4#include <linux/tracepoint.h>
5#include <linux/unistd.h> 5#include <linux/unistd.h>
6#include <linux/ftrace_event.h> 6#include <linux/ftrace_event.h>
7#include <linux/thread_info.h>
7 8
8#include <asm/ptrace.h> 9#include <asm/ptrace.h>
9 10
@@ -32,4 +33,18 @@ struct syscall_metadata {
32 struct ftrace_event_call *exit_event; 33 struct ftrace_event_call *exit_event;
33}; 34};
34 35
36#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_HAVE_SYSCALL_TRACEPOINTS)
37static inline void syscall_tracepoint_update(struct task_struct *p)
38{
39 if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
40 set_tsk_thread_flag(p, TIF_SYSCALL_TRACEPOINT);
41 else
42 clear_tsk_thread_flag(p, TIF_SYSCALL_TRACEPOINT);
43}
44#else
45static inline void syscall_tracepoint_update(struct task_struct *p)
46{
47}
48#endif
49
35#endif /* _TRACE_SYSCALL_H */ 50#endif /* _TRACE_SYSCALL_H */
diff --git a/include/uapi/sound/compress_offload.h b/include/uapi/sound/compress_offload.h
index 21eed488783f..1964026b5e09 100644
--- a/include/uapi/sound/compress_offload.h
+++ b/include/uapi/sound/compress_offload.h
@@ -39,7 +39,7 @@
39struct snd_compressed_buffer { 39struct snd_compressed_buffer {
40 __u32 fragment_size; 40 __u32 fragment_size;
41 __u32 fragments; 41 __u32 fragments;
42}; 42} __attribute__((packed, aligned(4)));
43 43
44/** 44/**
45 * struct snd_compr_params: compressed stream params 45 * struct snd_compr_params: compressed stream params
@@ -51,7 +51,7 @@ struct snd_compr_params {
51 struct snd_compressed_buffer buffer; 51 struct snd_compressed_buffer buffer;
52 struct snd_codec codec; 52 struct snd_codec codec;
53 __u8 no_wake_mode; 53 __u8 no_wake_mode;
54}; 54} __attribute__((packed, aligned(4)));
55 55
56/** 56/**
57 * struct snd_compr_tstamp: timestamp descriptor 57 * struct snd_compr_tstamp: timestamp descriptor
@@ -70,7 +70,7 @@ struct snd_compr_tstamp {
70 __u32 pcm_frames; 70 __u32 pcm_frames;
71 __u32 pcm_io_frames; 71 __u32 pcm_io_frames;
72 __u32 sampling_rate; 72 __u32 sampling_rate;
73}; 73} __attribute__((packed, aligned(4)));
74 74
75/** 75/**
76 * struct snd_compr_avail: avail descriptor 76 * struct snd_compr_avail: avail descriptor
@@ -80,7 +80,7 @@ struct snd_compr_tstamp {
80struct snd_compr_avail { 80struct snd_compr_avail {
81 __u64 avail; 81 __u64 avail;
82 struct snd_compr_tstamp tstamp; 82 struct snd_compr_tstamp tstamp;
83} __attribute__((packed)); 83} __attribute__((packed, aligned(4)));
84 84
85enum snd_compr_direction { 85enum snd_compr_direction {
86 SND_COMPRESS_PLAYBACK = 0, 86 SND_COMPRESS_PLAYBACK = 0,
@@ -107,7 +107,7 @@ struct snd_compr_caps {
107 __u32 max_fragments; 107 __u32 max_fragments;
108 __u32 codecs[MAX_NUM_CODECS]; 108 __u32 codecs[MAX_NUM_CODECS];
109 __u32 reserved[11]; 109 __u32 reserved[11];
110}; 110} __attribute__((packed, aligned(4)));
111 111
112/** 112/**
113 * struct snd_compr_codec_caps: query capability of codec 113 * struct snd_compr_codec_caps: query capability of codec
@@ -119,7 +119,7 @@ struct snd_compr_codec_caps {
119 __u32 codec; 119 __u32 codec;
120 __u32 num_descriptors; 120 __u32 num_descriptors;
121 struct snd_codec_desc descriptor[MAX_NUM_CODEC_DESCRIPTORS]; 121 struct snd_codec_desc descriptor[MAX_NUM_CODEC_DESCRIPTORS];
122}; 122} __attribute__((packed, aligned(4)));
123 123
124/** 124/**
125 * @SNDRV_COMPRESS_ENCODER_PADDING: no of samples appended by the encoder at the 125 * @SNDRV_COMPRESS_ENCODER_PADDING: no of samples appended by the encoder at the
@@ -140,7 +140,7 @@ enum {
140struct snd_compr_metadata { 140struct snd_compr_metadata {
141 __u32 key; 141 __u32 key;
142 __u32 value[8]; 142 __u32 value[8];
143}; 143} __attribute__((packed, aligned(4)));
144 144
145/** 145/**
146 * compress path ioctl definitions 146 * compress path ioctl definitions
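The compress_offload/compress_params hunks pin every UAPI struct to __attribute__((packed, aligned(4))) because a 64-bit member is 8-byte aligned on x86_64 but only 4-byte aligned on i386, so the same struct can otherwise have different sizes and offsets across the 32/64-bit ioctl boundary. An illustration with made-up struct names:

#include <stdint.h>
#include <stdio.h>

struct avail_natural {
	uint64_t avail;
	uint32_t sampling_rate;
};					/* 16 bytes on x86_64, 12 on i386 */

struct avail_fixed {
	uint64_t avail;
	uint32_t sampling_rate;
} __attribute__((packed, aligned(4)));	/* 12 bytes on both */

int main(void)
{
	printf("natural: size=%zu align=%zu\n",
	       sizeof(struct avail_natural), _Alignof(struct avail_natural));
	printf("fixed:   size=%zu align=%zu\n",
	       sizeof(struct avail_fixed), _Alignof(struct avail_fixed));
	return 0;
}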
diff --git a/include/uapi/sound/compress_params.h b/include/uapi/sound/compress_params.h
index 165e7059de75..d9bd9ca0d5b0 100644
--- a/include/uapi/sound/compress_params.h
+++ b/include/uapi/sound/compress_params.h
@@ -268,7 +268,7 @@ struct snd_enc_vorbis {
268 __u32 max_bit_rate; 268 __u32 max_bit_rate;
269 __u32 min_bit_rate; 269 __u32 min_bit_rate;
270 __u32 downmix; 270 __u32 downmix;
271}; 271} __attribute__((packed, aligned(4)));
272 272
273 273
274/** 274/**
@@ -284,7 +284,7 @@ struct snd_enc_real {
284 __u32 quant_bits; 284 __u32 quant_bits;
285 __u32 start_region; 285 __u32 start_region;
286 __u32 num_regions; 286 __u32 num_regions;
287}; 287} __attribute__((packed, aligned(4)));
288 288
289/** 289/**
290 * struct snd_enc_flac 290 * struct snd_enc_flac
@@ -308,12 +308,12 @@ struct snd_enc_real {
308struct snd_enc_flac { 308struct snd_enc_flac {
309 __u32 num; 309 __u32 num;
310 __u32 gain; 310 __u32 gain;
311}; 311} __attribute__((packed, aligned(4)));
312 312
313struct snd_enc_generic { 313struct snd_enc_generic {
314 __u32 bw; /* encoder bandwidth */ 314 __u32 bw; /* encoder bandwidth */
315 __s32 reserved[15]; 315 __s32 reserved[15];
316}; 316} __attribute__((packed, aligned(4)));
317 317
318union snd_codec_options { 318union snd_codec_options {
319 struct snd_enc_wma wma; 319 struct snd_enc_wma wma;
@@ -321,7 +321,7 @@ union snd_codec_options {
321 struct snd_enc_real real; 321 struct snd_enc_real real;
322 struct snd_enc_flac flac; 322 struct snd_enc_flac flac;
323 struct snd_enc_generic generic; 323 struct snd_enc_generic generic;
324}; 324} __attribute__((packed, aligned(4)));
325 325
326/** struct snd_codec_desc - description of codec capabilities 326/** struct snd_codec_desc - description of codec capabilities
327 * @max_ch: Maximum number of audio channels 327 * @max_ch: Maximum number of audio channels
@@ -358,7 +358,7 @@ struct snd_codec_desc {
358 __u32 formats; 358 __u32 formats;
359 __u32 min_buffer; 359 __u32 min_buffer;
360 __u32 reserved[15]; 360 __u32 reserved[15];
361}; 361} __attribute__((packed, aligned(4)));
362 362
363/** struct snd_codec 363/** struct snd_codec
364 * @id: Identifies the supported audio encoder/decoder. 364 * @id: Identifies the supported audio encoder/decoder.
@@ -399,6 +399,6 @@ struct snd_codec {
399 __u32 align; 399 __u32 align;
400 union snd_codec_options options; 400 union snd_codec_options options;
401 __u32 reserved[3]; 401 __u32 reserved[3];
402}; 402} __attribute__((packed, aligned(4)));
403 403
404#endif 404#endif
diff --git a/kernel/fork.c b/kernel/fork.c
index d2799d1fc952..6a13c46cd87d 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1487,7 +1487,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1487 1487
1488 total_forks++; 1488 total_forks++;
1489 spin_unlock(&current->sighand->siglock); 1489 spin_unlock(&current->sighand->siglock);
1490 syscall_tracepoint_update(p);
1490 write_unlock_irq(&tasklist_lock); 1491 write_unlock_irq(&tasklist_lock);
1492
1491 proc_fork_connector(p); 1493 proc_fork_connector(p);
1492 cgroup_post_fork(p); 1494 cgroup_post_fork(p);
1493 if (clone_flags & CLONE_THREAD) 1495 if (clone_flags & CLONE_THREAD)
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 6748688813d0..369f41a94124 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1617,6 +1617,7 @@ static int __init crash_save_vmcoreinfo_init(void)
1617#ifdef CONFIG_MEMORY_FAILURE 1617#ifdef CONFIG_MEMORY_FAILURE
1618 VMCOREINFO_NUMBER(PG_hwpoison); 1618 VMCOREINFO_NUMBER(PG_hwpoison);
1619#endif 1619#endif
1620 VMCOREINFO_NUMBER(PG_head_mask);
1620 VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE); 1621 VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
1621 1622
1622 arch_crash_save_vmcoreinfo(); 1623 arch_crash_save_vmcoreinfo();
diff --git a/kernel/smp.c b/kernel/smp.c
index 306f8180b0d5..80c33f8de14f 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -29,6 +29,8 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
29 29
30static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue); 30static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
31 31
32static void flush_smp_call_function_queue(bool warn_cpu_offline);
33
32static int 34static int
33hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu) 35hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
34{ 36{
@@ -51,12 +53,27 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
51#ifdef CONFIG_HOTPLUG_CPU 53#ifdef CONFIG_HOTPLUG_CPU
52 case CPU_UP_CANCELED: 54 case CPU_UP_CANCELED:
53 case CPU_UP_CANCELED_FROZEN: 55 case CPU_UP_CANCELED_FROZEN:
56 /* Fall-through to the CPU_DEAD[_FROZEN] case. */
54 57
55 case CPU_DEAD: 58 case CPU_DEAD:
56 case CPU_DEAD_FROZEN: 59 case CPU_DEAD_FROZEN:
57 free_cpumask_var(cfd->cpumask); 60 free_cpumask_var(cfd->cpumask);
58 free_percpu(cfd->csd); 61 free_percpu(cfd->csd);
59 break; 62 break;
63
64 case CPU_DYING:
65 case CPU_DYING_FROZEN:
66 /*
67 * The IPIs for the smp-call-function callbacks queued by other
68 * CPUs might arrive late, either due to hardware latencies or
69 * because this CPU disabled interrupts (inside stop-machine)
70 * before the IPIs were sent. So flush out any pending callbacks
71 * explicitly (without waiting for the IPIs to arrive), to
72 * ensure that the outgoing CPU doesn't go offline with work
73 * still pending.
74 */
75 flush_smp_call_function_queue(false);
76 break;
60#endif 77#endif
61 }; 78 };
62 79
@@ -177,23 +194,47 @@ static int generic_exec_single(int cpu, struct call_single_data *csd,
177 return 0; 194 return 0;
178} 195}
179 196
180/* 197/**
181 * Invoked by arch to handle an IPI for call function single. Must be 198 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
182 * called from the arch with interrupts disabled. 199 *
200 * Invoked by arch to handle an IPI for call function single.
201 * Must be called with interrupts disabled.
183 */ 202 */
184void generic_smp_call_function_single_interrupt(void) 203void generic_smp_call_function_single_interrupt(void)
185{ 204{
205 flush_smp_call_function_queue(true);
206}
207
208/**
209 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
210 *
211 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
212 * offline CPU. Skip this check if set to 'false'.
213 *
214 * Flush any pending smp-call-function callbacks queued on this CPU. This is
215 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
216 * to ensure that all pending IPI callbacks are run before it goes completely
217 * offline.
218 *
219 * Loop through the call_single_queue and run all the queued callbacks.
220 * Must be called with interrupts disabled.
221 */
222static void flush_smp_call_function_queue(bool warn_cpu_offline)
223{
224 struct llist_head *head;
186 struct llist_node *entry; 225 struct llist_node *entry;
187 struct call_single_data *csd, *csd_next; 226 struct call_single_data *csd, *csd_next;
188 static bool warned; 227 static bool warned;
189 228
190 entry = llist_del_all(&__get_cpu_var(call_single_queue)); 229 WARN_ON(!irqs_disabled());
230
231 head = &__get_cpu_var(call_single_queue);
232 entry = llist_del_all(head);
191 entry = llist_reverse_order(entry); 233 entry = llist_reverse_order(entry);
192 234
193 /* 235 /* There shouldn't be any pending callbacks on an offline CPU. */
194 * Shouldn't receive this interrupt on a cpu that is not yet online. 236 if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
195 */ 237 !warned && !llist_empty(head))) {
196 if (unlikely(!cpu_online(smp_processor_id()) && !warned)) {
197 warned = true; 238 warned = true;
198 WARN(1, "IPI on offline CPU %d\n", smp_processor_id()); 239 WARN(1, "IPI on offline CPU %d\n", smp_processor_id());
199 240
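The kernel/smp.c hunk factors the IPI handler into flush_smp_call_function_queue() so a CPU going offline can drain its own queue without waiting for the IPI to arrive. The core pattern, atomically grabbing the whole lock-free list, reversing it back to submission order, then running each callback, is sketched below with C11 atomics and made-up names (this is not the kernel llist API):

#include <stdatomic.h>
#include <stdio.h>

struct csd {			/* stand-in for struct call_single_data */
	void (*func)(void *);
	void *info;
	struct csd *next;
};

static _Atomic(struct csd *) queue_head;

static void queue_push(struct csd *c)		/* lock-free LIFO push */
{
	c->next = atomic_load(&queue_head);
	while (!atomic_compare_exchange_weak(&queue_head, &c->next, c))
		;
}

static struct csd *reverse(struct csd *n)	/* restore submission order */
{
	struct csd *prev = NULL;

	while (n) {
		struct csd *next = n->next;
		n->next = prev;
		prev = n;
		n = next;
	}
	return prev;
}

static void flush_queue(void)
{
	struct csd *c = atomic_exchange(&queue_head, NULL);	/* "del_all" */

	for (c = reverse(c); c; c = c->next)
		c->func(c->info);
}

static void hello(void *info) { printf("callback: %s\n", (char *)info); }

int main(void)
{
	struct csd a = { hello, "first", NULL };
	struct csd b = { hello, "second", NULL };

	queue_push(&a);
	queue_push(&b);
	flush_queue();		/* prints "first" then "second" */
	return 0;
}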
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 7de6555cfea0..75b22e22a72c 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -136,7 +136,6 @@ static unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
136/* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */ 136/* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */
137static int maxolduid = 65535; 137static int maxolduid = 65535;
138static int minolduid; 138static int minolduid;
139static int min_percpu_pagelist_fract = 8;
140 139
141static int ngroups_max = NGROUPS_MAX; 140static int ngroups_max = NGROUPS_MAX;
142static const int cap_last_cap = CAP_LAST_CAP; 141static const int cap_last_cap = CAP_LAST_CAP;
@@ -861,6 +860,17 @@ static struct ctl_table kern_table[] = {
861 .extra1 = &zero, 860 .extra1 = &zero,
862 .extra2 = &one, 861 .extra2 = &one,
863 }, 862 },
863#ifdef CONFIG_SMP
864 {
865 .procname = "softlockup_all_cpu_backtrace",
866 .data = &sysctl_softlockup_all_cpu_backtrace,
867 .maxlen = sizeof(int),
868 .mode = 0644,
869 .proc_handler = proc_dointvec_minmax,
870 .extra1 = &zero,
871 .extra2 = &one,
872 },
873#endif /* CONFIG_SMP */
864 { 874 {
865 .procname = "nmi_watchdog", 875 .procname = "nmi_watchdog",
866 .data = &watchdog_user_enabled, 876 .data = &watchdog_user_enabled,
@@ -1317,7 +1327,7 @@ static struct ctl_table vm_table[] = {
1317 .maxlen = sizeof(percpu_pagelist_fraction), 1327 .maxlen = sizeof(percpu_pagelist_fraction),
1318 .mode = 0644, 1328 .mode = 0644,
1319 .proc_handler = percpu_pagelist_fraction_sysctl_handler, 1329 .proc_handler = percpu_pagelist_fraction_sysctl_handler,
1320 .extra1 = &min_percpu_pagelist_fract, 1330 .extra1 = &zero,
1321 }, 1331 },
1322#ifdef CONFIG_MMU 1332#ifdef CONFIG_MMU
1323 { 1333 {
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index 33cbd8c203f8..3490407dc7b7 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -492,33 +492,29 @@ static int sys_tracepoint_refcount;
492 492
493void syscall_regfunc(void) 493void syscall_regfunc(void)
494{ 494{
495 unsigned long flags; 495 struct task_struct *p, *t;
496 struct task_struct *g, *t;
497 496
498 if (!sys_tracepoint_refcount) { 497 if (!sys_tracepoint_refcount) {
499 read_lock_irqsave(&tasklist_lock, flags); 498 read_lock(&tasklist_lock);
500 do_each_thread(g, t) { 499 for_each_process_thread(p, t) {
501 /* Skip kernel threads. */ 500 set_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);
502 if (t->mm) 501 }
503 set_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT); 502 read_unlock(&tasklist_lock);
504 } while_each_thread(g, t);
505 read_unlock_irqrestore(&tasklist_lock, flags);
506 } 503 }
507 sys_tracepoint_refcount++; 504 sys_tracepoint_refcount++;
508} 505}
509 506
510void syscall_unregfunc(void) 507void syscall_unregfunc(void)
511{ 508{
512 unsigned long flags; 509 struct task_struct *p, *t;
513 struct task_struct *g, *t;
514 510
515 sys_tracepoint_refcount--; 511 sys_tracepoint_refcount--;
516 if (!sys_tracepoint_refcount) { 512 if (!sys_tracepoint_refcount) {
517 read_lock_irqsave(&tasklist_lock, flags); 513 read_lock(&tasklist_lock);
518 do_each_thread(g, t) { 514 for_each_process_thread(p, t) {
519 clear_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT); 515 clear_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);
520 } while_each_thread(g, t); 516 }
521 read_unlock_irqrestore(&tasklist_lock, flags); 517 read_unlock(&tasklist_lock);
522 } 518 }
523} 519}
524#endif 520#endif
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 516203e665fc..c3319bd1b040 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -31,6 +31,12 @@
31 31
32int watchdog_user_enabled = 1; 32int watchdog_user_enabled = 1;
33int __read_mostly watchdog_thresh = 10; 33int __read_mostly watchdog_thresh = 10;
34#ifdef CONFIG_SMP
35int __read_mostly sysctl_softlockup_all_cpu_backtrace;
36#else
37#define sysctl_softlockup_all_cpu_backtrace 0
38#endif
39
34static int __read_mostly watchdog_running; 40static int __read_mostly watchdog_running;
35static u64 __read_mostly sample_period; 41static u64 __read_mostly sample_period;
36 42
@@ -47,6 +53,7 @@ static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
47static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved); 53static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
48static DEFINE_PER_CPU(struct perf_event *, watchdog_ev); 54static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
49#endif 55#endif
56static unsigned long soft_lockup_nmi_warn;
50 57
51/* boot commands */ 58/* boot commands */
52/* 59/*
@@ -95,6 +102,15 @@ static int __init nosoftlockup_setup(char *str)
95} 102}
96__setup("nosoftlockup", nosoftlockup_setup); 103__setup("nosoftlockup", nosoftlockup_setup);
97/* */ 104/* */
105#ifdef CONFIG_SMP
106static int __init softlockup_all_cpu_backtrace_setup(char *str)
107{
108 sysctl_softlockup_all_cpu_backtrace =
109 !!simple_strtol(str, NULL, 0);
110 return 1;
111}
112__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
113#endif
98 114
99/* 115/*
100 * Hard-lockup warnings should be triggered after just a few seconds. Soft- 116 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
@@ -271,6 +287,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
271 unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts); 287 unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
272 struct pt_regs *regs = get_irq_regs(); 288 struct pt_regs *regs = get_irq_regs();
273 int duration; 289 int duration;
290 int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;
274 291
275 /* kick the hardlockup detector */ 292 /* kick the hardlockup detector */
276 watchdog_interrupt_count(); 293 watchdog_interrupt_count();
@@ -317,6 +334,17 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
317 if (__this_cpu_read(soft_watchdog_warn) == true) 334 if (__this_cpu_read(soft_watchdog_warn) == true)
318 return HRTIMER_RESTART; 335 return HRTIMER_RESTART;
319 336
337 if (softlockup_all_cpu_backtrace) {
338 /* Prevent multiple soft-lockup reports if one cpu is already
339 * engaged in dumping cpu back traces
340 */
341 if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
342 /* Someone else will report us. Let's give up */
343 __this_cpu_write(soft_watchdog_warn, true);
344 return HRTIMER_RESTART;
345 }
346 }
347
320 printk(KERN_EMERG "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n", 348 printk(KERN_EMERG "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
321 smp_processor_id(), duration, 349 smp_processor_id(), duration,
322 current->comm, task_pid_nr(current)); 350 current->comm, task_pid_nr(current));
@@ -327,6 +355,17 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
327 else 355 else
328 dump_stack(); 356 dump_stack();
329 357
358 if (softlockup_all_cpu_backtrace) {
359 /* Avoid generating two back traces for current
360 * given that one is already made above
361 */
362 trigger_allbutself_cpu_backtrace();
363
364 clear_bit(0, &soft_lockup_nmi_warn);
365 /* Barrier to sync with other cpus */
366 smp_mb__after_atomic();
367 }
368
330 if (softlockup_panic) 369 if (softlockup_panic)
331 panic("softlockup: hung tasks"); 370 panic("softlockup: hung tasks");
332 __this_cpu_write(soft_watchdog_warn, true); 371 __this_cpu_write(soft_watchdog_warn, true);
@@ -527,10 +566,8 @@ static void update_timers_all_cpus(void)
527 int cpu; 566 int cpu;
528 567
529 get_online_cpus(); 568 get_online_cpus();
530 preempt_disable();
531 for_each_online_cpu(cpu) 569 for_each_online_cpu(cpu)
532 update_timers(cpu); 570 update_timers(cpu);
533 preempt_enable();
534 put_online_cpus(); 571 put_online_cpus();
535} 572}
536 573
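The watchdog hunk gates the new all-CPU backtrace behind test_and_set_bit() so that only one CPU dumps stacks at a time. The same "first one in reports, everyone else backs off" idea in userspace, with illustrative names:

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag report_in_progress = ATOMIC_FLAG_INIT;

static void report_lockup(int cpu)
{
	/* test-and-set returns the previous value: true means another
	 * reporter already owns the flag, so skip the duplicate dump. */
	if (atomic_flag_test_and_set(&report_in_progress)) {
		printf("cpu %d: another CPU is already dumping, giving up\n", cpu);
		return;
	}

	printf("cpu %d: BUG: soft lockup detected, dumping backtraces\n", cpu);
	/* ... trigger backtraces on the other CPUs here ... */

	atomic_flag_clear(&report_in_progress);	/* mirrors clear_bit() */
}

int main(void)
{
	/* Simulate another CPU already being mid-report. */
	atomic_flag_test_and_set(&report_in_progress);
	report_lockup(1);			/* backs off */

	atomic_flag_clear(&report_in_progress);
	report_lockup(1);			/* now reports */
	return 0;
}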
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 7cfcc1b8e101..7a638aa3545b 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -930,7 +930,7 @@ config LOCKDEP
930 bool 930 bool
931 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT 931 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
932 select STACKTRACE 932 select STACKTRACE
933 select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE && !ARC 933 select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE && !ARC && !SCORE
934 select KALLSYMS 934 select KALLSYMS
935 select KALLSYMS_ALL 935 select KALLSYMS_ALL
936 936
@@ -1408,7 +1408,7 @@ config FAULT_INJECTION_STACKTRACE_FILTER
1408 depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT 1408 depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
1409 depends on !X86_64 1409 depends on !X86_64
1410 select STACKTRACE 1410 select STACKTRACE
1411 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC 1411 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC && !SCORE
1412 help 1412 help
1413 Provide stacktrace filter for fault-injection capabilities 1413 Provide stacktrace filter for fault-injection capabilities
1414 1414
diff --git a/lib/iovec.c b/lib/iovec.c
index 454baa88bf27..7a7c2da4cddf 100644
--- a/lib/iovec.c
+++ b/lib/iovec.c
@@ -51,3 +51,58 @@ int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len)
51 return 0; 51 return 0;
52} 52}
53EXPORT_SYMBOL(memcpy_toiovec); 53EXPORT_SYMBOL(memcpy_toiovec);
54
55/*
56 * Copy kernel to iovec. Returns -EFAULT on error.
57 */
58
59int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata,
60 int offset, int len)
61{
62 int copy;
63 for (; len > 0; ++iov) {
64 /* Skip over the finished iovecs */
65 if (unlikely(offset >= iov->iov_len)) {
66 offset -= iov->iov_len;
67 continue;
68 }
69 copy = min_t(unsigned int, iov->iov_len - offset, len);
70 if (copy_to_user(iov->iov_base + offset, kdata, copy))
71 return -EFAULT;
72 offset = 0;
73 kdata += copy;
74 len -= copy;
75 }
76
77 return 0;
78}
79EXPORT_SYMBOL(memcpy_toiovecend);
80
81/*
82 * Copy iovec to kernel. Returns -EFAULT on error.
83 */
84
85int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
86 int offset, int len)
87{
88 /* Skip over the finished iovecs */
89 while (offset >= iov->iov_len) {
90 offset -= iov->iov_len;
91 iov++;
92 }
93
94 while (len > 0) {
95 u8 __user *base = iov->iov_base + offset;
96 int copy = min_t(unsigned int, len, iov->iov_len - offset);
97
98 offset = 0;
99 if (copy_from_user(kdata, base, copy))
100 return -EFAULT;
101 len -= copy;
102 kdata += copy;
103 iov++;
104 }
105
106 return 0;
107}
108EXPORT_SYMBOL(memcpy_fromiovecend);
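A userspace analog of the memcpy_toiovecend()/memcpy_fromiovecend() helpers moved into lib/iovec.c above: walk an iovec array, skip fully consumed entries, and copy starting at a byte offset. memcpy stands in for copy_to_user(); the helper name and the demo are a sketch, not the kernel code:

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

static int copy_to_iovec_end(const struct iovec *iov, const char *kdata,
			     size_t offset, size_t len)
{
	for (; len > 0; ++iov) {
		size_t copy;

		if (offset >= iov->iov_len) {	/* skip finished iovecs */
			offset -= iov->iov_len;
			continue;
		}
		copy = iov->iov_len - offset;
		if (copy > len)
			copy = len;
		memcpy((char *)iov->iov_base + offset, kdata, copy);
		offset = 0;
		kdata += copy;
		len -= copy;
	}
	return 0;
}

int main(void)
{
	char a[4] = "", b[8] = "";
	struct iovec iov[2] = {
		{ .iov_base = a, .iov_len = sizeof(a) },
		{ .iov_base = b, .iov_len = sizeof(b) },
	};

	/* Write 6 bytes starting 2 bytes into the first buffer. */
	copy_to_iovec_end(iov, "hello", 2, 6);
	printf("a ends with \"%.2s\", b = \"%s\"\n", a + 2, b);
	return 0;
}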
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
index df6839e3ce08..b74da447e81e 100644
--- a/lib/lz4/lz4_decompress.c
+++ b/lib/lz4/lz4_decompress.c
@@ -72,6 +72,8 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
72 len = *ip++; 72 len = *ip++;
73 for (; len == 255; length += 255) 73 for (; len == 255; length += 255)
74 len = *ip++; 74 len = *ip++;
75 if (unlikely(length > (size_t)(length + len)))
76 goto _output_error;
75 length += len; 77 length += len;
76 } 78 }
77 79
@@ -106,6 +108,8 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
106 if (length == ML_MASK) { 108 if (length == ML_MASK) {
107 for (; *ip == 255; length += 255) 109 for (; *ip == 255; length += 255)
108 ip++; 110 ip++;
111 if (unlikely(length > (size_t)(length + *ip)))
112 goto _output_error;
109 length += *ip++; 113 length += *ip++;
110 } 114 }
111 115
@@ -155,7 +159,7 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
155 159
156 /* write overflow error detected */ 160 /* write overflow error detected */
157_output_error: 161_output_error:
158 return (int) (-(((char *)ip) - source)); 162 return -1;
159} 163}
160 164
161static int lz4_uncompress_unknownoutputsize(const char *source, char *dest, 165static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
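The lz4_uncompress hunk guards the run-length accumulation against integer wrap-around: if adding the next byte makes the sum smaller than it was, the stream is corrupt and decompression must bail out. A stand-alone illustration of that test (helper name is made up):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Returns false (and leaves *length untouched) if the addition would wrap. */
static bool accumulate(size_t *length, uint8_t next)
{
	if (*length > (size_t)(*length + next))
		return false;		/* wrapped around: corrupt input */
	*length += next;
	return true;
}

int main(void)
{
	size_t len = 1000;
	bool ok;

	ok = accumulate(&len, 255);
	printf("normal add:   ok=%d len=%zu\n", ok, len);

	len = SIZE_MAX - 10;
	ok = accumulate(&len, 255);
	printf("wrapping add: ok=%d\n", ok);
	return 0;
}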
diff --git a/lib/lzo/lzo1x_decompress_safe.c b/lib/lzo/lzo1x_decompress_safe.c
index 569985d522d5..8563081e8da3 100644
--- a/lib/lzo/lzo1x_decompress_safe.c
+++ b/lib/lzo/lzo1x_decompress_safe.c
@@ -19,11 +19,31 @@
19#include <linux/lzo.h> 19#include <linux/lzo.h>
20#include "lzodefs.h" 20#include "lzodefs.h"
21 21
22#define HAVE_IP(x) ((size_t)(ip_end - ip) >= (size_t)(x)) 22#define HAVE_IP(t, x) \
23#define HAVE_OP(x) ((size_t)(op_end - op) >= (size_t)(x)) 23 (((size_t)(ip_end - ip) >= (size_t)(t + x)) && \
24#define NEED_IP(x) if (!HAVE_IP(x)) goto input_overrun 24 (((t + x) >= t) && ((t + x) >= x)))
25#define NEED_OP(x) if (!HAVE_OP(x)) goto output_overrun 25
26#define TEST_LB(m_pos) if ((m_pos) < out) goto lookbehind_overrun 26#define HAVE_OP(t, x) \
27 (((size_t)(op_end - op) >= (size_t)(t + x)) && \
28 (((t + x) >= t) && ((t + x) >= x)))
29
30#define NEED_IP(t, x) \
31 do { \
32 if (!HAVE_IP(t, x)) \
33 goto input_overrun; \
34 } while (0)
35
36#define NEED_OP(t, x) \
37 do { \
38 if (!HAVE_OP(t, x)) \
39 goto output_overrun; \
40 } while (0)
41
42#define TEST_LB(m_pos) \
43 do { \
44 if ((m_pos) < out) \
45 goto lookbehind_overrun; \
46 } while (0)
27 47
28int lzo1x_decompress_safe(const unsigned char *in, size_t in_len, 48int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
29 unsigned char *out, size_t *out_len) 49 unsigned char *out, size_t *out_len)
@@ -58,14 +78,14 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
58 while (unlikely(*ip == 0)) { 78 while (unlikely(*ip == 0)) {
59 t += 255; 79 t += 255;
60 ip++; 80 ip++;
61 NEED_IP(1); 81 NEED_IP(1, 0);
62 } 82 }
63 t += 15 + *ip++; 83 t += 15 + *ip++;
64 } 84 }
65 t += 3; 85 t += 3;
66copy_literal_run: 86copy_literal_run:
67#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) 87#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
68 if (likely(HAVE_IP(t + 15) && HAVE_OP(t + 15))) { 88 if (likely(HAVE_IP(t, 15) && HAVE_OP(t, 15))) {
69 const unsigned char *ie = ip + t; 89 const unsigned char *ie = ip + t;
70 unsigned char *oe = op + t; 90 unsigned char *oe = op + t;
71 do { 91 do {
@@ -81,8 +101,8 @@ copy_literal_run:
81 } else 101 } else
82#endif 102#endif
83 { 103 {
84 NEED_OP(t); 104 NEED_OP(t, 0);
85 NEED_IP(t + 3); 105 NEED_IP(t, 3);
86 do { 106 do {
87 *op++ = *ip++; 107 *op++ = *ip++;
88 } while (--t > 0); 108 } while (--t > 0);
@@ -95,7 +115,7 @@ copy_literal_run:
95 m_pos -= t >> 2; 115 m_pos -= t >> 2;
96 m_pos -= *ip++ << 2; 116 m_pos -= *ip++ << 2;
97 TEST_LB(m_pos); 117 TEST_LB(m_pos);
98 NEED_OP(2); 118 NEED_OP(2, 0);
99 op[0] = m_pos[0]; 119 op[0] = m_pos[0];
100 op[1] = m_pos[1]; 120 op[1] = m_pos[1];
101 op += 2; 121 op += 2;
@@ -119,10 +139,10 @@ copy_literal_run:
119 while (unlikely(*ip == 0)) { 139 while (unlikely(*ip == 0)) {
120 t += 255; 140 t += 255;
121 ip++; 141 ip++;
122 NEED_IP(1); 142 NEED_IP(1, 0);
123 } 143 }
124 t += 31 + *ip++; 144 t += 31 + *ip++;
125 NEED_IP(2); 145 NEED_IP(2, 0);
126 } 146 }
127 m_pos = op - 1; 147 m_pos = op - 1;
128 next = get_unaligned_le16(ip); 148 next = get_unaligned_le16(ip);
@@ -137,10 +157,10 @@ copy_literal_run:
137 while (unlikely(*ip == 0)) { 157 while (unlikely(*ip == 0)) {
138 t += 255; 158 t += 255;
139 ip++; 159 ip++;
140 NEED_IP(1); 160 NEED_IP(1, 0);
141 } 161 }
142 t += 7 + *ip++; 162 t += 7 + *ip++;
143 NEED_IP(2); 163 NEED_IP(2, 0);
144 } 164 }
145 next = get_unaligned_le16(ip); 165 next = get_unaligned_le16(ip);
146 ip += 2; 166 ip += 2;
@@ -154,7 +174,7 @@ copy_literal_run:
154#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) 174#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
155 if (op - m_pos >= 8) { 175 if (op - m_pos >= 8) {
156 unsigned char *oe = op + t; 176 unsigned char *oe = op + t;
157 if (likely(HAVE_OP(t + 15))) { 177 if (likely(HAVE_OP(t, 15))) {
158 do { 178 do {
159 COPY8(op, m_pos); 179 COPY8(op, m_pos);
160 op += 8; 180 op += 8;
@@ -164,7 +184,7 @@ copy_literal_run:
164 m_pos += 8; 184 m_pos += 8;
165 } while (op < oe); 185 } while (op < oe);
166 op = oe; 186 op = oe;
167 if (HAVE_IP(6)) { 187 if (HAVE_IP(6, 0)) {
168 state = next; 188 state = next;
169 COPY4(op, ip); 189 COPY4(op, ip);
170 op += next; 190 op += next;
@@ -172,7 +192,7 @@ copy_literal_run:
172 continue; 192 continue;
173 } 193 }
174 } else { 194 } else {
175 NEED_OP(t); 195 NEED_OP(t, 0);
176 do { 196 do {
177 *op++ = *m_pos++; 197 *op++ = *m_pos++;
178 } while (op < oe); 198 } while (op < oe);
@@ -181,7 +201,7 @@ copy_literal_run:
181#endif 201#endif
182 { 202 {
183 unsigned char *oe = op + t; 203 unsigned char *oe = op + t;
184 NEED_OP(t); 204 NEED_OP(t, 0);
185 op[0] = m_pos[0]; 205 op[0] = m_pos[0];
186 op[1] = m_pos[1]; 206 op[1] = m_pos[1];
187 op += 2; 207 op += 2;
@@ -194,15 +214,15 @@ match_next:
194 state = next; 214 state = next;
195 t = next; 215 t = next;
196#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) 216#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
197 if (likely(HAVE_IP(6) && HAVE_OP(4))) { 217 if (likely(HAVE_IP(6, 0) && HAVE_OP(4, 0))) {
198 COPY4(op, ip); 218 COPY4(op, ip);
199 op += t; 219 op += t;
200 ip += t; 220 ip += t;
201 } else 221 } else
202#endif 222#endif
203 { 223 {
204 NEED_IP(t + 3); 224 NEED_IP(t, 3);
205 NEED_OP(t); 225 NEED_OP(t, 0);
206 while (t > 0) { 226 while (t > 0) {
207 *op++ = *ip++; 227 *op++ = *ip++;
208 t--; 228 t--;
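The lzo1x hunk rewrites HAVE_IP()/HAVE_OP() so the requested count is passed in two parts (t, x) and the sum is checked for overflow before it is compared against the remaining space. A tiny stand-alone version of that bounds test (names are illustrative):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static bool have_room(size_t remaining, size_t t, size_t x)
{
	/* Reject the request outright if t + x wrapped around. */
	if (t + x < t || t + x < x)
		return false;
	return remaining >= t + x;
}

int main(void)
{
	printf("%d\n", have_room(100, 40, 20));		/* 1: fits */
	printf("%d\n", have_room(100, 90, 20));		/* 0: too big */
	printf("%d\n", have_room(100, SIZE_MAX, 2));	/* 0: would wrap */
	return 0;
}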
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 649d097853a1..4abda074ea45 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -86,6 +86,7 @@ static unsigned int io_tlb_index;
86 * We need to save away the original address corresponding to a mapped entry 86 * We need to save away the original address corresponding to a mapped entry
87 * for the sync operations. 87 * for the sync operations.
88 */ 88 */
89#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
89static phys_addr_t *io_tlb_orig_addr; 90static phys_addr_t *io_tlb_orig_addr;
90 91
91/* 92/*
@@ -188,12 +189,14 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
188 io_tlb_list = memblock_virt_alloc( 189 io_tlb_list = memblock_virt_alloc(
189 PAGE_ALIGN(io_tlb_nslabs * sizeof(int)), 190 PAGE_ALIGN(io_tlb_nslabs * sizeof(int)),
190 PAGE_SIZE); 191 PAGE_SIZE);
191 for (i = 0; i < io_tlb_nslabs; i++)
192 io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
193 io_tlb_index = 0;
194 io_tlb_orig_addr = memblock_virt_alloc( 192 io_tlb_orig_addr = memblock_virt_alloc(
195 PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)), 193 PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)),
196 PAGE_SIZE); 194 PAGE_SIZE);
195 for (i = 0; i < io_tlb_nslabs; i++) {
196 io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
197 io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
198 }
199 io_tlb_index = 0;
197 200
198 if (verbose) 201 if (verbose)
199 swiotlb_print_info(); 202 swiotlb_print_info();
@@ -313,10 +316,6 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
313 if (!io_tlb_list) 316 if (!io_tlb_list)
314 goto cleanup3; 317 goto cleanup3;
315 318
316 for (i = 0; i < io_tlb_nslabs; i++)
317 io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
318 io_tlb_index = 0;
319
320 io_tlb_orig_addr = (phys_addr_t *) 319 io_tlb_orig_addr = (phys_addr_t *)
321 __get_free_pages(GFP_KERNEL, 320 __get_free_pages(GFP_KERNEL,
322 get_order(io_tlb_nslabs * 321 get_order(io_tlb_nslabs *
@@ -324,7 +323,11 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
324 if (!io_tlb_orig_addr) 323 if (!io_tlb_orig_addr)
325 goto cleanup4; 324 goto cleanup4;
326 325
327 memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(phys_addr_t)); 326 for (i = 0; i < io_tlb_nslabs; i++) {
327 io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
328 io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
329 }
330 io_tlb_index = 0;
328 331
329 swiotlb_print_info(); 332 swiotlb_print_info();
330 333
@@ -556,7 +559,8 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
556 /* 559 /*
557 * First, sync the memory before unmapping the entry 560 * First, sync the memory before unmapping the entry
558 */ 561 */
559 if (orig_addr && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))) 562 if (orig_addr != INVALID_PHYS_ADDR &&
563 ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
560 swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE); 564 swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);
561 565
562 /* 566 /*
@@ -573,8 +577,10 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
573 * Step 1: return the slots to the free list, merging the 577 * Step 1: return the slots to the free list, merging the
574 * slots with superceeding slots 578 * slots with superceeding slots
575 */ 579 */
576 for (i = index + nslots - 1; i >= index; i--) 580 for (i = index + nslots - 1; i >= index; i--) {
577 io_tlb_list[i] = ++count; 581 io_tlb_list[i] = ++count;
582 io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
583 }
578 /* 584 /*
579 * Step 2: merge the returned slots with the preceding slots, 585 * Step 2: merge the returned slots with the preceding slots,
580 * if available (non zero) 586 * if available (non zero)
@@ -593,6 +599,8 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
593 int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT; 599 int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
594 phys_addr_t orig_addr = io_tlb_orig_addr[index]; 600 phys_addr_t orig_addr = io_tlb_orig_addr[index];
595 601
602 if (orig_addr == INVALID_PHYS_ADDR)
603 return;
596 orig_addr += (unsigned long)tlb_addr & ((1 << IO_TLB_SHIFT) - 1); 604 orig_addr += (unsigned long)tlb_addr & ((1 << IO_TLB_SHIFT) - 1);
597 605
598 switch (target) { 606 switch (target) {
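The swiotlb hunk replaces the implicit "orig_addr == 0 means unused" convention with an explicit INVALID_PHYS_ADDR sentinel (all bits set), because physical address 0 can be a perfectly valid bounce-buffer origin. A minimal illustration of the sentinel pattern with made-up names:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr;
#define INVALID_PHYS	((phys_addr)~0ULL)
#define NSLOTS		4

static phys_addr orig_addr[NSLOTS];

static void init_slots(void)
{
	for (int i = 0; i < NSLOTS; i++)
		orig_addr[i] = INVALID_PHYS;	/* unused, not "address 0" */
}

static void sync_slot(int i)
{
	if (orig_addr[i] == INVALID_PHYS)
		return;				/* nothing mapped here */
	printf("sync slot %d back to %#llx\n", i,
	       (unsigned long long)orig_addr[i]);
}

int main(void)
{
	init_slots();
	orig_addr[1] = 0x0;	/* physical address 0 is a legal mapping */
	sync_slot(0);		/* skipped */
	sync_slot(1);		/* synced, even though the address is 0 */
	return 0;
}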
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e60837dc785c..33514d88fef9 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -941,6 +941,37 @@ unlock:
941 spin_unlock(ptl); 941 spin_unlock(ptl);
942} 942}
943 943
944/*
945 * Save CONFIG_DEBUG_PAGEALLOC from faulting falsely on tail pages
946 * during copy_user_huge_page()'s copy_page_rep(): in the case when
947 * the source page gets split and a tail freed before copy completes.
948 * Called under pmd_lock of checked pmd, so safe from splitting itself.
949 */
950static void get_user_huge_page(struct page *page)
951{
952 if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC)) {
953 struct page *endpage = page + HPAGE_PMD_NR;
954
955 atomic_add(HPAGE_PMD_NR, &page->_count);
956 while (++page < endpage)
957 get_huge_page_tail(page);
958 } else {
959 get_page(page);
960 }
961}
962
963static void put_user_huge_page(struct page *page)
964{
965 if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC)) {
966 struct page *endpage = page + HPAGE_PMD_NR;
967
968 while (page < endpage)
969 put_page(page++);
970 } else {
971 put_page(page);
972 }
973}
974
944static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm, 975static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
945 struct vm_area_struct *vma, 976 struct vm_area_struct *vma,
946 unsigned long address, 977 unsigned long address,
@@ -1074,7 +1105,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
1074 ret |= VM_FAULT_WRITE; 1105 ret |= VM_FAULT_WRITE;
1075 goto out_unlock; 1106 goto out_unlock;
1076 } 1107 }
1077 get_page(page); 1108 get_user_huge_page(page);
1078 spin_unlock(ptl); 1109 spin_unlock(ptl);
1079alloc: 1110alloc:
1080 if (transparent_hugepage_enabled(vma) && 1111 if (transparent_hugepage_enabled(vma) &&
@@ -1095,7 +1126,7 @@ alloc:
1095 split_huge_page(page); 1126 split_huge_page(page);
1096 ret |= VM_FAULT_FALLBACK; 1127 ret |= VM_FAULT_FALLBACK;
1097 } 1128 }
1098 put_page(page); 1129 put_user_huge_page(page);
1099 } 1130 }
1100 count_vm_event(THP_FAULT_FALLBACK); 1131 count_vm_event(THP_FAULT_FALLBACK);
1101 goto out; 1132 goto out;
@@ -1105,7 +1136,7 @@ alloc:
1105 put_page(new_page); 1136 put_page(new_page);
1106 if (page) { 1137 if (page) {
1107 split_huge_page(page); 1138 split_huge_page(page);
1108 put_page(page); 1139 put_user_huge_page(page);
1109 } else 1140 } else
1110 split_huge_page_pmd(vma, address, pmd); 1141 split_huge_page_pmd(vma, address, pmd);
1111 ret |= VM_FAULT_FALLBACK; 1142 ret |= VM_FAULT_FALLBACK;
@@ -1127,7 +1158,7 @@ alloc:
1127 1158
1128 spin_lock(ptl); 1159 spin_lock(ptl);
1129 if (page) 1160 if (page)
1130 put_page(page); 1161 put_user_huge_page(page);
1131 if (unlikely(!pmd_same(*pmd, orig_pmd))) { 1162 if (unlikely(!pmd_same(*pmd, orig_pmd))) {
1132 spin_unlock(ptl); 1163 spin_unlock(ptl);
1133 mem_cgroup_uncharge_page(new_page); 1164 mem_cgroup_uncharge_page(new_page);
@@ -2392,8 +2423,6 @@ static void collapse_huge_page(struct mm_struct *mm,
2392 pmd = mm_find_pmd(mm, address); 2423 pmd = mm_find_pmd(mm, address);
2393 if (!pmd) 2424 if (!pmd)
2394 goto out; 2425 goto out;
2395 if (pmd_trans_huge(*pmd))
2396 goto out;
2397 2426
2398 anon_vma_lock_write(vma->anon_vma); 2427 anon_vma_lock_write(vma->anon_vma);
2399 2428
@@ -2492,8 +2521,6 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
2492 pmd = mm_find_pmd(mm, address); 2521 pmd = mm_find_pmd(mm, address);
2493 if (!pmd) 2522 if (!pmd)
2494 goto out; 2523 goto out;
2495 if (pmd_trans_huge(*pmd))
2496 goto out;
2497 2524
2498 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load)); 2525 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
2499 pte = pte_offset_map_lock(mm, pmd, address, &ptl); 2526 pte = pte_offset_map_lock(mm, pmd, address, &ptl);
@@ -2846,12 +2873,22 @@ void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
2846static void split_huge_page_address(struct mm_struct *mm, 2873static void split_huge_page_address(struct mm_struct *mm,
2847 unsigned long address) 2874 unsigned long address)
2848{ 2875{
2876 pgd_t *pgd;
2877 pud_t *pud;
2849 pmd_t *pmd; 2878 pmd_t *pmd;
2850 2879
2851 VM_BUG_ON(!(address & ~HPAGE_PMD_MASK)); 2880 VM_BUG_ON(!(address & ~HPAGE_PMD_MASK));
2852 2881
2853 pmd = mm_find_pmd(mm, address); 2882 pgd = pgd_offset(mm, address);
2854 if (!pmd) 2883 if (!pgd_present(*pgd))
2884 return;
2885
2886 pud = pud_offset(pgd, address);
2887 if (!pud_present(*pud))
2888 return;
2889
2890 pmd = pmd_offset(pud, address);
2891 if (!pmd_present(*pmd))
2855 return; 2892 return;
2856 /* 2893 /*
2857 * Caller holds the mmap_sem write mode, so a huge pmd cannot 2894 * Caller holds the mmap_sem write mode, so a huge pmd cannot
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 226910cb7c9b..2024bbd573d2 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2520,6 +2520,31 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
2520 update_mmu_cache(vma, address, ptep); 2520 update_mmu_cache(vma, address, ptep);
2521} 2521}
2522 2522
2523static int is_hugetlb_entry_migration(pte_t pte)
2524{
2525 swp_entry_t swp;
2526
2527 if (huge_pte_none(pte) || pte_present(pte))
2528 return 0;
2529 swp = pte_to_swp_entry(pte);
2530 if (non_swap_entry(swp) && is_migration_entry(swp))
2531 return 1;
2532 else
2533 return 0;
2534}
2535
2536static int is_hugetlb_entry_hwpoisoned(pte_t pte)
2537{
2538 swp_entry_t swp;
2539
2540 if (huge_pte_none(pte) || pte_present(pte))
2541 return 0;
2542 swp = pte_to_swp_entry(pte);
2543 if (non_swap_entry(swp) && is_hwpoison_entry(swp))
2544 return 1;
2545 else
2546 return 0;
2547}
2523 2548
2524int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, 2549int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
2525 struct vm_area_struct *vma) 2550 struct vm_area_struct *vma)
@@ -2559,10 +2584,26 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
2559 dst_ptl = huge_pte_lock(h, dst, dst_pte); 2584 dst_ptl = huge_pte_lock(h, dst, dst_pte);
2560 src_ptl = huge_pte_lockptr(h, src, src_pte); 2585 src_ptl = huge_pte_lockptr(h, src, src_pte);
2561 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 2586 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
2562 if (!huge_pte_none(huge_ptep_get(src_pte))) { 2587 entry = huge_ptep_get(src_pte);
2588 if (huge_pte_none(entry)) { /* skip none entry */
2589 ;
2590 } else if (unlikely(is_hugetlb_entry_migration(entry) ||
2591 is_hugetlb_entry_hwpoisoned(entry))) {
2592 swp_entry_t swp_entry = pte_to_swp_entry(entry);
2593
2594 if (is_write_migration_entry(swp_entry) && cow) {
2595 /*
2596 * COW mappings require pages in both
2597 * parent and child to be set to read.
2598 */
2599 make_migration_entry_read(&swp_entry);
2600 entry = swp_entry_to_pte(swp_entry);
2601 set_huge_pte_at(src, addr, src_pte, entry);
2602 }
2603 set_huge_pte_at(dst, addr, dst_pte, entry);
2604 } else {
2563 if (cow) 2605 if (cow)
2564 huge_ptep_set_wrprotect(src, addr, src_pte); 2606 huge_ptep_set_wrprotect(src, addr, src_pte);
2565 entry = huge_ptep_get(src_pte);
2566 ptepage = pte_page(entry); 2607 ptepage = pte_page(entry);
2567 get_page(ptepage); 2608 get_page(ptepage);
2568 page_dup_rmap(ptepage); 2609 page_dup_rmap(ptepage);
@@ -2578,32 +2619,6 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
2578 return ret; 2619 return ret;
2579} 2620}
2580 2621
2581static int is_hugetlb_entry_migration(pte_t pte)
2582{
2583 swp_entry_t swp;
2584
2585 if (huge_pte_none(pte) || pte_present(pte))
2586 return 0;
2587 swp = pte_to_swp_entry(pte);
2588 if (non_swap_entry(swp) && is_migration_entry(swp))
2589 return 1;
2590 else
2591 return 0;
2592}
2593
2594static int is_hugetlb_entry_hwpoisoned(pte_t pte)
2595{
2596 swp_entry_t swp;
2597
2598 if (huge_pte_none(pte) || pte_present(pte))
2599 return 0;
2600 swp = pte_to_swp_entry(pte);
2601 if (non_swap_entry(swp) && is_hwpoison_entry(swp))
2602 return 1;
2603 else
2604 return 0;
2605}
2606
2607void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, 2622void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
2608 unsigned long start, unsigned long end, 2623 unsigned long start, unsigned long end,
2609 struct page *ref_page) 2624 struct page *ref_page)
diff --git a/mm/ksm.c b/mm/ksm.c
index 68710e80994a..346ddc9e4c0d 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -945,7 +945,6 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
945 pmd = mm_find_pmd(mm, addr); 945 pmd = mm_find_pmd(mm, addr);
946 if (!pmd) 946 if (!pmd)
947 goto out; 947 goto out;
948 BUG_ON(pmd_trans_huge(*pmd));
949 948
950 mmun_start = addr; 949 mmun_start = addr;
951 mmun_end = addr + PAGE_SIZE; 950 mmun_end = addr + PAGE_SIZE;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 284974230459..eb58de19f815 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -656,19 +656,18 @@ static unsigned long change_prot_numa(struct vm_area_struct *vma,
656 * @nodes and @flags,) it's isolated and queued to the pagelist which is 656 * @nodes and @flags,) it's isolated and queued to the pagelist which is
657 * passed via @private.) 657 * passed via @private.)
658 */ 658 */
659static struct vm_area_struct * 659static int
660queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end, 660queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
661 const nodemask_t *nodes, unsigned long flags, void *private) 661 const nodemask_t *nodes, unsigned long flags, void *private)
662{ 662{
663 int err; 663 int err = 0;
664 struct vm_area_struct *first, *vma, *prev; 664 struct vm_area_struct *vma, *prev;
665
666 665
667 first = find_vma(mm, start); 666 vma = find_vma(mm, start);
668 if (!first) 667 if (!vma)
669 return ERR_PTR(-EFAULT); 668 return -EFAULT;
670 prev = NULL; 669 prev = NULL;
671 for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) { 670 for (; vma && vma->vm_start < end; vma = vma->vm_next) {
672 unsigned long endvma = vma->vm_end; 671 unsigned long endvma = vma->vm_end;
673 672
674 if (endvma > end) 673 if (endvma > end)
@@ -678,9 +677,9 @@ queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
678 677
679 if (!(flags & MPOL_MF_DISCONTIG_OK)) { 678 if (!(flags & MPOL_MF_DISCONTIG_OK)) {
680 if (!vma->vm_next && vma->vm_end < end) 679 if (!vma->vm_next && vma->vm_end < end)
681 return ERR_PTR(-EFAULT); 680 return -EFAULT;
682 if (prev && prev->vm_end < vma->vm_start) 681 if (prev && prev->vm_end < vma->vm_start)
683 return ERR_PTR(-EFAULT); 682 return -EFAULT;
684 } 683 }
685 684
686 if (flags & MPOL_MF_LAZY) { 685 if (flags & MPOL_MF_LAZY) {
@@ -694,15 +693,13 @@ queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
694 693
695 err = queue_pages_pgd_range(vma, start, endvma, nodes, 694 err = queue_pages_pgd_range(vma, start, endvma, nodes,
696 flags, private); 695 flags, private);
697 if (err) { 696 if (err)
698 first = ERR_PTR(err);
699 break; 697 break;
700 }
701 } 698 }
702next: 699next:
703 prev = vma; 700 prev = vma;
704 } 701 }
705 return first; 702 return err;
706} 703}
707 704
708/* 705/*
@@ -1156,16 +1153,17 @@ out:
1156 1153
1157/* 1154/*
1158 * Allocate a new page for page migration based on vma policy. 1155 * Allocate a new page for page migration based on vma policy.
1159 * Start assuming that page is mapped by vma pointed to by @private. 1156 * Start by assuming the page is mapped by the same vma as contains @start.
1160 * Search forward from there, if not. N.B., this assumes that the 1157 * Search forward from there, if not. N.B., this assumes that the
1161 * list of pages handed to migrate_pages()--which is how we get here-- 1158 * list of pages handed to migrate_pages()--which is how we get here--
1162 * is in virtual address order. 1159 * is in virtual address order.
1163 */ 1160 */
1164static struct page *new_vma_page(struct page *page, unsigned long private, int **x) 1161static struct page *new_page(struct page *page, unsigned long start, int **x)
1165{ 1162{
1166 struct vm_area_struct *vma = (struct vm_area_struct *)private; 1163 struct vm_area_struct *vma;
1167 unsigned long uninitialized_var(address); 1164 unsigned long uninitialized_var(address);
1168 1165
1166 vma = find_vma(current->mm, start);
1169 while (vma) { 1167 while (vma) {
1170 address = page_address_in_vma(page, vma); 1168 address = page_address_in_vma(page, vma);
1171 if (address != -EFAULT) 1169 if (address != -EFAULT)
@@ -1195,7 +1193,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1195 return -ENOSYS; 1193 return -ENOSYS;
1196} 1194}
1197 1195
1198static struct page *new_vma_page(struct page *page, unsigned long private, int **x) 1196static struct page *new_page(struct page *page, unsigned long start, int **x)
1199{ 1197{
1200 return NULL; 1198 return NULL;
1201} 1199}
@@ -1205,7 +1203,6 @@ static long do_mbind(unsigned long start, unsigned long len,
1205 unsigned short mode, unsigned short mode_flags, 1203 unsigned short mode, unsigned short mode_flags,
1206 nodemask_t *nmask, unsigned long flags) 1204 nodemask_t *nmask, unsigned long flags)
1207{ 1205{
1208 struct vm_area_struct *vma;
1209 struct mm_struct *mm = current->mm; 1206 struct mm_struct *mm = current->mm;
1210 struct mempolicy *new; 1207 struct mempolicy *new;
1211 unsigned long end; 1208 unsigned long end;
@@ -1271,11 +1268,9 @@ static long do_mbind(unsigned long start, unsigned long len,
1271 if (err) 1268 if (err)
1272 goto mpol_out; 1269 goto mpol_out;
1273 1270
1274 vma = queue_pages_range(mm, start, end, nmask, 1271 err = queue_pages_range(mm, start, end, nmask,
1275 flags | MPOL_MF_INVERT, &pagelist); 1272 flags | MPOL_MF_INVERT, &pagelist);
1276 1273 if (!err)
1277 err = PTR_ERR(vma); /* maybe ... */
1278 if (!IS_ERR(vma))
1279 err = mbind_range(mm, start, end, new); 1274 err = mbind_range(mm, start, end, new);
1280 1275
1281 if (!err) { 1276 if (!err) {
@@ -1283,9 +1278,8 @@ static long do_mbind(unsigned long start, unsigned long len,
1283 1278
1284 if (!list_empty(&pagelist)) { 1279 if (!list_empty(&pagelist)) {
1285 WARN_ON_ONCE(flags & MPOL_MF_LAZY); 1280 WARN_ON_ONCE(flags & MPOL_MF_LAZY);
1286 nr_failed = migrate_pages(&pagelist, new_vma_page, 1281 nr_failed = migrate_pages(&pagelist, new_page, NULL,
1287 NULL, (unsigned long)vma, 1282 start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
1288 MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
1289 if (nr_failed) 1283 if (nr_failed)
1290 putback_movable_pages(&pagelist); 1284 putback_movable_pages(&pagelist);
1291 } 1285 }
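
The mempolicy.c hunks above change queue_pages_range() from returning a vm_area_struct pointer (or an ERR_PTR-encoded error) to returning a plain int, which is why do_mbind() can drop its local vma and the PTR_ERR/IS_ERR dance around it. A rough user-space sketch of the two calling conventions follows; the ERR_PTR/PTR_ERR/IS_ERR macros are re-declared locally for the example, and queue_old()/queue_new() are made-up stand-ins, not kernel functions.

#include <stdio.h>
#include <errno.h>

/* Local stand-ins for the kernel's ERR_PTR helpers (illustration only). */
#define MAX_ERRNO   4095
#define ERR_PTR(err)  ((void *)(long)(err))
#define PTR_ERR(ptr)  ((long)(ptr))
#define IS_ERR(ptr)   ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

struct vma { int id; };

/* Old style: success returns a pointer, failure returns ERR_PTR(-errno). */
static struct vma *queue_old(int fail)
{
    static struct vma v = { .id = 1 };
    return fail ? ERR_PTR(-EFAULT) : &v;
}

/* New style: the caller no longer needs the vma, so return 0 or -errno. */
static int queue_new(int fail)
{
    return fail ? -EFAULT : 0;
}

int main(void)
{
    struct vma *vma = queue_old(1);
    if (IS_ERR(vma))
        printf("old style: err=%ld\n", PTR_ERR(vma));

    int err = queue_new(1);
    if (err)
        printf("new style: err=%d\n", err);
    return 0;
}
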
diff --git a/mm/migrate.c b/mm/migrate.c
index 63f0cd559999..9e0beaa91845 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -120,8 +120,6 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
120 pmd = mm_find_pmd(mm, addr); 120 pmd = mm_find_pmd(mm, addr);
121 if (!pmd) 121 if (!pmd)
122 goto out; 122 goto out;
123 if (pmd_trans_huge(*pmd))
124 goto out;
125 123
126 ptep = pte_offset_map(pmd, addr); 124 ptep = pte_offset_map(pmd, addr);
127 125
diff --git a/mm/nommu.c b/mm/nommu.c
index b78e3a8f5ee7..4a852f6c5709 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -786,7 +786,7 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
786 for (i = 0; i < VMACACHE_SIZE; i++) { 786 for (i = 0; i < VMACACHE_SIZE; i++) {
787 /* if the vma is cached, invalidate the entire cache */ 787 /* if the vma is cached, invalidate the entire cache */
788 if (curr->vmacache[i] == vma) { 788 if (curr->vmacache[i] == vma) {
789 vmacache_invalidate(curr->mm); 789 vmacache_invalidate(mm);
790 break; 790 break;
791 } 791 }
792 } 792 }
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4f59fa29eda8..20d17f8266fe 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -69,6 +69,7 @@
69 69
70/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */ 70/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
71static DEFINE_MUTEX(pcp_batch_high_lock); 71static DEFINE_MUTEX(pcp_batch_high_lock);
72#define MIN_PERCPU_PAGELIST_FRACTION (8)
72 73
73#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID 74#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
74DEFINE_PER_CPU(int, numa_node); 75DEFINE_PER_CPU(int, numa_node);
@@ -4145,7 +4146,7 @@ static void __meminit zone_init_free_lists(struct zone *zone)
4145 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY) 4146 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
4146#endif 4147#endif
4147 4148
4148static int __meminit zone_batchsize(struct zone *zone) 4149static int zone_batchsize(struct zone *zone)
4149{ 4150{
4150#ifdef CONFIG_MMU 4151#ifdef CONFIG_MMU
4151 int batch; 4152 int batch;
@@ -4261,8 +4262,8 @@ static void pageset_set_high(struct per_cpu_pageset *p,
4261 pageset_update(&p->pcp, high, batch); 4262 pageset_update(&p->pcp, high, batch);
4262} 4263}
4263 4264
4264static void __meminit pageset_set_high_and_batch(struct zone *zone, 4265static void pageset_set_high_and_batch(struct zone *zone,
4265 struct per_cpu_pageset *pcp) 4266 struct per_cpu_pageset *pcp)
4266{ 4267{
4267 if (percpu_pagelist_fraction) 4268 if (percpu_pagelist_fraction)
4268 pageset_set_high(pcp, 4269 pageset_set_high(pcp,
@@ -5881,23 +5882,38 @@ int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write,
5881 void __user *buffer, size_t *length, loff_t *ppos) 5882 void __user *buffer, size_t *length, loff_t *ppos)
5882{ 5883{
5883 struct zone *zone; 5884 struct zone *zone;
5884 unsigned int cpu; 5885 int old_percpu_pagelist_fraction;
5885 int ret; 5886 int ret;
5886 5887
5888 mutex_lock(&pcp_batch_high_lock);
5889 old_percpu_pagelist_fraction = percpu_pagelist_fraction;
5890
5887 ret = proc_dointvec_minmax(table, write, buffer, length, ppos); 5891 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
5888 if (!write || (ret < 0)) 5892 if (!write || ret < 0)
5889 return ret; 5893 goto out;
5894
5895 /* Sanity checking to avoid pcp imbalance */
5896 if (percpu_pagelist_fraction &&
5897 percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) {
5898 percpu_pagelist_fraction = old_percpu_pagelist_fraction;
5899 ret = -EINVAL;
5900 goto out;
5901 }
5902
5903 /* No change? */
5904 if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
5905 goto out;
5890 5906
5891 mutex_lock(&pcp_batch_high_lock);
5892 for_each_populated_zone(zone) { 5907 for_each_populated_zone(zone) {
5893 unsigned long high; 5908 unsigned int cpu;
5894 high = zone->managed_pages / percpu_pagelist_fraction; 5909
5895 for_each_possible_cpu(cpu) 5910 for_each_possible_cpu(cpu)
5896 pageset_set_high(per_cpu_ptr(zone->pageset, cpu), 5911 pageset_set_high_and_batch(zone,
5897 high); 5912 per_cpu_ptr(zone->pageset, cpu));
5898 } 5913 }
5914out:
5899 mutex_unlock(&pcp_batch_high_lock); 5915 mutex_unlock(&pcp_batch_high_lock);
5900 return 0; 5916 return ret;
5901} 5917}
5902 5918
5903int hashdist = HASHDIST_DEFAULT; 5919int hashdist = HASHDIST_DEFAULT;
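
The percpu_pagelist_fraction handler above now holds pcp_batch_high_lock across the whole update, rejects non-zero values below MIN_PERCPU_PAGELIST_FRACTION, restores the previous setting when the new one is invalid, and skips the per-zone recomputation when the value did not change. A stand-alone sketch of that validate-or-roll-back shape follows; set_fraction() and its printf are illustrative stand-ins for the proc handler and the per-zone loop, not kernel code.

#include <stdio.h>
#include <errno.h>

#define MIN_FRACTION 8

static int fraction;        /* 0 means "use the default high watermark" */

/* Mimics the handler: accept new_val, keep the old value on bad input. */
static int set_fraction(int new_val)
{
    int old = fraction;

    fraction = new_val;                    /* what proc_dointvec wrote */
    if (fraction && fraction < MIN_FRACTION) {
        fraction = old;                    /* roll back on invalid input */
        return -EINVAL;
    }
    if (fraction == old)
        return 0;                          /* no change, nothing to recompute */

    printf("recomputing per-cpu high/batch for fraction=%d\n", fraction);
    return 0;
}

int main(void)
{
    printf("%d\n", set_fraction(4));    /* -22 (EINVAL): below the minimum */
    printf("%d\n", set_fraction(16));   /* 0: accepted, triggers recompute */
    printf("%d\n", set_fraction(16));   /* 0: unchanged, recompute skipped */
    return 0;
}
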
diff --git a/mm/rmap.c b/mm/rmap.c
index bf05fc872ae8..b7e94ebbd09e 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -569,6 +569,7 @@ pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
569 pgd_t *pgd; 569 pgd_t *pgd;
570 pud_t *pud; 570 pud_t *pud;
571 pmd_t *pmd = NULL; 571 pmd_t *pmd = NULL;
572 pmd_t pmde;
572 573
573 pgd = pgd_offset(mm, address); 574 pgd = pgd_offset(mm, address);
574 if (!pgd_present(*pgd)) 575 if (!pgd_present(*pgd))
@@ -579,7 +580,13 @@ pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
579 goto out; 580 goto out;
580 581
581 pmd = pmd_offset(pud, address); 582 pmd = pmd_offset(pud, address);
582 if (!pmd_present(*pmd)) 583 /*
584 * Some THP functions use the sequence pmdp_clear_flush(), set_pmd_at()
585 * without holding anon_vma lock for write. So when looking for a
586 * genuine pmde (in which to find pte), test present and !THP together.
587 */
588 pmde = ACCESS_ONCE(*pmd);
589 if (!pmd_present(pmde) || pmd_trans_huge(pmde))
583 pmd = NULL; 590 pmd = NULL;
584out: 591out:
585 return pmd; 592 return pmd;
@@ -615,9 +622,6 @@ pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
615 if (!pmd) 622 if (!pmd)
616 return NULL; 623 return NULL;
617 624
618 if (pmd_trans_huge(*pmd))
619 return NULL;
620
621 pte = pte_offset_map(pmd, address); 625 pte = pte_offset_map(pmd, address);
622 /* Make a quick check before getting the lock */ 626 /* Make a quick check before getting the lock */
623 if (!sync && !pte_present(*pte)) { 627 if (!sync && !pte_present(*pte)) {
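
The mm_find_pmd() change above reads *pmd once through ACCESS_ONCE() and then evaluates pmd_present() and pmd_trans_huge() on that single snapshot, since THP code may rewrite the entry without the locks this path holds. The general idiom, read a racy word once and run every check on the local copy, can be shown in user space with C11 atomics; the PRESENT/HUGE flags and both helpers below are invented for the sketch.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PRESENT  0x1u
#define HUGE     0x2u

/* A flag word that another thread may rewrite at any time. */
static _Atomic uint32_t entry = PRESENT;

/* Wrong shape: two separate loads may observe two different values. */
static bool usable_racy(void)
{
    return (atomic_load_explicit(&entry, memory_order_relaxed) & PRESENT) &&
          !(atomic_load_explicit(&entry, memory_order_relaxed) & HUGE);
}

/* Fixed shape: one load, all predicates evaluated on the same snapshot. */
static bool usable_snapshot(void)
{
    uint32_t e = atomic_load_explicit(&entry, memory_order_relaxed);

    return (e & PRESENT) && !(e & HUGE);
}

int main(void)
{
    printf("racy=%d snapshot=%d\n", usable_racy(), usable_snapshot());
    return 0;
}
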
diff --git a/mm/shmem.c b/mm/shmem.c
index f484c276e994..8f419cff9e34 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -80,11 +80,12 @@ static struct vfsmount *shm_mnt;
80#define SHORT_SYMLINK_LEN 128 80#define SHORT_SYMLINK_LEN 128
81 81
82/* 82/*
83 * shmem_fallocate and shmem_writepage communicate via inode->i_private 83 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
84 * (with i_mutex making sure that it has only one user at a time): 84 * inode->i_private (with i_mutex making sure that it has only one user at
85 * we would prefer not to enlarge the shmem inode just for that. 85 * a time): we would prefer not to enlarge the shmem inode just for that.
86 */ 86 */
87struct shmem_falloc { 87struct shmem_falloc {
88 int mode; /* FALLOC_FL mode currently operating */
88 pgoff_t start; /* start of range currently being fallocated */ 89 pgoff_t start; /* start of range currently being fallocated */
89 pgoff_t next; /* the next page offset to be fallocated */ 90 pgoff_t next; /* the next page offset to be fallocated */
90 pgoff_t nr_falloced; /* how many new pages have been fallocated */ 91 pgoff_t nr_falloced; /* how many new pages have been fallocated */
@@ -759,6 +760,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
759 spin_lock(&inode->i_lock); 760 spin_lock(&inode->i_lock);
760 shmem_falloc = inode->i_private; 761 shmem_falloc = inode->i_private;
761 if (shmem_falloc && 762 if (shmem_falloc &&
763 !shmem_falloc->mode &&
762 index >= shmem_falloc->start && 764 index >= shmem_falloc->start &&
763 index < shmem_falloc->next) 765 index < shmem_falloc->next)
764 shmem_falloc->nr_unswapped++; 766 shmem_falloc->nr_unswapped++;
@@ -1233,6 +1235,44 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1233 int error; 1235 int error;
1234 int ret = VM_FAULT_LOCKED; 1236 int ret = VM_FAULT_LOCKED;
1235 1237
1238 /*
1239 * Trinity finds that probing a hole which tmpfs is punching can
1240 * prevent the hole-punch from ever completing: which in turn
1241 * locks writers out with its hold on i_mutex. So refrain from
1242 * faulting pages into the hole while it's being punched, and
1243 * wait on i_mutex to be released if vmf->flags permits.
1244 */
1245 if (unlikely(inode->i_private)) {
1246 struct shmem_falloc *shmem_falloc;
1247
1248 spin_lock(&inode->i_lock);
1249 shmem_falloc = inode->i_private;
1250 if (!shmem_falloc ||
1251 shmem_falloc->mode != FALLOC_FL_PUNCH_HOLE ||
1252 vmf->pgoff < shmem_falloc->start ||
1253 vmf->pgoff >= shmem_falloc->next)
1254 shmem_falloc = NULL;
1255 spin_unlock(&inode->i_lock);
1256 /*
1257 * i_lock has protected us from taking shmem_falloc seriously
1258 * once return from shmem_fallocate() went back up that stack.
1259 * i_lock does not serialize with i_mutex at all, but it does
1260 * not matter if sometimes we wait unnecessarily, or sometimes
1261 * miss out on waiting: we just need to make those cases rare.
1262 */
1263 if (shmem_falloc) {
1264 if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
1265 !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
1266 up_read(&vma->vm_mm->mmap_sem);
1267 mutex_lock(&inode->i_mutex);
1268 mutex_unlock(&inode->i_mutex);
1269 return VM_FAULT_RETRY;
1270 }
1271 /* cond_resched? Leave that to GUP or return to user */
1272 return VM_FAULT_NOPAGE;
1273 }
1274 }
1275
1236 error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret); 1276 error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
1237 if (error) 1277 if (error)
1238 return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS); 1278 return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
@@ -1724,20 +1764,31 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
1724 pgoff_t start, index, end; 1764 pgoff_t start, index, end;
1725 int error; 1765 int error;
1726 1766
1767 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
1768 return -EOPNOTSUPP;
1769
1727 mutex_lock(&inode->i_mutex); 1770 mutex_lock(&inode->i_mutex);
1728 1771
1772 shmem_falloc.mode = mode & ~FALLOC_FL_KEEP_SIZE;
1773
1729 if (mode & FALLOC_FL_PUNCH_HOLE) { 1774 if (mode & FALLOC_FL_PUNCH_HOLE) {
1730 struct address_space *mapping = file->f_mapping; 1775 struct address_space *mapping = file->f_mapping;
1731 loff_t unmap_start = round_up(offset, PAGE_SIZE); 1776 loff_t unmap_start = round_up(offset, PAGE_SIZE);
1732 loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1; 1777 loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
1733 1778
1779 shmem_falloc.start = unmap_start >> PAGE_SHIFT;
1780 shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
1781 spin_lock(&inode->i_lock);
1782 inode->i_private = &shmem_falloc;
1783 spin_unlock(&inode->i_lock);
1784
1734 if ((u64)unmap_end > (u64)unmap_start) 1785 if ((u64)unmap_end > (u64)unmap_start)
1735 unmap_mapping_range(mapping, unmap_start, 1786 unmap_mapping_range(mapping, unmap_start,
1736 1 + unmap_end - unmap_start, 0); 1787 1 + unmap_end - unmap_start, 0);
1737 shmem_truncate_range(inode, offset, offset + len - 1); 1788 shmem_truncate_range(inode, offset, offset + len - 1);
1738 /* No need to unmap again: hole-punching leaves COWed pages */ 1789 /* No need to unmap again: hole-punching leaves COWed pages */
1739 error = 0; 1790 error = 0;
1740 goto out; 1791 goto undone;
1741 } 1792 }
1742 1793
1743 /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */ 1794 /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
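
The shmem.c changes publish the range being hole-punched through inode->i_private under i_lock, and shmem_fault() now snapshots that range and backs off (VM_FAULT_NOPAGE, or VM_FAULT_RETRY after waiting on i_mutex) when the faulting offset falls inside it. A much-reduced pthread sketch of the same publish-and-check pattern follows; struct punch_range, punch_begin(), punch_end() and should_back_off() are made-up names, and none of the mmap_sem/i_mutex handling is modeled.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct punch_range { long start, next; };   /* [start, next) in page units */

static pthread_mutex_t range_lock = PTHREAD_MUTEX_INITIALIZER;
static struct punch_range *active_punch;    /* NULL when no punch is running */

/* Puncher: advertise the range before doing the slow truncation work. */
static void punch_begin(struct punch_range *r)
{
    pthread_mutex_lock(&range_lock);
    active_punch = r;
    pthread_mutex_unlock(&range_lock);
}

static void punch_end(void)
{
    pthread_mutex_lock(&range_lock);
    active_punch = NULL;
    pthread_mutex_unlock(&range_lock);
}

/* Fault side: snapshot the advertised range and decide whether to back off. */
static bool should_back_off(long pgoff)
{
    bool hit = false;

    pthread_mutex_lock(&range_lock);
    if (active_punch &&
        pgoff >= active_punch->start && pgoff < active_punch->next)
        hit = true;
    pthread_mutex_unlock(&range_lock);
    return hit;
}

int main(void)
{
    struct punch_range r = { .start = 16, .next = 64 };

    punch_begin(&r);
    printf("pgoff 20 backs off: %d\n", should_back_off(20));  /* 1 */
    punch_end();
    printf("pgoff 20 backs off: %d\n", should_back_off(20));  /* 0 */
    return 0;
}
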
diff --git a/mm/slab.c b/mm/slab.c
index 9ca3b87edabc..3070b929a1bf 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -386,6 +386,39 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
386 386
387#endif 387#endif
388 388
389#define OBJECT_FREE (0)
390#define OBJECT_ACTIVE (1)
391
392#ifdef CONFIG_DEBUG_SLAB_LEAK
393
394static void set_obj_status(struct page *page, int idx, int val)
395{
396 int freelist_size;
397 char *status;
398 struct kmem_cache *cachep = page->slab_cache;
399
400 freelist_size = cachep->num * sizeof(freelist_idx_t);
401 status = (char *)page->freelist + freelist_size;
402 status[idx] = val;
403}
404
405static inline unsigned int get_obj_status(struct page *page, int idx)
406{
407 int freelist_size;
408 char *status;
409 struct kmem_cache *cachep = page->slab_cache;
410
411 freelist_size = cachep->num * sizeof(freelist_idx_t);
412 status = (char *)page->freelist + freelist_size;
413
414 return status[idx];
415}
416
417#else
418static inline void set_obj_status(struct page *page, int idx, int val) {}
419
420#endif
421
389/* 422/*
390 * Do not go above this order unless 0 objects fit into the slab or 423 * Do not go above this order unless 0 objects fit into the slab or
391 * overridden on the command line. 424 * overridden on the command line.
@@ -576,12 +609,30 @@ static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
576 return cachep->array[smp_processor_id()]; 609 return cachep->array[smp_processor_id()];
577} 610}
578 611
612static size_t calculate_freelist_size(int nr_objs, size_t align)
613{
614 size_t freelist_size;
615
616 freelist_size = nr_objs * sizeof(freelist_idx_t);
617 if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
618 freelist_size += nr_objs * sizeof(char);
619
620 if (align)
621 freelist_size = ALIGN(freelist_size, align);
622
623 return freelist_size;
624}
625
579static int calculate_nr_objs(size_t slab_size, size_t buffer_size, 626static int calculate_nr_objs(size_t slab_size, size_t buffer_size,
580 size_t idx_size, size_t align) 627 size_t idx_size, size_t align)
581{ 628{
582 int nr_objs; 629 int nr_objs;
630 size_t remained_size;
583 size_t freelist_size; 631 size_t freelist_size;
632 int extra_space = 0;
584 633
634 if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
635 extra_space = sizeof(char);
585 /* 636 /*
586 * Ignore padding for the initial guess. The padding 637 * Ignore padding for the initial guess. The padding
587 * is at most @align-1 bytes, and @buffer_size is at 638 * is at most @align-1 bytes, and @buffer_size is at
@@ -590,14 +641,15 @@ static int calculate_nr_objs(size_t slab_size, size_t buffer_size,
590 * into the memory allocation when taking the padding 641 * into the memory allocation when taking the padding
591 * into account. 642 * into account.
592 */ 643 */
593 nr_objs = slab_size / (buffer_size + idx_size); 644 nr_objs = slab_size / (buffer_size + idx_size + extra_space);
594 645
595 /* 646 /*
596 * This calculated number will be either the right 647 * This calculated number will be either the right
597 * amount, or one greater than what we want. 648 * amount, or one greater than what we want.
598 */ 649 */
599 freelist_size = slab_size - nr_objs * buffer_size; 650 remained_size = slab_size - nr_objs * buffer_size;
600 if (freelist_size < ALIGN(nr_objs * idx_size, align)) 651 freelist_size = calculate_freelist_size(nr_objs, align);
652 if (remained_size < freelist_size)
601 nr_objs--; 653 nr_objs--;
602 654
603 return nr_objs; 655 return nr_objs;
@@ -635,7 +687,7 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
635 } else { 687 } else {
636 nr_objs = calculate_nr_objs(slab_size, buffer_size, 688 nr_objs = calculate_nr_objs(slab_size, buffer_size,
637 sizeof(freelist_idx_t), align); 689 sizeof(freelist_idx_t), align);
638 mgmt_size = ALIGN(nr_objs * sizeof(freelist_idx_t), align); 690 mgmt_size = calculate_freelist_size(nr_objs, align);
639 } 691 }
640 *num = nr_objs; 692 *num = nr_objs;
641 *left_over = slab_size - nr_objs*buffer_size - mgmt_size; 693 *left_over = slab_size - nr_objs*buffer_size - mgmt_size;
@@ -2041,13 +2093,16 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
2041 break; 2093 break;
2042 2094
2043 if (flags & CFLGS_OFF_SLAB) { 2095 if (flags & CFLGS_OFF_SLAB) {
2096 size_t freelist_size_per_obj = sizeof(freelist_idx_t);
2044 /* 2097 /*
2045 * Max number of objs-per-slab for caches which 2098 * Max number of objs-per-slab for caches which
2046 * use off-slab slabs. Needed to avoid a possible 2099 * use off-slab slabs. Needed to avoid a possible
2047 * looping condition in cache_grow(). 2100 * looping condition in cache_grow().
2048 */ 2101 */
2102 if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
2103 freelist_size_per_obj += sizeof(char);
2049 offslab_limit = size; 2104 offslab_limit = size;
2050 offslab_limit /= sizeof(freelist_idx_t); 2105 offslab_limit /= freelist_size_per_obj;
2051 2106
2052 if (num > offslab_limit) 2107 if (num > offslab_limit)
2053 break; 2108 break;
@@ -2294,8 +2349,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
2294 if (!cachep->num) 2349 if (!cachep->num)
2295 return -E2BIG; 2350 return -E2BIG;
2296 2351
2297 freelist_size = 2352 freelist_size = calculate_freelist_size(cachep->num, cachep->align);
2298 ALIGN(cachep->num * sizeof(freelist_idx_t), cachep->align);
2299 2353
2300 /* 2354 /*
2301 * If the slab has been placed off-slab, and we have enough space then 2355 * If the slab has been placed off-slab, and we have enough space then
@@ -2308,7 +2362,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
2308 2362
2309 if (flags & CFLGS_OFF_SLAB) { 2363 if (flags & CFLGS_OFF_SLAB) {
2310 /* really off slab. No need for manual alignment */ 2364 /* really off slab. No need for manual alignment */
2311 freelist_size = cachep->num * sizeof(freelist_idx_t); 2365 freelist_size = calculate_freelist_size(cachep->num, 0);
2312 2366
2313#ifdef CONFIG_PAGE_POISONING 2367#ifdef CONFIG_PAGE_POISONING
2314 /* If we're going to use the generic kernel_map_pages() 2368 /* If we're going to use the generic kernel_map_pages()
@@ -2612,6 +2666,7 @@ static void cache_init_objs(struct kmem_cache *cachep,
2612 if (cachep->ctor) 2666 if (cachep->ctor)
2613 cachep->ctor(objp); 2667 cachep->ctor(objp);
2614#endif 2668#endif
2669 set_obj_status(page, i, OBJECT_FREE);
2615 set_free_obj(page, i, i); 2670 set_free_obj(page, i, i);
2616 } 2671 }
2617} 2672}
@@ -2820,6 +2875,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
2820 BUG_ON(objnr >= cachep->num); 2875 BUG_ON(objnr >= cachep->num);
2821 BUG_ON(objp != index_to_obj(cachep, page, objnr)); 2876 BUG_ON(objp != index_to_obj(cachep, page, objnr));
2822 2877
2878 set_obj_status(page, objnr, OBJECT_FREE);
2823 if (cachep->flags & SLAB_POISON) { 2879 if (cachep->flags & SLAB_POISON) {
2824#ifdef CONFIG_DEBUG_PAGEALLOC 2880#ifdef CONFIG_DEBUG_PAGEALLOC
2825 if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) { 2881 if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
@@ -2953,6 +3009,8 @@ static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
2953static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, 3009static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
2954 gfp_t flags, void *objp, unsigned long caller) 3010 gfp_t flags, void *objp, unsigned long caller)
2955{ 3011{
3012 struct page *page;
3013
2956 if (!objp) 3014 if (!objp)
2957 return objp; 3015 return objp;
2958 if (cachep->flags & SLAB_POISON) { 3016 if (cachep->flags & SLAB_POISON) {
@@ -2983,6 +3041,9 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
2983 *dbg_redzone1(cachep, objp) = RED_ACTIVE; 3041 *dbg_redzone1(cachep, objp) = RED_ACTIVE;
2984 *dbg_redzone2(cachep, objp) = RED_ACTIVE; 3042 *dbg_redzone2(cachep, objp) = RED_ACTIVE;
2985 } 3043 }
3044
3045 page = virt_to_head_page(objp);
3046 set_obj_status(page, obj_to_index(cachep, page, objp), OBJECT_ACTIVE);
2986 objp += obj_offset(cachep); 3047 objp += obj_offset(cachep);
2987 if (cachep->ctor && cachep->flags & SLAB_POISON) 3048 if (cachep->ctor && cachep->flags & SLAB_POISON)
2988 cachep->ctor(objp); 3049 cachep->ctor(objp);
@@ -4219,21 +4280,12 @@ static void handle_slab(unsigned long *n, struct kmem_cache *c,
4219 struct page *page) 4280 struct page *page)
4220{ 4281{
4221 void *p; 4282 void *p;
4222 int i, j; 4283 int i;
4223 4284
4224 if (n[0] == n[1]) 4285 if (n[0] == n[1])
4225 return; 4286 return;
4226 for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) { 4287 for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) {
4227 bool active = true; 4288 if (get_obj_status(page, i) != OBJECT_ACTIVE)
4228
4229 for (j = page->active; j < c->num; j++) {
4230 /* Skip freed item */
4231 if (get_free_obj(page, j) == i) {
4232 active = false;
4233 break;
4234 }
4235 }
4236 if (!active)
4237 continue; 4289 continue;
4238 4290
4239 if (!add_caller(n, (unsigned long)*dbg_userword(c, p))) 4291 if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
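
With CONFIG_DEBUG_SLAB_LEAK the slab.c changes reserve one status byte per object directly after the freelist index array, so both the object-count estimate and the freelist size have to charge sizeof(freelist_idx_t) plus one byte per object before aligning. On its own, the arithmetic looks roughly like the stand-alone sketch below; ALIGN is redefined locally and the numbers in main() are arbitrary.

#include <stdio.h>
#include <stddef.h>

typedef unsigned char freelist_idx_t;   /* the small-index case */

#define ALIGN(x, a)  (((x) + (a) - 1) & ~((size_t)(a) - 1))

static size_t freelist_size(int nr_objs, size_t align, int leak_debug)
{
    size_t sz = nr_objs * sizeof(freelist_idx_t);

    if (leak_debug)
        sz += nr_objs * sizeof(char);   /* one status byte per object */
    if (align)
        sz = ALIGN(sz, align);
    return sz;
}

static int nr_objs(size_t slab_size, size_t buffer_size, size_t align,
                   int leak_debug)
{
    size_t per_obj = buffer_size + sizeof(freelist_idx_t) +
                     (leak_debug ? sizeof(char) : 0);
    int n = slab_size / per_obj;

    /* The guess can be one too many once alignment padding is added back. */
    if (slab_size - n * buffer_size < freelist_size(n, align, leak_debug))
        n--;
    return n;
}

int main(void)
{
    printf("objs without leak debug: %d\n", nr_objs(4096, 256, 8, 0));
    printf("objs with leak debug:    %d\n", nr_objs(4096, 256, 8, 1));
    return 0;
}
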
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 9012b1c922b6..75d427763992 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -114,8 +114,11 @@ EXPORT_SYMBOL(vlan_dev_vlan_proto);
114 114
115static struct sk_buff *vlan_reorder_header(struct sk_buff *skb) 115static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
116{ 116{
117 if (skb_cow(skb, skb_headroom(skb)) < 0) 117 if (skb_cow(skb, skb_headroom(skb)) < 0) {
118 kfree_skb(skb);
118 return NULL; 119 return NULL;
120 }
121
119 memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN); 122 memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
120 skb->mac_header += VLAN_HLEN; 123 skb->mac_header += VLAN_HLEN;
121 return skb; 124 return skb;
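
The vlan_core.c fix makes vlan_reorder_header() free the skb itself when skb_cow() fails, so a NULL return now means the buffer has been consumed rather than leaked. The same consume-on-failure contract in plain C might look like this; grow_or_consume() is a made-up helper built on realloc(), not anything from the networking code.

#include <stdlib.h>
#include <string.h>

/*
 * Grow a buffer to new_len. On success return the (possibly moved) buffer;
 * on failure free the original and return NULL, so the caller never owns
 * a stale pointer.  Same contract the vlan path now gives its caller.
 */
static char *grow_or_consume(char *buf, size_t old_len, size_t new_len)
{
    char *n = realloc(buf, new_len);

    if (!n) {
        free(buf);          /* consume on failure */
        return NULL;
    }
    memset(n + old_len, 0, new_len - old_len);
    return n;
}

int main(void)
{
    char *buf = calloc(1, 16);

    buf = grow_or_consume(buf, 16, 64);
    if (!buf)
        return 1;           /* nothing left to free */
    free(buf);
    return 0;
}
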
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 8671bc79a35b..ca01d1861854 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -610,11 +610,6 @@ static void hci_req_add_le_create_conn(struct hci_request *req,
610 if (hci_update_random_address(req, false, &own_addr_type)) 610 if (hci_update_random_address(req, false, &own_addr_type))
611 return; 611 return;
612 612
613 /* Save the address type used for this connnection attempt so we able
614 * to retrieve this information if we need it.
615 */
616 conn->src_type = own_addr_type;
617
618 cp.scan_interval = cpu_to_le16(hdev->le_scan_interval); 613 cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
619 cp.scan_window = cpu_to_le16(hdev->le_scan_window); 614 cp.scan_window = cpu_to_le16(hdev->le_scan_window);
620 bacpy(&cp.peer_addr, &conn->dst); 615 bacpy(&cp.peer_addr, &conn->dst);
@@ -894,7 +889,7 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
894 /* If we're already encrypted set the REAUTH_PEND flag, 889 /* If we're already encrypted set the REAUTH_PEND flag,
895 * otherwise set the ENCRYPT_PEND. 890 * otherwise set the ENCRYPT_PEND.
896 */ 891 */
897 if (conn->key_type != 0xff) 892 if (conn->link_mode & HCI_LM_ENCRYPT)
898 set_bit(HCI_CONN_REAUTH_PEND, &conn->flags); 893 set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
899 else 894 else
900 set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 895 set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 21e5913d12e0..640c54ec1bd2 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -48,6 +48,10 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
48 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */ 48 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
49 wake_up_bit(&hdev->flags, HCI_INQUIRY); 49 wake_up_bit(&hdev->flags, HCI_INQUIRY);
50 50
51 hci_dev_lock(hdev);
52 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
53 hci_dev_unlock(hdev);
54
51 hci_conn_check_pending(hdev); 55 hci_conn_check_pending(hdev);
52} 56}
53 57
@@ -3537,7 +3541,11 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3537 cp.authentication = conn->auth_type; 3541 cp.authentication = conn->auth_type;
3538 3542
3539 /* Request MITM protection if our IO caps allow it 3543 /* Request MITM protection if our IO caps allow it
3540 * except for the no-bonding case 3544 * except for the no-bonding case.
3545 * conn->auth_type is not updated here since
3546 * that might cause the user confirmation to be
3547 * rejected in case the remote doesn't have the
3548 * IO capabilities for MITM.
3541 */ 3549 */
3542 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT && 3550 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
3543 cp.authentication != HCI_AT_NO_BONDING) 3551 cp.authentication != HCI_AT_NO_BONDING)
@@ -3628,8 +3636,11 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev,
3628 3636
3629 /* If we're not the initiators request authorization to 3637 /* If we're not the initiators request authorization to
3630 * proceed from user space (mgmt_user_confirm with 3638 * proceed from user space (mgmt_user_confirm with
3631 * confirm_hint set to 1). */ 3639 * confirm_hint set to 1). The exception is if neither
3632 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) { 3640 * side had MITM in which case we do auto-accept.
3641 */
3642 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
3643 (loc_mitm || rem_mitm)) {
3633 BT_DBG("Confirming auto-accept as acceptor"); 3644 BT_DBG("Confirming auto-accept as acceptor");
3634 confirm_hint = 1; 3645 confirm_hint = 1;
3635 goto confirm; 3646 goto confirm;
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 6eabbe05fe54..323f23cd2c37 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -1663,7 +1663,13 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
1663 kfree_skb(conn->rx_skb); 1663 kfree_skb(conn->rx_skb);
1664 1664
1665 skb_queue_purge(&conn->pending_rx); 1665 skb_queue_purge(&conn->pending_rx);
1666 flush_work(&conn->pending_rx_work); 1666
1667 /* We can not call flush_work(&conn->pending_rx_work) here since we
1668 * might block if we are running on a worker from the same workqueue
1669 * pending_rx_work is waiting on.
1670 */
1671 if (work_pending(&conn->pending_rx_work))
1672 cancel_work_sync(&conn->pending_rx_work);
1667 1673
1668 l2cap_unregister_all_users(conn); 1674 l2cap_unregister_all_users(conn);
1669 1675
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index ade3fb4c23bc..e1378693cc90 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -787,11 +787,6 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
787 787
788 /*change security for LE channels */ 788 /*change security for LE channels */
789 if (chan->scid == L2CAP_CID_ATT) { 789 if (chan->scid == L2CAP_CID_ATT) {
790 if (!conn->hcon->out) {
791 err = -EINVAL;
792 break;
793 }
794
795 if (smp_conn_security(conn->hcon, sec.level)) 790 if (smp_conn_security(conn->hcon, sec.level))
796 break; 791 break;
797 sk->sk_state = BT_CONFIG; 792 sk->sk_state = BT_CONFIG;
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 0fce54412ffd..af8e0a6243b7 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -1047,6 +1047,43 @@ static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
1047 } 1047 }
1048} 1048}
1049 1049
1050static void hci_stop_discovery(struct hci_request *req)
1051{
1052 struct hci_dev *hdev = req->hdev;
1053 struct hci_cp_remote_name_req_cancel cp;
1054 struct inquiry_entry *e;
1055
1056 switch (hdev->discovery.state) {
1057 case DISCOVERY_FINDING:
1058 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
1059 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
1060 } else {
1061 cancel_delayed_work(&hdev->le_scan_disable);
1062 hci_req_add_le_scan_disable(req);
1063 }
1064
1065 break;
1066
1067 case DISCOVERY_RESOLVING:
1068 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
1069 NAME_PENDING);
1070 if (!e)
1071 return;
1072
1073 bacpy(&cp.bdaddr, &e->data.bdaddr);
1074 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
1075 &cp);
1076
1077 break;
1078
1079 default:
1080 /* Passive scanning */
1081 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1082 hci_req_add_le_scan_disable(req);
1083 break;
1084 }
1085}
1086
1050static int clean_up_hci_state(struct hci_dev *hdev) 1087static int clean_up_hci_state(struct hci_dev *hdev)
1051{ 1088{
1052 struct hci_request req; 1089 struct hci_request req;
@@ -1063,9 +1100,7 @@ static int clean_up_hci_state(struct hci_dev *hdev)
1063 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) 1100 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1064 disable_advertising(&req); 1101 disable_advertising(&req);
1065 1102
1066 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) { 1103 hci_stop_discovery(&req);
1067 hci_req_add_le_scan_disable(&req);
1068 }
1069 1104
1070 list_for_each_entry(conn, &hdev->conn_hash.list, list) { 1105 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1071 struct hci_cp_disconnect dc; 1106 struct hci_cp_disconnect dc;
@@ -2996,8 +3031,13 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
2996 } 3031 }
2997 3032
2998 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) { 3033 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
2999 /* Continue with pairing via SMP */ 3034 /* Continue with pairing via SMP. The hdev lock must be
3035 * released as SMP may try to reacquire it for crypto
3036 * purposes.
3037 */
3038 hci_dev_unlock(hdev);
3000 err = smp_user_confirm_reply(conn, mgmt_op, passkey); 3039 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3040 hci_dev_lock(hdev);
3001 3041
3002 if (!err) 3042 if (!err)
3003 err = cmd_complete(sk, hdev->id, mgmt_op, 3043 err = cmd_complete(sk, hdev->id, mgmt_op,
@@ -3574,8 +3614,6 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
3574{ 3614{
3575 struct mgmt_cp_stop_discovery *mgmt_cp = data; 3615 struct mgmt_cp_stop_discovery *mgmt_cp = data;
3576 struct pending_cmd *cmd; 3616 struct pending_cmd *cmd;
3577 struct hci_cp_remote_name_req_cancel cp;
3578 struct inquiry_entry *e;
3579 struct hci_request req; 3617 struct hci_request req;
3580 int err; 3618 int err;
3581 3619
@@ -3605,52 +3643,22 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
3605 3643
3606 hci_req_init(&req, hdev); 3644 hci_req_init(&req, hdev);
3607 3645
3608 switch (hdev->discovery.state) { 3646 hci_stop_discovery(&req);
3609 case DISCOVERY_FINDING:
3610 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3611 hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3612 } else {
3613 cancel_delayed_work(&hdev->le_scan_disable);
3614
3615 hci_req_add_le_scan_disable(&req);
3616 }
3617
3618 break;
3619 3647
3620 case DISCOVERY_RESOLVING: 3648 err = hci_req_run(&req, stop_discovery_complete);
3621 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, 3649 if (!err) {
3622 NAME_PENDING); 3650 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
3623 if (!e) {
3624 mgmt_pending_remove(cmd);
3625 err = cmd_complete(sk, hdev->id,
3626 MGMT_OP_STOP_DISCOVERY, 0,
3627 &mgmt_cp->type,
3628 sizeof(mgmt_cp->type));
3629 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3630 goto unlock;
3631 }
3632
3633 bacpy(&cp.bdaddr, &e->data.bdaddr);
3634 hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
3635 &cp);
3636
3637 break;
3638
3639 default:
3640 BT_DBG("unknown discovery state %u", hdev->discovery.state);
3641
3642 mgmt_pending_remove(cmd);
3643 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3644 MGMT_STATUS_FAILED, &mgmt_cp->type,
3645 sizeof(mgmt_cp->type));
3646 goto unlock; 3651 goto unlock;
3647 } 3652 }
3648 3653
3649 err = hci_req_run(&req, stop_discovery_complete); 3654 mgmt_pending_remove(cmd);
3650 if (err < 0) 3655
3651 mgmt_pending_remove(cmd); 3656 /* If no HCI commands were sent we're done */
3652 else 3657 if (err == -ENODATA) {
3653 hci_discovery_set_state(hdev, DISCOVERY_STOPPING); 3658 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
3659 &mgmt_cp->type, sizeof(mgmt_cp->type));
3660 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3661 }
3654 3662
3655unlock: 3663unlock:
3656 hci_dev_unlock(hdev); 3664 hci_dev_unlock(hdev);
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 3d1cc164557d..f2829a7932e2 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -544,7 +544,7 @@ static u8 smp_random(struct smp_chan *smp)
544 hci_le_start_enc(hcon, ediv, rand, stk); 544 hci_le_start_enc(hcon, ediv, rand, stk);
545 hcon->enc_key_size = smp->enc_key_size; 545 hcon->enc_key_size = smp->enc_key_size;
546 } else { 546 } else {
547 u8 stk[16]; 547 u8 stk[16], auth;
548 __le64 rand = 0; 548 __le64 rand = 0;
549 __le16 ediv = 0; 549 __le16 ediv = 0;
550 550
@@ -556,8 +556,13 @@ static u8 smp_random(struct smp_chan *smp)
556 memset(stk + smp->enc_key_size, 0, 556 memset(stk + smp->enc_key_size, 0,
557 SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size); 557 SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size);
558 558
559 if (hcon->pending_sec_level == BT_SECURITY_HIGH)
560 auth = 1;
561 else
562 auth = 0;
563
559 hci_add_ltk(hcon->hdev, &hcon->dst, hcon->dst_type, 564 hci_add_ltk(hcon->hdev, &hcon->dst, hcon->dst_type,
560 HCI_SMP_STK_SLAVE, 0, stk, smp->enc_key_size, 565 HCI_SMP_STK_SLAVE, auth, stk, smp->enc_key_size,
561 ediv, rand); 566 ediv, rand);
562 } 567 }
563 568
diff --git a/net/core/dst.c b/net/core/dst.c
index 80d6286c8b62..a028409ee438 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -269,6 +269,15 @@ again:
269} 269}
270EXPORT_SYMBOL(dst_destroy); 270EXPORT_SYMBOL(dst_destroy);
271 271
272static void dst_destroy_rcu(struct rcu_head *head)
273{
274 struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
275
276 dst = dst_destroy(dst);
277 if (dst)
278 __dst_free(dst);
279}
280
272void dst_release(struct dst_entry *dst) 281void dst_release(struct dst_entry *dst)
273{ 282{
274 if (dst) { 283 if (dst) {
@@ -276,11 +285,8 @@ void dst_release(struct dst_entry *dst)
276 285
277 newrefcnt = atomic_dec_return(&dst->__refcnt); 286 newrefcnt = atomic_dec_return(&dst->__refcnt);
278 WARN_ON(newrefcnt < 0); 287 WARN_ON(newrefcnt < 0);
279 if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt) { 288 if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt)
280 dst = dst_destroy(dst); 289 call_rcu(&dst->rcu_head, dst_destroy_rcu);
281 if (dst)
282 __dst_free(dst);
283 }
284 } 290 }
285} 291}
286EXPORT_SYMBOL(dst_release); 292EXPORT_SYMBOL(dst_release);
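
dst_release() above stops destroying a zero-refcount DST_NOCACHE entry inline and instead queues dst_destroy_rcu() with call_rcu(), so lockless readers that still hold the pointer get a grace period before the memory is reused. A minimal illustration of the callback shape, container_of() from an embedded head back to the containing object, is sketched below with an ordinary pending list standing in for the RCU machinery; struct cb_head, defer() and dst_free_cb() are inventions of the sketch.

#include <stddef.h>
#include <stdlib.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct cb_head {                       /* stands in for struct rcu_head */
    void (*func)(struct cb_head *);
    struct cb_head *next;
};

struct dst {
    int refcnt;
    struct cb_head head;               /* embedded callback handle */
};

static struct cb_head *pending;        /* "deferred work" list */

static void defer(struct cb_head *h, void (*fn)(struct cb_head *))
{
    h->func = fn;
    h->next = pending;
    pending = h;
}

static void dst_free_cb(struct cb_head *h)
{
    struct dst *dst = container_of(h, struct dst, head);

    printf("freeing dst %p after the readers are done\n", (void *)dst);
    free(dst);
}

int main(void)
{
    struct dst *d = calloc(1, sizeof(*d));

    defer(&d->head, dst_free_cb);      /* the call_rcu() analogue */
    while (pending) {                  /* later: run the queued callbacks */
        struct cb_head *h = pending;

        pending = h->next;
        h->func(h);
    }
    return 0;
}
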
diff --git a/net/core/filter.c b/net/core/filter.c
index 735fad897496..1dbf6462f766 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -840,11 +840,11 @@ int sk_convert_filter(struct sock_filter *prog, int len,
840 BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK); 840 BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
841 BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG); 841 BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
842 842
843 if (len <= 0 || len >= BPF_MAXINSNS) 843 if (len <= 0 || len > BPF_MAXINSNS)
844 return -EINVAL; 844 return -EINVAL;
845 845
846 if (new_prog) { 846 if (new_prog) {
847 addrs = kzalloc(len * sizeof(*addrs), GFP_KERNEL); 847 addrs = kcalloc(len, sizeof(*addrs), GFP_KERNEL);
848 if (!addrs) 848 if (!addrs)
849 return -ENOMEM; 849 return -ENOMEM;
850 } 850 }
@@ -1101,7 +1101,7 @@ static int check_load_and_stores(struct sock_filter *filter, int flen)
1101 1101
1102 BUILD_BUG_ON(BPF_MEMWORDS > 16); 1102 BUILD_BUG_ON(BPF_MEMWORDS > 16);
1103 1103
1104 masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL); 1104 masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
1105 if (!masks) 1105 if (!masks)
1106 return -ENOMEM; 1106 return -ENOMEM;
1107 1107
@@ -1382,7 +1382,7 @@ static struct sk_filter *__sk_migrate_realloc(struct sk_filter *fp,
1382 fp_new = sock_kmalloc(sk, len, GFP_KERNEL); 1382 fp_new = sock_kmalloc(sk, len, GFP_KERNEL);
1383 if (fp_new) { 1383 if (fp_new) {
1384 *fp_new = *fp; 1384 *fp_new = *fp;
1385 /* As we're kepping orig_prog in fp_new along, 1385 /* As we're keeping orig_prog in fp_new along,
1386 * we need to make sure we're not evicting it 1386 * we need to make sure we're not evicting it
1387 * from the old fp. 1387 * from the old fp.
1388 */ 1388 */
@@ -1524,8 +1524,8 @@ static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
1524 1524
1525/** 1525/**
1526 * sk_unattached_filter_create - create an unattached filter 1526 * sk_unattached_filter_create - create an unattached filter
1527 * @fprog: the filter program
1528 * @pfp: the unattached filter that is created 1527 * @pfp: the unattached filter that is created
1528 * @fprog: the filter program
1529 * 1529 *
1530 * Create a filter independent of any socket. We first run some 1530 * Create a filter independent of any socket. We first run some
1531 * sanity checks on it to make sure it does not explode on us later. 1531 * sanity checks on it to make sure it does not explode on us later.
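
The filter.c hunks replace kzalloc(len * sizeof(*addrs)) with kcalloc() and kmalloc(flen * sizeof(*masks)) with kmalloc_array(), so a large count cannot overflow the size multiplication into a too-small allocation. The guard itself amounts to the check below; alloc_array() is a hypothetical user-space helper written for this sketch, not a kernel API.

#include <stdlib.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical checked helper, same idea as kmalloc_array()/kcalloc(). */
static void *alloc_array(size_t n, size_t size)
{
    if (size && n > SIZE_MAX / size)
        return NULL;                   /* n * size would overflow */
    return malloc(n * size);
}

int main(void)
{
    /* A huge element count must fail cleanly, not wrap to a tiny buffer. */
    void *p = alloc_array(SIZE_MAX / 2, 16);

    printf("overflowing request -> %p\n", p);   /* (nil) */
    free(p);

    int *addrs = alloc_array(64, sizeof(*addrs));
    free(addrs);
    return 0;
}
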
diff --git a/net/core/iovec.c b/net/core/iovec.c
index b61869429f4c..827dd6beb49c 100644
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -75,61 +75,6 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
75} 75}
76 76
77/* 77/*
78 * Copy kernel to iovec. Returns -EFAULT on error.
79 */
80
81int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata,
82 int offset, int len)
83{
84 int copy;
85 for (; len > 0; ++iov) {
86 /* Skip over the finished iovecs */
87 if (unlikely(offset >= iov->iov_len)) {
88 offset -= iov->iov_len;
89 continue;
90 }
91 copy = min_t(unsigned int, iov->iov_len - offset, len);
92 if (copy_to_user(iov->iov_base + offset, kdata, copy))
93 return -EFAULT;
94 offset = 0;
95 kdata += copy;
96 len -= copy;
97 }
98
99 return 0;
100}
101EXPORT_SYMBOL(memcpy_toiovecend);
102
103/*
104 * Copy iovec to kernel. Returns -EFAULT on error.
105 */
106
107int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
108 int offset, int len)
109{
110 /* Skip over the finished iovecs */
111 while (offset >= iov->iov_len) {
112 offset -= iov->iov_len;
113 iov++;
114 }
115
116 while (len > 0) {
117 u8 __user *base = iov->iov_base + offset;
118 int copy = min_t(unsigned int, len, iov->iov_len - offset);
119
120 offset = 0;
121 if (copy_from_user(kdata, base, copy))
122 return -EFAULT;
123 len -= copy;
124 kdata += copy;
125 iov++;
126 }
127
128 return 0;
129}
130EXPORT_SYMBOL(memcpy_fromiovecend);
131
132/*
133 * And now for the all-in-one: copy and checksum from a user iovec 78 * And now for the all-in-one: copy and checksum from a user iovec
134 * directly to a datagram 79 * directly to a datagram
135 * Calls to csum_partial but the last must be in 32 bit chunks 80 * Calls to csum_partial but the last must be in 32 bit chunks
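
The helpers removed from net/core/iovec.c walk an iovec from a byte offset: fully consumed segments are skipped, then min(bytes left in the segment, bytes left to copy) is transferred and the offset reset to zero for the next segment. A user-space analogue of that walk, with memcpy() standing in for copy_to_user() so the -EFAULT path disappears, might read as follows; copy_to_iovec_end() is an illustrative name.

#include <sys/uio.h>
#include <string.h>
#include <stdio.h>

/* Copy len bytes from kdata into the iovec, starting offset bytes in. */
static int copy_to_iovec_end(const struct iovec *iov, const char *kdata,
                             size_t offset, size_t len)
{
    for (; len > 0; ++iov) {
        size_t copy;

        if (offset >= iov->iov_len) {      /* skip finished segments */
            offset -= iov->iov_len;
            continue;
        }
        copy = iov->iov_len - offset;
        if (copy > len)
            copy = len;
        memcpy((char *)iov->iov_base + offset, kdata, copy);
        offset = 0;
        kdata += copy;
        len -= copy;
    }
    return 0;
}

int main(void)
{
    char a[4] = "", b[8] = "";
    struct iovec iov[2] = {
        { .iov_base = a, .iov_len = sizeof(a) },
        { .iov_base = b, .iov_len = sizeof(b) },
    };

    copy_to_iovec_end(iov, "hello!", 2, 6);  /* a[2..3]="he", b[0..3]="llo!" */
    printf("b=%s\n", b);
    return 0;
}
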
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 9cd5344fad73..c1a33033cbe2 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2993,7 +2993,7 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
2993 skb_put(nskb, len), 2993 skb_put(nskb, len),
2994 len, 0); 2994 len, 0);
2995 SKB_GSO_CB(nskb)->csum_start = 2995 SKB_GSO_CB(nskb)->csum_start =
2996 skb_headroom(nskb) + offset; 2996 skb_headroom(nskb) + doffset;
2997 continue; 2997 continue;
2998 } 2998 }
2999 2999
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 097b3e7c1e8f..54b6731dab55 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -73,12 +73,7 @@ static void __tunnel_dst_set(struct ip_tunnel_dst *idst,
73{ 73{
74 struct dst_entry *old_dst; 74 struct dst_entry *old_dst;
75 75
76 if (dst) { 76 dst_clone(dst);
77 if (dst->flags & DST_NOCACHE)
78 dst = NULL;
79 else
80 dst_clone(dst);
81 }
82 old_dst = xchg((__force struct dst_entry **)&idst->dst, dst); 77 old_dst = xchg((__force struct dst_entry **)&idst->dst, dst);
83 dst_release(old_dst); 78 dst_release(old_dst);
84} 79}
@@ -108,13 +103,14 @@ static struct rtable *tunnel_rtable_get(struct ip_tunnel *t, u32 cookie)
108 103
109 rcu_read_lock(); 104 rcu_read_lock();
110 dst = rcu_dereference(this_cpu_ptr(t->dst_cache)->dst); 105 dst = rcu_dereference(this_cpu_ptr(t->dst_cache)->dst);
106 if (dst && !atomic_inc_not_zero(&dst->__refcnt))
107 dst = NULL;
111 if (dst) { 108 if (dst) {
112 if (dst->obsolete && dst->ops->check(dst, cookie) == NULL) { 109 if (dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
113 rcu_read_unlock();
114 tunnel_dst_reset(t); 110 tunnel_dst_reset(t);
115 return NULL; 111 dst_release(dst);
112 dst = NULL;
116 } 113 }
117 dst_hold(dst);
118 } 114 }
119 rcu_read_unlock(); 115 rcu_read_unlock();
120 return (struct rtable *)dst; 116 return (struct rtable *)dst;
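
tunnel_rtable_get() above now takes its reference with atomic_inc_not_zero(): if the cached dst's refcount has already reached zero the entry is mid-teardown and the lookup must fail, instead of dst_hold() resurrecting it. The primitive itself can be sketched with C11 atomics as below; get_not_zero()/put() are illustrative names, and the RCU read-side protection around the real lookup is assumed rather than shown.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic int refcnt = 1;

/* Take a reference only if the object is not already on its way out. */
static bool get_not_zero(_Atomic int *cnt)
{
    int old = atomic_load_explicit(cnt, memory_order_relaxed);

    while (old != 0) {
        if (atomic_compare_exchange_weak_explicit(cnt, &old, old + 1,
                                                  memory_order_acquire,
                                                  memory_order_relaxed))
            return true;               /* reference taken */
    }
    return false;                      /* already zero: treat as a miss */
}

static void put(_Atomic int *cnt)
{
    atomic_fetch_sub_explicit(cnt, 1, memory_order_release);
}

int main(void)
{
    printf("got ref: %d\n", get_not_zero(&refcnt));   /* 1 */
    put(&refcnt);
    put(&refcnt);                     /* last reference gone, refcnt == 0 */
    printf("got ref: %d\n", get_not_zero(&refcnt));   /* 0 */
    return 0;
}
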
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index 62e48cf84e60..9771563ab564 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -131,7 +131,7 @@ static bool tcp_fastopen_create_child(struct sock *sk,
131 struct dst_entry *dst, 131 struct dst_entry *dst,
132 struct request_sock *req) 132 struct request_sock *req)
133{ 133{
134 struct tcp_sock *tp = tcp_sk(sk); 134 struct tcp_sock *tp;
135 struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue; 135 struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
136 struct sock *child; 136 struct sock *child;
137 137
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 40661fc1e233..b5c23756965a 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1162,7 +1162,7 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
1162 unsigned int new_len = (pkt_len / mss) * mss; 1162 unsigned int new_len = (pkt_len / mss) * mss;
1163 if (!in_sack && new_len < pkt_len) { 1163 if (!in_sack && new_len < pkt_len) {
1164 new_len += mss; 1164 new_len += mss;
1165 if (new_len > skb->len) 1165 if (new_len >= skb->len)
1166 return 0; 1166 return 0;
1167 } 1167 }
1168 pkt_len = new_len; 1168 pkt_len = new_len;
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index c42e83d2751c..581a6584ed0c 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -3778,6 +3778,7 @@ static void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net)
3778 cancel_delayed_work_sync(&ipvs->defense_work); 3778 cancel_delayed_work_sync(&ipvs->defense_work);
3779 cancel_work_sync(&ipvs->defense_work.work); 3779 cancel_work_sync(&ipvs->defense_work.work);
3780 unregister_net_sysctl_table(ipvs->sysctl_hdr); 3780 unregister_net_sysctl_table(ipvs->sysctl_hdr);
3781 ip_vs_stop_estimator(net, &ipvs->tot_stats);
3781} 3782}
3782 3783
3783#else 3784#else
@@ -3840,7 +3841,6 @@ void __net_exit ip_vs_control_net_cleanup(struct net *net)
3840 struct netns_ipvs *ipvs = net_ipvs(net); 3841 struct netns_ipvs *ipvs = net_ipvs(net);
3841 3842
3842 ip_vs_trash_cleanup(net); 3843 ip_vs_trash_cleanup(net);
3843 ip_vs_stop_estimator(net, &ipvs->tot_stats);
3844 ip_vs_control_net_cleanup_sysctl(net); 3844 ip_vs_control_net_cleanup_sysctl(net);
3845 remove_proc_entry("ip_vs_stats_percpu", net->proc_net); 3845 remove_proc_entry("ip_vs_stats_percpu", net->proc_net);
3846 remove_proc_entry("ip_vs_stats", net->proc_net); 3846 remove_proc_entry("ip_vs_stats", net->proc_net);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 58579634427d..300ed1eec729 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -597,6 +597,9 @@ ctnetlink_nlmsg_size(const struct nf_conn *ct)
597#ifdef CONFIG_NF_CONNTRACK_MARK 597#ifdef CONFIG_NF_CONNTRACK_MARK
598 + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */ 598 + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
599#endif 599#endif
600#ifdef CONFIG_NF_CONNTRACK_ZONES
601 + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE */
602#endif
600 + ctnetlink_proto_size(ct) 603 + ctnetlink_proto_size(ct)
601 + ctnetlink_label_size(ct) 604 + ctnetlink_label_size(ct)
602 ; 605 ;
@@ -1150,7 +1153,7 @@ static int ctnetlink_done_list(struct netlink_callback *cb)
1150static int 1153static int
1151ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying) 1154ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying)
1152{ 1155{
1153 struct nf_conn *ct, *last = NULL; 1156 struct nf_conn *ct, *last;
1154 struct nf_conntrack_tuple_hash *h; 1157 struct nf_conntrack_tuple_hash *h;
1155 struct hlist_nulls_node *n; 1158 struct hlist_nulls_node *n;
1156 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh); 1159 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
@@ -1163,8 +1166,7 @@ ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying
1163 if (cb->args[2]) 1166 if (cb->args[2])
1164 return 0; 1167 return 0;
1165 1168
1166 if (cb->args[0] == nr_cpu_ids) 1169 last = (struct nf_conn *)cb->args[1];
1167 return 0;
1168 1170
1169 for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) { 1171 for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
1170 struct ct_pcpu *pcpu; 1172 struct ct_pcpu *pcpu;
@@ -1174,7 +1176,6 @@ ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying
1174 1176
1175 pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu); 1177 pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
1176 spin_lock_bh(&pcpu->lock); 1178 spin_lock_bh(&pcpu->lock);
1177 last = (struct nf_conn *)cb->args[1];
1178 list = dying ? &pcpu->dying : &pcpu->unconfirmed; 1179 list = dying ? &pcpu->dying : &pcpu->unconfirmed;
1179restart: 1180restart:
1180 hlist_nulls_for_each_entry(h, n, list, hnnode) { 1181 hlist_nulls_for_each_entry(h, n, list, hnnode) {
@@ -1193,7 +1194,9 @@ restart:
1193 ct); 1194 ct);
1194 rcu_read_unlock(); 1195 rcu_read_unlock();
1195 if (res < 0) { 1196 if (res < 0) {
1196 nf_conntrack_get(&ct->ct_general); 1197 if (!atomic_inc_not_zero(&ct->ct_general.use))
1198 continue;
1199 cb->args[0] = cpu;
1197 cb->args[1] = (unsigned long)ct; 1200 cb->args[1] = (unsigned long)ct;
1198 spin_unlock_bh(&pcpu->lock); 1201 spin_unlock_bh(&pcpu->lock);
1199 goto out; 1202 goto out;
@@ -1202,10 +1205,10 @@ restart:
1202 if (cb->args[1]) { 1205 if (cb->args[1]) {
1203 cb->args[1] = 0; 1206 cb->args[1] = 0;
1204 goto restart; 1207 goto restart;
1205 } else 1208 }
1206 cb->args[2] = 1;
1207 spin_unlock_bh(&pcpu->lock); 1209 spin_unlock_bh(&pcpu->lock);
1208 } 1210 }
1211 cb->args[2] = 1;
1209out: 1212out:
1210 if (last) 1213 if (last)
1211 nf_ct_put(last); 1214 nf_ct_put(last);
@@ -2040,6 +2043,9 @@ ctnetlink_nfqueue_build_size(const struct nf_conn *ct)
2040#ifdef CONFIG_NF_CONNTRACK_MARK 2043#ifdef CONFIG_NF_CONNTRACK_MARK
2041 + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */ 2044 + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
2042#endif 2045#endif
2046#ifdef CONFIG_NF_CONNTRACK_ZONES
2047 + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE */
2048#endif
2043 + ctnetlink_proto_size(ct) 2049 + ctnetlink_proto_size(ct)
2044 ; 2050 ;
2045} 2051}
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 09096a670c45..a49907b1dabc 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -525,6 +525,39 @@ static int nf_nat_proto_remove(struct nf_conn *i, void *data)
525 return i->status & IPS_NAT_MASK ? 1 : 0; 525 return i->status & IPS_NAT_MASK ? 1 : 0;
526} 526}
527 527
528static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
529{
530 struct nf_conn_nat *nat = nfct_nat(ct);
531
532 if (nf_nat_proto_remove(ct, data))
533 return 1;
534
535 if (!nat || !nat->ct)
536 return 0;
537
538 /* This netns is being destroyed, and conntrack has nat null binding.
539 * Remove it from bysource hash, as the table will be freed soon.
540 *
541 * Else, when the conntrack is destroyed, nf_nat_cleanup_conntrack()
542 * will delete entry from already-freed table.
543 */
544 if (!del_timer(&ct->timeout))
545 return 1;
546
547 spin_lock_bh(&nf_nat_lock);
548 hlist_del_rcu(&nat->bysource);
549 ct->status &= ~IPS_NAT_DONE_MASK;
550 nat->ct = NULL;
551 spin_unlock_bh(&nf_nat_lock);
552
553 add_timer(&ct->timeout);
554
555 /* don't delete conntrack. Although that would make things a lot
556 * simpler, we'd end up flushing all conntracks on nat rmmod.
557 */
558 return 0;
559}
560
528static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto) 561static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
529{ 562{
530 struct nf_nat_proto_clean clean = { 563 struct nf_nat_proto_clean clean = {
@@ -795,7 +828,7 @@ static void __net_exit nf_nat_net_exit(struct net *net)
795{ 828{
796 struct nf_nat_proto_clean clean = {}; 829 struct nf_nat_proto_clean clean = {};
797 830
798 nf_ct_iterate_cleanup(net, &nf_nat_proto_remove, &clean, 0, 0); 831 nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean, 0, 0);
799 synchronize_rcu(); 832 synchronize_rcu();
800 nf_ct_free_hashtable(net->ct.nat_bysource, net->ct.nat_htable_size); 833 nf_ct_free_hashtable(net->ct.nat_bysource, net->ct.nat_htable_size);
801} 834}
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 624e083125b9..ab4566cfcbe4 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -1730,6 +1730,9 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
1730 if (!create || nlh->nlmsg_flags & NLM_F_REPLACE) 1730 if (!create || nlh->nlmsg_flags & NLM_F_REPLACE)
1731 return -EINVAL; 1731 return -EINVAL;
1732 handle = nf_tables_alloc_handle(table); 1732 handle = nf_tables_alloc_handle(table);
1733
1734 if (chain->use == UINT_MAX)
1735 return -EOVERFLOW;
1733 } 1736 }
1734 1737
1735 if (nla[NFTA_RULE_POSITION]) { 1738 if (nla[NFTA_RULE_POSITION]) {
@@ -1789,14 +1792,15 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
1789 1792
1790 if (nlh->nlmsg_flags & NLM_F_REPLACE) { 1793 if (nlh->nlmsg_flags & NLM_F_REPLACE) {
1791 if (nft_rule_is_active_next(net, old_rule)) { 1794 if (nft_rule_is_active_next(net, old_rule)) {
1792 trans = nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, 1795 trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE,
1793 old_rule); 1796 old_rule);
1794 if (trans == NULL) { 1797 if (trans == NULL) {
1795 err = -ENOMEM; 1798 err = -ENOMEM;
1796 goto err2; 1799 goto err2;
1797 } 1800 }
1798 nft_rule_disactivate_next(net, old_rule); 1801 nft_rule_disactivate_next(net, old_rule);
1799 list_add_tail(&rule->list, &old_rule->list); 1802 chain->use--;
1803 list_add_tail_rcu(&rule->list, &old_rule->list);
1800 } else { 1804 } else {
1801 err = -ENOENT; 1805 err = -ENOENT;
1802 goto err2; 1806 goto err2;
@@ -1826,6 +1830,7 @@ err3:
1826 list_del_rcu(&nft_trans_rule(trans)->list); 1830 list_del_rcu(&nft_trans_rule(trans)->list);
1827 nft_rule_clear(net, nft_trans_rule(trans)); 1831 nft_rule_clear(net, nft_trans_rule(trans));
1828 nft_trans_destroy(trans); 1832 nft_trans_destroy(trans);
1833 chain->use++;
1829 } 1834 }
1830err2: 1835err2:
1831 nf_tables_rule_destroy(&ctx, rule); 1836 nf_tables_rule_destroy(&ctx, rule);
@@ -2845,7 +2850,7 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
2845 goto nla_put_failure; 2850 goto nla_put_failure;
2846 2851
2847 nfmsg = nlmsg_data(nlh); 2852 nfmsg = nlmsg_data(nlh);
2848 nfmsg->nfgen_family = NFPROTO_UNSPEC; 2853 nfmsg->nfgen_family = ctx.afi->family;
2849 nfmsg->version = NFNETLINK_V0; 2854 nfmsg->version = NFNETLINK_V0;
2850 nfmsg->res_id = 0; 2855 nfmsg->res_id = 0;
2851 2856
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index 8a779be832fb..1840989092ed 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -195,6 +195,15 @@ static void
195nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) 195nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
196{ 196{
197 struct xt_target *target = expr->ops->data; 197 struct xt_target *target = expr->ops->data;
198 void *info = nft_expr_priv(expr);
199 struct xt_tgdtor_param par;
200
201 par.net = ctx->net;
202 par.target = target;
203 par.targinfo = info;
204 par.family = ctx->afi->family;
205 if (par.target->destroy != NULL)
206 par.target->destroy(&par);
198 207
199 module_put(target->me); 208 module_put(target->me);
200} 209}
@@ -382,6 +391,15 @@ static void
382nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) 391nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
383{ 392{
384 struct xt_match *match = expr->ops->data; 393 struct xt_match *match = expr->ops->data;
394 void *info = nft_expr_priv(expr);
395 struct xt_mtdtor_param par;
396
397 par.net = ctx->net;
398 par.match = match;
399 par.matchinfo = info;
400 par.family = ctx->afi->family;
401 if (par.match->destroy != NULL)
402 par.match->destroy(&par);
385 403
386 module_put(match->me); 404 module_put(match->me);
387} 405}
diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
index a0195d28bcfc..79ff58cd36dc 100644
--- a/net/netfilter/nft_nat.c
+++ b/net/netfilter/nft_nat.c
@@ -175,12 +175,14 @@ static int nft_nat_dump(struct sk_buff *skb, const struct nft_expr *expr)
 	if (nla_put_be32(skb,
			 NFTA_NAT_REG_ADDR_MAX, htonl(priv->sreg_addr_max)))
 		goto nla_put_failure;
-	if (nla_put_be32(skb,
-			 NFTA_NAT_REG_PROTO_MIN, htonl(priv->sreg_proto_min)))
-		goto nla_put_failure;
-	if (nla_put_be32(skb,
-			 NFTA_NAT_REG_PROTO_MAX, htonl(priv->sreg_proto_max)))
-		goto nla_put_failure;
+	if (priv->sreg_proto_min) {
+		if (nla_put_be32(skb, NFTA_NAT_REG_PROTO_MIN,
+				 htonl(priv->sreg_proto_min)))
+			goto nla_put_failure;
+		if (nla_put_be32(skb, NFTA_NAT_REG_PROTO_MAX,
+				 htonl(priv->sreg_proto_max)))
+			goto nla_put_failure;
+	}
 	return 0;
 
 nla_put_failure:
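The nft_nat change only dumps the proto registers when they are actually populated instead of emitting zeroes. A tiny stand-alone illustration of that skip-unset-attributes idea, with a hypothetical emit() standing in for nla_put_be32():

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

struct demo_nat_priv {
	uint32_t sreg_proto_min;
	uint32_t sreg_proto_max;
};

static int emit(const char *name, uint32_t be32)
{
	printf("%s = 0x%08x\n", name, be32);
	return 0;
}

static int demo_nat_dump(const struct demo_nat_priv *priv)
{
	/* Unset registers are skipped entirely rather than dumped as 0. */
	if (priv->sreg_proto_min) {
		if (emit("NFTA_NAT_REG_PROTO_MIN", htonl(priv->sreg_proto_min)))
			return -1;
		if (emit("NFTA_NAT_REG_PROTO_MAX", htonl(priv->sreg_proto_max)))
			return -1;
	}
	return 0;
}

int main(void)
{
	struct demo_nat_priv p = { .sreg_proto_min = 1, .sreg_proto_max = 2 };

	return demo_nat_dump(&p);
}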
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index dcb19592761e..12c7e01c2677 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -321,41 +321,40 @@ static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write,
			     loff_t *ppos)
 {
 	struct net *net = current->nsproxy->net_ns;
-	char tmp[8];
 	struct ctl_table tbl;
-	int ret;
-	int changed = 0;
+	bool changed = false;
 	char *none = "none";
+	char tmp[8];
+	int ret;
 
 	memset(&tbl, 0, sizeof(struct ctl_table));
 
 	if (write) {
 		tbl.data = tmp;
-		tbl.maxlen = 8;
+		tbl.maxlen = sizeof(tmp);
 	} else {
 		tbl.data = net->sctp.sctp_hmac_alg ? : none;
 		tbl.maxlen = strlen(tbl.data);
 	}
-	ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
 
-	if (write) {
+	ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
+	if (write && ret == 0) {
 #ifdef CONFIG_CRYPTO_MD5
 		if (!strncmp(tmp, "md5", 3)) {
 			net->sctp.sctp_hmac_alg = "md5";
-			changed = 1;
+			changed = true;
 		}
 #endif
#ifdef CONFIG_CRYPTO_SHA1
 		if (!strncmp(tmp, "sha1", 4)) {
 			net->sctp.sctp_hmac_alg = "sha1";
-			changed = 1;
+			changed = true;
 		}
 #endif
 		if (!strncmp(tmp, "none", 4)) {
 			net->sctp.sctp_hmac_alg = NULL;
-			changed = 1;
+			changed = true;
 		}
-
 		if (!changed)
 			ret = -EINVAL;
 	}
@@ -368,11 +367,10 @@ static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write,
			     loff_t *ppos)
 {
 	struct net *net = current->nsproxy->net_ns;
-	int new_value;
-	struct ctl_table tbl;
 	unsigned int min = *(unsigned int *) ctl->extra1;
 	unsigned int max = *(unsigned int *) ctl->extra2;
-	int ret;
+	struct ctl_table tbl;
+	int ret, new_value;
 
 	memset(&tbl, 0, sizeof(struct ctl_table));
 	tbl.maxlen = sizeof(unsigned int);
@@ -381,12 +379,15 @@ static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write,
 		tbl.data = &new_value;
 	else
 		tbl.data = &net->sctp.rto_min;
+
 	ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
-	if (write) {
-		if (ret || new_value > max || new_value < min)
+	if (write && ret == 0) {
+		if (new_value > max || new_value < min)
 			return -EINVAL;
+
 		net->sctp.rto_min = new_value;
 	}
+
 	return ret;
 }
 
@@ -395,11 +396,10 @@ static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
			     loff_t *ppos)
 {
 	struct net *net = current->nsproxy->net_ns;
-	int new_value;
-	struct ctl_table tbl;
 	unsigned int min = *(unsigned int *) ctl->extra1;
 	unsigned int max = *(unsigned int *) ctl->extra2;
-	int ret;
+	struct ctl_table tbl;
+	int ret, new_value;
 
 	memset(&tbl, 0, sizeof(struct ctl_table));
 	tbl.maxlen = sizeof(unsigned int);
@@ -408,12 +408,15 @@ static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
 		tbl.data = &new_value;
 	else
 		tbl.data = &net->sctp.rto_max;
+
 	ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
-	if (write) {
-		if (ret || new_value > max || new_value < min)
+	if (write && ret == 0) {
+		if (new_value > max || new_value < min)
 			return -EINVAL;
+
 		net->sctp.rto_max = new_value;
 	}
+
 	return ret;
 }
 
@@ -444,8 +447,7 @@ static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
 		tbl.data = &net->sctp.auth_enable;
 
 	ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
-
-	if (write) {
+	if (write && ret == 0) {
 		struct sock *sk = net->sctp.ctl_sock;
 
 		net->sctp.auth_enable = new_value;
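All four SCTP sysctl handlers now commit a value only when the parse succeeded (write && ret == 0) and, where applicable, the value sits inside [min, max]. A compilable sketch of that flow, with proc_dointvec() replaced by a trivial parser purely for illustration; every name here is a stand-in:

#include <stdio.h>

static int parse_int(const char *buf, int *out)
{
	return sscanf(buf, "%d", out) == 1 ? 0 : -1;
}

static int demo_do_rto_min(const char *buf, int write,
			   unsigned int min, unsigned int max,
			   unsigned int *rto_min)
{
	int new_value = 0;
	int ret;

	ret = write ? parse_int(buf, &new_value) : 0;
	if (write && ret == 0) {
		/* Range check first, then commit; nothing changes on error. */
		if (new_value > (int)max || new_value < (int)min)
			return -1;	/* -EINVAL in the kernel */
		*rto_min = new_value;
	}
	return ret;
}

int main(void)
{
	unsigned int rto_min = 1000;

	demo_do_rto_min("200", 1, 1, 5000, &rto_min);
	printf("rto_min = %u\n", rto_min);	/* prints 200 */
	return 0;
}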
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 247e973544bf..f77366717420 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -592,6 +592,7 @@ rpcauth_lookupcred(struct rpc_auth *auth, int flags)
 	put_group_info(acred.group_info);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(rpcauth_lookupcred);
 
 void
 rpcauth_init_cred(struct rpc_cred *cred, const struct auth_cred *acred,
diff --git a/samples/trace_events/trace-events-sample.h b/samples/trace_events/trace-events-sample.h
index 6af373236d73..4b0113f73ee9 100644
--- a/samples/trace_events/trace-events-sample.h
+++ b/samples/trace_events/trace-events-sample.h
@@ -56,7 +56,8 @@
  * struct: This defines the way the data will be stored in the ring buffer.
  *    There are currently two types of elements. __field and __array.
  *    a __field is broken up into (type, name). Where type can be any
- *    type but an array.
+ *    primitive type (integer, long or pointer). __field_struct() can
+ *    be any static complex data value (struct, union, but not an array).
  *    For an array. there are three fields. (type, name, size). The
  *    type of elements in the array, the name of the field and the size
  *    of the array.
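The updated comment distinguishes __field (primitive types), __field_struct (a struct or union copied by value) and __array (fixed-size arrays). A hypothetical event mirroring the sample header's layout, shown only to illustrate the three element kinds; it would live inside a trace header like this one and does not compile on its own:

/* All names below are invented for illustration. */
TRACE_EVENT(demo_event,

	TP_PROTO(int cpu, struct timespec *ts, const char *name),

	TP_ARGS(cpu, ts, name),

	TP_STRUCT__entry(
		__field(	int,		cpu	)	/* primitive type */
		__field_struct(	struct timespec, ts	)	/* struct stored by value */
		__array(	char,		name,	16 )	/* fixed-size array */
	),

	TP_fast_assign(
		__entry->cpu = cpu;
		__entry->ts = *ts;
		memcpy(__entry->name, name, 16);
	),

	TP_printk("cpu=%d sec=%ld name=%s",
		  __entry->cpu, (long)__entry->ts.tv_sec, __entry->name)
);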
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 010b18ef4ea0..182be0f12407 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -3476,12 +3476,17 @@ sub process {
 			}
 		}
 
-# unnecessary return in a void function? (a single leading tab, then return;)
-		if ($sline =~ /^\+\treturn\s*;\s*$/ &&
-		    $prevline =~ /^\+/) {
+# unnecessary return in a void function
+# at end-of-function, with the previous line a single leading tab, then return;
+# and the line before that not a goto label target like "out:"
+		if ($sline =~ /^[ \+]}\s*$/ &&
+		    $prevline =~ /^\+\treturn\s*;\s*$/ &&
+		    $linenr >= 3 &&
+		    $lines[$linenr - 3] =~ /^[ +]/ &&
+		    $lines[$linenr - 3] !~ /^[ +]\s*$Ident\s*:/) {
 			WARN("RETURN_VOID",
-			     "void function return statements are not generally useful\n" . $herecurr);
+			     "void function return statements are not generally useful\n" . $hereprev);
 		}
 
# if statements using unnecessary parentheses - ie: if ((foo == bar))
 		if ($^V && $^V ge 5.10.0 &&
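The tightened checkpatch rule only fires when a bare return; sits directly before the function's closing brace and the line above it is not a goto label. Two illustrative C functions, the first still warned about and the second now left alone:

void demo_flagged(int *p)
{
	*p = 0;
	return;		/* flagged: plain return; right before the closing brace */
}

void demo_not_flagged(int *p)
{
	if (!p)
		goto out;
	*p = 0;
out:
	return;		/* not flagged: the preceding line is a label target */
}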
diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h
index 9d1421e63ff8..49b582a225b0 100644
--- a/scripts/recordmcount.h
+++ b/scripts/recordmcount.h
@@ -163,11 +163,11 @@ static int mcount_adjust = 0;
 
 static int MIPS_is_fake_mcount(Elf_Rel const *rp)
 {
-	static Elf_Addr old_r_offset;
+	static Elf_Addr old_r_offset = ~(Elf_Addr)0;
 	Elf_Addr current_r_offset = _w(rp->r_offset);
 	int is_fake;
 
-	is_fake = old_r_offset &&
+	is_fake = (old_r_offset != ~(Elf_Addr)0) &&
		(current_r_offset - old_r_offset == MIPS_FAKEMCOUNT_OFFSET);
 	old_r_offset = current_r_offset;
 
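The recordmcount fix replaces 0 with an all-ones sentinel because an r_offset of 0 is itself a legitimate value, so "zero means not seen yet" could misfire on the first record. A small stand-alone demonstration of the sentinel idea, with Elf_Addr stood in by uint64_t:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t demo_addr;

#define DEMO_FAKEMCOUNT_OFFSET 4

static int demo_is_fake(demo_addr current)
{
	static demo_addr old = ~(demo_addr)0;	/* sentinel: no previous offset */
	int is_fake;

	is_fake = (old != ~(demo_addr)0) &&
		  (current - old == DEMO_FAKEMCOUNT_OFFSET);
	old = current;
	return is_fake;
}

int main(void)
{
	/* First call sees the sentinel and never reports a fake entry,
	 * even though the offset passed in is 0. */
	printf("%d %d\n", demo_is_fake(0), demo_is_fake(4));	/* prints: 0 1 */
	return 0;
}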
diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c
index b684c6e4f301..dabe41975a9d 100644
--- a/sound/pci/hda/hda_auto_parser.c
+++ b/sound/pci/hda/hda_auto_parser.c
@@ -898,6 +898,7 @@ void snd_hda_pick_fixup(struct hda_codec *codec,
 			if (!strcmp(codec->modelname, models->name)) {
 				codec->fixup_id = models->id;
 				codec->fixup_name = models->name;
+				codec->fixup_list = fixlist;
 				codec->fixup_forced = 1;
 				return;
 			}
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 23fd6b9aecca..25753db97071 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -288,6 +288,24 @@ static char *driver_short_names[] = {
 	[AZX_DRIVER_GENERIC] = "HD-Audio Generic",
 };
 
+
+/* Intel HSW/BDW display HDA controller Extended Mode registers.
+ * EM4 (M value) and EM5 (N Value) are used to convert CDClk (Core Display
+ * Clock) to 24MHz BCLK: BCLK = CDCLK * M / N
+ * The values will be lost when the display power well is disabled.
+ */
+#define ICH6_REG_EM4			0x100c
+#define ICH6_REG_EM5			0x1010
+
+struct hda_intel {
+	struct azx chip;
+
+	/* HSW/BDW display HDA controller to restore BCLK from CDCLK */
+	unsigned int bclk_m;
+	unsigned int bclk_n;
+};
+
+
 #ifdef CONFIG_X86
 static void __mark_pages_wc(struct azx *chip, struct snd_dma_buffer *dmab, bool on)
 {
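struct azx is now embedded as the first member of the driver-private struct hda_intel, and the BCLK helpers below recover the outer struct with container_of(); the same embedding is why azx_free() later frees the hda pointer rather than chip, and why azx_create() allocates sizeof(*hda). A compilable toy version of that pattern; types and register values are stand-ins:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_azx {
	int irq;
};

struct demo_hda_intel {
	struct demo_azx chip;		/* embedded, not a pointer */
	unsigned int bclk_m;
	unsigned int bclk_n;
};

static void demo_save_bclk(struct demo_azx *chip)
{
	struct demo_hda_intel *hda =
		container_of(chip, struct demo_hda_intel, chip);

	hda->bclk_m = 123;		/* would be azx_readw(chip, EM4) */
	hda->bclk_n = 456;		/* would be azx_readw(chip, EM5) */
}

int main(void)
{
	struct demo_hda_intel hda = { .chip = { .irq = -1 } };

	demo_save_bclk(&hda.chip);
	printf("M=%u N=%u\n", hda.bclk_m, hda.bclk_n);
	return 0;
}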
@@ -580,6 +598,22 @@ static int param_set_xint(const char *val, const struct kernel_param *kp)
580#define azx_del_card_list(chip) /* NOP */ 598#define azx_del_card_list(chip) /* NOP */
581#endif /* CONFIG_PM */ 599#endif /* CONFIG_PM */
582 600
601static void haswell_save_bclk(struct azx *chip)
602{
603 struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
604
605 hda->bclk_m = azx_readw(chip, EM4);
606 hda->bclk_n = azx_readw(chip, EM5);
607}
608
609static void haswell_restore_bclk(struct azx *chip)
610{
611 struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
612
613 azx_writew(chip, EM4, hda->bclk_m);
614 azx_writew(chip, EM5, hda->bclk_n);
615}
616
583#if defined(CONFIG_PM_SLEEP) || defined(SUPPORT_VGA_SWITCHEROO) 617#if defined(CONFIG_PM_SLEEP) || defined(SUPPORT_VGA_SWITCHEROO)
584/* 618/*
585 * power management 619 * power management
@@ -606,6 +640,13 @@ static int azx_suspend(struct device *dev)
606 free_irq(chip->irq, chip); 640 free_irq(chip->irq, chip);
607 chip->irq = -1; 641 chip->irq = -1;
608 } 642 }
643
644 /* Save BCLK M/N values before they become invalid in D3.
645 * Will test if display power well can be released now.
646 */
647 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
648 haswell_save_bclk(chip);
649
609 if (chip->msi) 650 if (chip->msi)
610 pci_disable_msi(chip->pci); 651 pci_disable_msi(chip->pci);
611 pci_disable_device(pci); 652 pci_disable_device(pci);
@@ -625,8 +666,10 @@ static int azx_resume(struct device *dev)
625 if (chip->disabled) 666 if (chip->disabled)
626 return 0; 667 return 0;
627 668
628 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) 669 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
629 hda_display_power(true); 670 hda_display_power(true);
671 haswell_restore_bclk(chip);
672 }
630 pci_set_power_state(pci, PCI_D0); 673 pci_set_power_state(pci, PCI_D0);
631 pci_restore_state(pci); 674 pci_restore_state(pci);
632 if (pci_enable_device(pci) < 0) { 675 if (pci_enable_device(pci) < 0) {
@@ -670,8 +713,10 @@ static int azx_runtime_suspend(struct device *dev)
670 azx_stop_chip(chip); 713 azx_stop_chip(chip);
671 azx_enter_link_reset(chip); 714 azx_enter_link_reset(chip);
672 azx_clear_irq_pending(chip); 715 azx_clear_irq_pending(chip);
673 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) 716 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
717 haswell_save_bclk(chip);
674 hda_display_power(false); 718 hda_display_power(false);
719 }
675 return 0; 720 return 0;
676} 721}
677 722
@@ -689,8 +734,10 @@ static int azx_runtime_resume(struct device *dev)
689 if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME)) 734 if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
690 return 0; 735 return 0;
691 736
692 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) 737 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
693 hda_display_power(true); 738 hda_display_power(true);
739 haswell_restore_bclk(chip);
740 }
694 741
695 /* Read STATESTS before controller reset */ 742 /* Read STATESTS before controller reset */
696 status = azx_readw(chip, STATESTS); 743 status = azx_readw(chip, STATESTS);
@@ -883,6 +930,8 @@ static int register_vga_switcheroo(struct azx *chip)
883static int azx_free(struct azx *chip) 930static int azx_free(struct azx *chip)
884{ 931{
885 struct pci_dev *pci = chip->pci; 932 struct pci_dev *pci = chip->pci;
933 struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
934
886 int i; 935 int i;
887 936
888 if ((chip->driver_caps & AZX_DCAPS_PM_RUNTIME) 937 if ((chip->driver_caps & AZX_DCAPS_PM_RUNTIME)
@@ -930,7 +979,7 @@ static int azx_free(struct azx *chip)
930 hda_display_power(false); 979 hda_display_power(false);
931 hda_i915_exit(); 980 hda_i915_exit();
932 } 981 }
933 kfree(chip); 982 kfree(hda);
934 983
935 return 0; 984 return 0;
936} 985}
@@ -1174,6 +1223,7 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
1174 static struct snd_device_ops ops = { 1223 static struct snd_device_ops ops = {
1175 .dev_free = azx_dev_free, 1224 .dev_free = azx_dev_free,
1176 }; 1225 };
1226 struct hda_intel *hda;
1177 struct azx *chip; 1227 struct azx *chip;
1178 int err; 1228 int err;
1179 1229
@@ -1183,13 +1233,14 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
1183 if (err < 0) 1233 if (err < 0)
1184 return err; 1234 return err;
1185 1235
1186 chip = kzalloc(sizeof(*chip), GFP_KERNEL); 1236 hda = kzalloc(sizeof(*hda), GFP_KERNEL);
1187 if (!chip) { 1237 if (!hda) {
1188 dev_err(card->dev, "Cannot allocate chip\n"); 1238 dev_err(card->dev, "Cannot allocate hda\n");
1189 pci_disable_device(pci); 1239 pci_disable_device(pci);
1190 return -ENOMEM; 1240 return -ENOMEM;
1191 } 1241 }
1192 1242
1243 chip = &hda->chip;
1193 spin_lock_init(&chip->reg_lock); 1244 spin_lock_init(&chip->reg_lock);
1194 mutex_init(&chip->open_mutex); 1245 mutex_init(&chip->open_mutex);
1195 chip->card = card; 1246 chip->card = card;
diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
index ebd1fa6f015c..4e2d4863daa1 100644
--- a/sound/pci/hda/hda_local.h
+++ b/sound/pci/hda/hda_local.h
@@ -417,6 +417,27 @@ struct snd_hda_pin_quirk {
417 int value; /* quirk value */ 417 int value; /* quirk value */
418}; 418};
419 419
420#ifdef CONFIG_SND_DEBUG_VERBOSE
421
422#define SND_HDA_PIN_QUIRK(_codec, _subvendor, _name, _value, _pins...) \
423 { .codec = _codec,\
424 .subvendor = _subvendor,\
425 .name = _name,\
426 .value = _value,\
427 .pins = (const struct hda_pintbl[]) { _pins } \
428 }
429#else
430
431#define SND_HDA_PIN_QUIRK(_codec, _subvendor, _name, _value, _pins...) \
432 { .codec = _codec,\
433 .subvendor = _subvendor,\
434 .value = _value,\
435 .pins = (const struct hda_pintbl[]) { _pins } \
436 }
437
438#endif
439
440
420/* fixup types */ 441/* fixup types */
421enum { 442enum {
422 HDA_FIXUP_INVALID, 443 HDA_FIXUP_INVALID,
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 3e4417b0ddbe..4fe876b65fda 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -2204,7 +2204,7 @@ static int generic_hdmi_resume(struct hda_codec *codec)
 	struct hdmi_spec *spec = codec->spec;
 	int pin_idx;
 
-	generic_hdmi_init(codec);
+	codec->patch_ops.init(codec);
 	snd_hda_codec_resume_amp(codec);
 	snd_hda_codec_resume_cache(codec);
 
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index af76995fa966..1c654effcd1a 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4962,228 +4962,129 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
4962}; 4962};
4963 4963
4964static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { 4964static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
4965 { 4965 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
4966 .codec = 0x10ec0255, 4966 {0x12, 0x90a60140},
4967 .subvendor = 0x1028, 4967 {0x14, 0x90170110},
4968#ifdef CONFIG_SND_DEBUG_VERBOSE 4968 {0x17, 0x40000000},
4969 .name = "Dell", 4969 {0x18, 0x411111f0},
4970#endif 4970 {0x19, 0x411111f0},
4971 .pins = (const struct hda_pintbl[]) { 4971 {0x1a, 0x411111f0},
4972 {0x12, 0x90a60140}, 4972 {0x1b, 0x411111f0},
4973 {0x14, 0x90170110}, 4973 {0x1d, 0x40700001},
4974 {0x17, 0x40000000}, 4974 {0x1e, 0x411111f0},
4975 {0x18, 0x411111f0}, 4975 {0x21, 0x02211020}),
4976 {0x19, 0x411111f0}, 4976 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
4977 {0x1a, 0x411111f0}, 4977 {0x12, 0x90a60160},
4978 {0x1b, 0x411111f0}, 4978 {0x14, 0x90170120},
4979 {0x1d, 0x40700001}, 4979 {0x17, 0x40000000},
4980 {0x1e, 0x411111f0}, 4980 {0x18, 0x411111f0},
4981 {0x21, 0x02211020}, 4981 {0x19, 0x411111f0},
4982 }, 4982 {0x1a, 0x411111f0},
4983 .value = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, 4983 {0x1b, 0x411111f0},
4984 }, 4984 {0x1d, 0x40700001},
4985 { 4985 {0x1e, 0x411111f0},
4986 .codec = 0x10ec0255, 4986 {0x21, 0x02211030}),
4987 .subvendor = 0x1028, 4987 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
4988#ifdef CONFIG_SND_DEBUG_VERBOSE 4988 {0x12, 0x90a60160},
4989 .name = "Dell", 4989 {0x14, 0x90170120},
4990#endif 4990 {0x17, 0x90170140},
4991 .pins = (const struct hda_pintbl[]) { 4991 {0x18, 0x40000000},
4992 {0x12, 0x90a60160}, 4992 {0x19, 0x411111f0},
4993 {0x14, 0x90170120}, 4993 {0x1a, 0x411111f0},
4994 {0x17, 0x40000000}, 4994 {0x1b, 0x411111f0},
4995 {0x18, 0x411111f0}, 4995 {0x1d, 0x41163b05},
4996 {0x19, 0x411111f0}, 4996 {0x1e, 0x411111f0},
4997 {0x1a, 0x411111f0}, 4997 {0x21, 0x0321102f}),
4998 {0x1b, 0x411111f0}, 4998 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
4999 {0x1d, 0x40700001}, 4999 {0x12, 0x90a60160},
5000 {0x1e, 0x411111f0}, 5000 {0x14, 0x90170130},
5001 {0x21, 0x02211030}, 5001 {0x17, 0x40000000},
5002 }, 5002 {0x18, 0x411111f0},
5003 .value = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, 5003 {0x19, 0x411111f0},
5004 }, 5004 {0x1a, 0x411111f0},
5005 { 5005 {0x1b, 0x411111f0},
5006 .codec = 0x10ec0255, 5006 {0x1d, 0x40700001},
5007 .subvendor = 0x1028, 5007 {0x1e, 0x411111f0},
5008#ifdef CONFIG_SND_DEBUG_VERBOSE 5008 {0x21, 0x02211040}),
5009 .name = "Dell", 5009 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5010#endif 5010 {0x12, 0x90a60160},
5011 .pins = (const struct hda_pintbl[]) { 5011 {0x14, 0x90170140},
5012 {0x12, 0x90a60160}, 5012 {0x17, 0x40000000},
5013 {0x14, 0x90170120}, 5013 {0x18, 0x411111f0},
5014 {0x17, 0x90170140}, 5014 {0x19, 0x411111f0},
5015 {0x18, 0x40000000}, 5015 {0x1a, 0x411111f0},
5016 {0x19, 0x411111f0}, 5016 {0x1b, 0x411111f0},
5017 {0x1a, 0x411111f0}, 5017 {0x1d, 0x40700001},
5018 {0x1b, 0x411111f0}, 5018 {0x1e, 0x411111f0},
5019 {0x1d, 0x41163b05}, 5019 {0x21, 0x02211050}),
5020 {0x1e, 0x411111f0}, 5020 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5021 {0x21, 0x0321102f}, 5021 {0x12, 0x90a60170},
5022 }, 5022 {0x14, 0x90170120},
5023 .value = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, 5023 {0x17, 0x40000000},
5024 }, 5024 {0x18, 0x411111f0},
5025 { 5025 {0x19, 0x411111f0},
5026 .codec = 0x10ec0255, 5026 {0x1a, 0x411111f0},
5027 .subvendor = 0x1028, 5027 {0x1b, 0x411111f0},
5028#ifdef CONFIG_SND_DEBUG_VERBOSE 5028 {0x1d, 0x40700001},
5029 .name = "Dell", 5029 {0x1e, 0x411111f0},
5030#endif 5030 {0x21, 0x02211030}),
5031 .pins = (const struct hda_pintbl[]) { 5031 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5032 {0x12, 0x90a60160}, 5032 {0x12, 0x90a60170},
5033 {0x14, 0x90170130}, 5033 {0x14, 0x90170130},
5034 {0x17, 0x40000000}, 5034 {0x17, 0x40000000},
5035 {0x18, 0x411111f0}, 5035 {0x18, 0x411111f0},
5036 {0x19, 0x411111f0}, 5036 {0x19, 0x411111f0},
5037 {0x1a, 0x411111f0}, 5037 {0x1a, 0x411111f0},
5038 {0x1b, 0x411111f0}, 5038 {0x1b, 0x411111f0},
5039 {0x1d, 0x40700001}, 5039 {0x1d, 0x40700001},
5040 {0x1e, 0x411111f0}, 5040 {0x1e, 0x411111f0},
5041 {0x21, 0x02211040}, 5041 {0x21, 0x02211040}),
5042 }, 5042 SND_HDA_PIN_QUIRK(0x10ec0283, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
5043 .value = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, 5043 {0x12, 0x90a60130},
5044 }, 5044 {0x14, 0x90170110},
5045 { 5045 {0x17, 0x40020008},
5046 .codec = 0x10ec0255, 5046 {0x18, 0x411111f0},
5047 .subvendor = 0x1028, 5047 {0x19, 0x411111f0},
5048#ifdef CONFIG_SND_DEBUG_VERBOSE 5048 {0x1a, 0x411111f0},
5049 .name = "Dell", 5049 {0x1b, 0x411111f0},
5050#endif 5050 {0x1d, 0x40e00001},
5051 .pins = (const struct hda_pintbl[]) { 5051 {0x1e, 0x411111f0},
5052 {0x12, 0x90a60160}, 5052 {0x21, 0x0321101f}),
5053 {0x14, 0x90170140}, 5053 SND_HDA_PIN_QUIRK(0x10ec0283, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
5054 {0x17, 0x40000000}, 5054 {0x12, 0x90a60160},
5055 {0x18, 0x411111f0}, 5055 {0x14, 0x90170120},
5056 {0x19, 0x411111f0}, 5056 {0x17, 0x40000000},
5057 {0x1a, 0x411111f0}, 5057 {0x18, 0x411111f0},
5058 {0x1b, 0x411111f0}, 5058 {0x19, 0x411111f0},
5059 {0x1d, 0x40700001}, 5059 {0x1a, 0x411111f0},
5060 {0x1e, 0x411111f0}, 5060 {0x1b, 0x411111f0},
5061 {0x21, 0x02211050}, 5061 {0x1d, 0x40700001},
5062 }, 5062 {0x1e, 0x411111f0},
5063 .value = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, 5063 {0x21, 0x02211030}),
5064 }, 5064 SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL3_MIC_NO_PRESENCE,
5065 { 5065 {0x12, 0x90a60140},
5066 .codec = 0x10ec0255, 5066 {0x13, 0x411111f0},
5067 .subvendor = 0x1028, 5067 {0x14, 0x90170110},
5068#ifdef CONFIG_SND_DEBUG_VERBOSE 5068 {0x15, 0x0221401f},
5069 .name = "Dell", 5069 {0x16, 0x411111f0},
5070#endif 5070 {0x18, 0x411111f0},
5071 .pins = (const struct hda_pintbl[]) { 5071 {0x19, 0x411111f0},
5072 {0x12, 0x90a60170}, 5072 {0x1a, 0x411111f0},
5073 {0x14, 0x90170120}, 5073 {0x1b, 0x411111f0},
5074 {0x17, 0x40000000}, 5074 {0x1d, 0x40700001},
5075 {0x18, 0x411111f0}, 5075 {0x1e, 0x411111f0}),
5076 {0x19, 0x411111f0}, 5076 SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
5077 {0x1a, 0x411111f0}, 5077 {0x12, 0x40000000},
5078 {0x1b, 0x411111f0}, 5078 {0x13, 0x90a60140},
5079 {0x1d, 0x40700001}, 5079 {0x14, 0x90170110},
5080 {0x1e, 0x411111f0}, 5080 {0x15, 0x0221401f},
5081 {0x21, 0x02211030}, 5081 {0x16, 0x21014020},
5082 }, 5082 {0x18, 0x411111f0},
5083 .value = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, 5083 {0x19, 0x21a19030},
5084 }, 5084 {0x1a, 0x411111f0},
5085 { 5085 {0x1b, 0x411111f0},
5086 .codec = 0x10ec0255, 5086 {0x1d, 0x40700001},
5087 .subvendor = 0x1028, 5087 {0x1e, 0x411111f0}),
5088#ifdef CONFIG_SND_DEBUG_VERBOSE
5089 .name = "Dell",
5090#endif
5091 .pins = (const struct hda_pintbl[]) {
5092 {0x12, 0x90a60170},
5093 {0x14, 0x90170130},
5094 {0x17, 0x40000000},
5095 {0x18, 0x411111f0},
5096 {0x19, 0x411111f0},
5097 {0x1a, 0x411111f0},
5098 {0x1b, 0x411111f0},
5099 {0x1d, 0x40700001},
5100 {0x1e, 0x411111f0},
5101 {0x21, 0x02211040},
5102 },
5103 .value = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5104 },
5105 {
5106 .codec = 0x10ec0283,
5107 .subvendor = 0x1028,
5108#ifdef CONFIG_SND_DEBUG_VERBOSE
5109 .name = "Dell",
5110#endif
5111 .pins = (const struct hda_pintbl[]) {
5112 {0x12, 0x90a60130},
5113 {0x14, 0x90170110},
5114 {0x17, 0x40020008},
5115 {0x18, 0x411111f0},
5116 {0x19, 0x411111f0},
5117 {0x1a, 0x411111f0},
5118 {0x1b, 0x411111f0},
5119 {0x1d, 0x40e00001},
5120 {0x1e, 0x411111f0},
5121 {0x21, 0x0321101f},
5122 },
5123 .value = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
5124 },
5125 {
5126 .codec = 0x10ec0283,
5127 .subvendor = 0x1028,
5128#ifdef CONFIG_SND_DEBUG_VERBOSE
5129 .name = "Dell",
5130#endif
5131 .pins = (const struct hda_pintbl[]) {
5132 {0x12, 0x90a60160},
5133 {0x14, 0x90170120},
5134 {0x17, 0x40000000},
5135 {0x18, 0x411111f0},
5136 {0x19, 0x411111f0},
5137 {0x1a, 0x411111f0},
5138 {0x1b, 0x411111f0},
5139 {0x1d, 0x40700001},
5140 {0x1e, 0x411111f0},
5141 {0x21, 0x02211030},
5142 },
5143 .value = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
5144 },
5145 {
5146 .codec = 0x10ec0292,
5147 .subvendor = 0x1028,
5148#ifdef CONFIG_SND_DEBUG_VERBOSE
5149 .name = "Dell",
5150#endif
5151 .pins = (const struct hda_pintbl[]) {
5152 {0x12, 0x90a60140},
5153 {0x13, 0x411111f0},
5154 {0x14, 0x90170110},
5155 {0x15, 0x0221401f},
5156 {0x16, 0x411111f0},
5157 {0x18, 0x411111f0},
5158 {0x19, 0x411111f0},
5159 {0x1a, 0x411111f0},
5160 {0x1b, 0x411111f0},
5161 {0x1d, 0x40700001},
5162 {0x1e, 0x411111f0},
5163 },
5164 .value = ALC269_FIXUP_DELL3_MIC_NO_PRESENCE,
5165 },
5166 {
5167 .codec = 0x10ec0293,
5168 .subvendor = 0x1028,
5169#ifdef CONFIG_SND_DEBUG_VERBOSE
5170 .name = "Dell",
5171#endif
5172 .pins = (const struct hda_pintbl[]) {
5173 {0x12, 0x40000000},
5174 {0x13, 0x90a60140},
5175 {0x14, 0x90170110},
5176 {0x15, 0x0221401f},
5177 {0x16, 0x21014020},
5178 {0x18, 0x411111f0},
5179 {0x19, 0x21a19030},
5180 {0x1a, 0x411111f0},
5181 {0x1b, 0x411111f0},
5182 {0x1d, 0x40700001},
5183 {0x1e, 0x411111f0},
5184 },
5185 .value = ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
5186 },
5187 {} 5088 {}
5188}; 5089};
5189 5090
@@ -6039,90 +5940,66 @@ static const struct hda_model_fixup alc662_fixup_models[] = {
6039}; 5940};
6040 5941
6041static const struct snd_hda_pin_quirk alc662_pin_fixup_tbl[] = { 5942static const struct snd_hda_pin_quirk alc662_pin_fixup_tbl[] = {
6042 { 5943 SND_HDA_PIN_QUIRK(0x10ec0668, 0x1028, "Dell", ALC668_FIXUP_AUTO_MUTE,
6043 .codec = 0x10ec0668, 5944 {0x12, 0x99a30130},
6044 .subvendor = 0x1028, 5945 {0x14, 0x90170110},
6045#ifdef CONFIG_SND_DEBUG_VERBOSE 5946 {0x15, 0x0321101f},
6046 .name = "Dell", 5947 {0x16, 0x03011020},
6047#endif 5948 {0x18, 0x40000008},
6048 .pins = (const struct hda_pintbl[]) { 5949 {0x19, 0x411111f0},
6049 {0x12, 0x99a30130}, 5950 {0x1a, 0x411111f0},
6050 {0x14, 0x90170110}, 5951 {0x1b, 0x411111f0},
6051 {0x15, 0x0321101f}, 5952 {0x1d, 0x41000001},
6052 {0x16, 0x03011020}, 5953 {0x1e, 0x411111f0},
6053 {0x18, 0x40000008}, 5954 {0x1f, 0x411111f0}),
6054 {0x19, 0x411111f0}, 5955 SND_HDA_PIN_QUIRK(0x10ec0668, 0x1028, "Dell", ALC668_FIXUP_AUTO_MUTE,
6055 {0x1a, 0x411111f0}, 5956 {0x12, 0x99a30140},
6056 {0x1b, 0x411111f0}, 5957 {0x14, 0x90170110},
6057 {0x1d, 0x41000001}, 5958 {0x15, 0x0321101f},
6058 {0x1e, 0x411111f0}, 5959 {0x16, 0x03011020},
6059 {0x1f, 0x411111f0}, 5960 {0x18, 0x40000008},
6060 }, 5961 {0x19, 0x411111f0},
6061 .value = ALC668_FIXUP_AUTO_MUTE, 5962 {0x1a, 0x411111f0},
6062 }, 5963 {0x1b, 0x411111f0},
6063 { 5964 {0x1d, 0x41000001},
6064 .codec = 0x10ec0668, 5965 {0x1e, 0x411111f0},
6065 .subvendor = 0x1028, 5966 {0x1f, 0x411111f0}),
6066#ifdef CONFIG_SND_DEBUG_VERBOSE 5967 SND_HDA_PIN_QUIRK(0x10ec0668, 0x1028, "Dell", ALC668_FIXUP_AUTO_MUTE,
6067 .name = "Dell", 5968 {0x12, 0x99a30150},
6068#endif 5969 {0x14, 0x90170110},
6069 .pins = (const struct hda_pintbl[]) { 5970 {0x15, 0x0321101f},
6070 {0x12, 0x99a30140}, 5971 {0x16, 0x03011020},
6071 {0x14, 0x90170110}, 5972 {0x18, 0x40000008},
6072 {0x15, 0x0321101f}, 5973 {0x19, 0x411111f0},
6073 {0x16, 0x03011020}, 5974 {0x1a, 0x411111f0},
6074 {0x18, 0x40000008}, 5975 {0x1b, 0x411111f0},
6075 {0x19, 0x411111f0}, 5976 {0x1d, 0x41000001},
6076 {0x1a, 0x411111f0}, 5977 {0x1e, 0x411111f0},
6077 {0x1b, 0x411111f0}, 5978 {0x1f, 0x411111f0}),
6078 {0x1d, 0x41000001}, 5979 SND_HDA_PIN_QUIRK(0x10ec0668, 0x1028, "Dell", ALC668_FIXUP_AUTO_MUTE,
6079 {0x1e, 0x411111f0}, 5980 {0x12, 0x411111f0},
6080 {0x1f, 0x411111f0}, 5981 {0x14, 0x90170110},
6081 }, 5982 {0x15, 0x0321101f},
6082 .value = ALC668_FIXUP_AUTO_MUTE, 5983 {0x16, 0x03011020},
6083 }, 5984 {0x18, 0x40000008},
6084 { 5985 {0x19, 0x411111f0},
6085 .codec = 0x10ec0668, 5986 {0x1a, 0x411111f0},
6086 .subvendor = 0x1028, 5987 {0x1b, 0x411111f0},
6087#ifdef CONFIG_SND_DEBUG_VERBOSE 5988 {0x1d, 0x41000001},
6088 .name = "Dell", 5989 {0x1e, 0x411111f0},
6089#endif 5990 {0x1f, 0x411111f0}),
6090 .pins = (const struct hda_pintbl[]) { 5991 SND_HDA_PIN_QUIRK(0x10ec0668, 0x1028, "Dell XPS 15", ALC668_FIXUP_AUTO_MUTE,
6091 {0x12, 0x99a30150}, 5992 {0x12, 0x90a60130},
6092 {0x14, 0x90170110}, 5993 {0x14, 0x90170110},
6093 {0x15, 0x0321101f}, 5994 {0x15, 0x0321101f},
6094 {0x16, 0x03011020}, 5995 {0x16, 0x40000000},
6095 {0x18, 0x40000008}, 5996 {0x18, 0x411111f0},
6096 {0x19, 0x411111f0}, 5997 {0x19, 0x411111f0},
6097 {0x1a, 0x411111f0}, 5998 {0x1a, 0x411111f0},
6098 {0x1b, 0x411111f0}, 5999 {0x1b, 0x411111f0},
6099 {0x1d, 0x41000001}, 6000 {0x1d, 0x40d6832d},
6100 {0x1e, 0x411111f0}, 6001 {0x1e, 0x411111f0},
6101 {0x1f, 0x411111f0}, 6002 {0x1f, 0x411111f0}),
6102 },
6103 .value = ALC668_FIXUP_AUTO_MUTE,
6104 },
6105 {
6106 .codec = 0x10ec0668,
6107 .subvendor = 0x1028,
6108#ifdef CONFIG_SND_DEBUG_VERBOSE
6109 .name = "Dell",
6110#endif
6111 .pins = (const struct hda_pintbl[]) {
6112 {0x12, 0x411111f0},
6113 {0x14, 0x90170110},
6114 {0x15, 0x0321101f},
6115 {0x16, 0x03011020},
6116 {0x18, 0x40000008},
6117 {0x19, 0x411111f0},
6118 {0x1a, 0x411111f0},
6119 {0x1b, 0x411111f0},
6120 {0x1d, 0x41000001},
6121 {0x1e, 0x411111f0},
6122 {0x1f, 0x411111f0},
6123 },
6124 .value = ALC668_FIXUP_AUTO_MUTE,
6125 },
6126 {} 6003 {}
6127}; 6004};
6128 6005
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 7f40a150899c..3744ea4e843d 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -122,6 +122,12 @@ enum {
122}; 122};
123 123
124enum { 124enum {
125 STAC_92HD95_HP_LED,
126 STAC_92HD95_HP_BASS,
127 STAC_92HD95_MODELS
128};
129
130enum {
125 STAC_925x_REF, 131 STAC_925x_REF,
126 STAC_M1, 132 STAC_M1,
127 STAC_M1_2, 133 STAC_M1_2,
@@ -4128,6 +4134,48 @@ static const struct snd_pci_quirk stac9205_fixup_tbl[] = {
4128 {} /* terminator */ 4134 {} /* terminator */
4129}; 4135};
4130 4136
4137static void stac92hd95_fixup_hp_led(struct hda_codec *codec,
4138 const struct hda_fixup *fix, int action)
4139{
4140 struct sigmatel_spec *spec = codec->spec;
4141
4142 if (action != HDA_FIXUP_ACT_PRE_PROBE)
4143 return;
4144
4145 if (find_mute_led_cfg(codec, spec->default_polarity))
4146 codec_dbg(codec, "mute LED gpio %d polarity %d\n",
4147 spec->gpio_led,
4148 spec->gpio_led_polarity);
4149}
4150
4151static const struct hda_fixup stac92hd95_fixups[] = {
4152 [STAC_92HD95_HP_LED] = {
4153 .type = HDA_FIXUP_FUNC,
4154 .v.func = stac92hd95_fixup_hp_led,
4155 },
4156 [STAC_92HD95_HP_BASS] = {
4157 .type = HDA_FIXUP_VERBS,
4158 .v.verbs = (const struct hda_verb[]) {
4159 {0x1a, 0x795, 0x00}, /* HPF to 100Hz */
4160 {}
4161 },
4162 .chained = true,
4163 .chain_id = STAC_92HD95_HP_LED,
4164 },
4165};
4166
4167static const struct snd_pci_quirk stac92hd95_fixup_tbl[] = {
4168 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1911, "HP Spectre 13", STAC_92HD95_HP_BASS),
4169 {} /* terminator */
4170};
4171
4172static const struct hda_model_fixup stac92hd95_models[] = {
4173 { .id = STAC_92HD95_HP_LED, .name = "hp-led" },
4174 { .id = STAC_92HD95_HP_BASS, .name = "hp-bass" },
4175 {}
4176};
4177
4178
4131static int stac_parse_auto_config(struct hda_codec *codec) 4179static int stac_parse_auto_config(struct hda_codec *codec)
4132{ 4180{
4133 struct sigmatel_spec *spec = codec->spec; 4181 struct sigmatel_spec *spec = codec->spec;
@@ -4580,10 +4628,16 @@ static int patch_stac92hd95(struct hda_codec *codec)
4580 spec->gen.beep_nid = 0x19; /* digital beep */ 4628 spec->gen.beep_nid = 0x19; /* digital beep */
4581 spec->pwr_nids = stac92hd95_pwr_nids; 4629 spec->pwr_nids = stac92hd95_pwr_nids;
4582 spec->num_pwrs = ARRAY_SIZE(stac92hd95_pwr_nids); 4630 spec->num_pwrs = ARRAY_SIZE(stac92hd95_pwr_nids);
4583 spec->default_polarity = -1; /* no default cfg */ 4631 spec->default_polarity = 0;
4584 4632
4585 codec->patch_ops = stac_patch_ops; 4633 codec->patch_ops = stac_patch_ops;
4586 4634
4635 snd_hda_pick_fixup(codec, stac92hd95_models, stac92hd95_fixup_tbl,
4636 stac92hd95_fixups);
4637 snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);
4638
4639 stac_setup_gpio(codec);
4640
4587 err = stac_parse_auto_config(codec); 4641 err = stac_parse_auto_config(codec);
4588 if (err < 0) { 4642 if (err < 0) {
4589 stac_free(codec); 4643 stac_free(codec);
@@ -4592,6 +4646,8 @@ static int patch_stac92hd95(struct hda_codec *codec)
4592 4646
4593 codec->proc_widget_hook = stac92hd_proc_hook; 4647 codec->proc_widget_hook = stac92hd_proc_hook;
4594 4648
4649 snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PROBE);
4650
4595 return 0; 4651 return 0;
4596} 4652}
4597 4653
diff --git a/sound/usb/card.c b/sound/usb/card.c
index c3b5b7dca1c3..a09e5f3519e3 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -307,6 +307,11 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
307 307
308static int snd_usb_audio_free(struct snd_usb_audio *chip) 308static int snd_usb_audio_free(struct snd_usb_audio *chip)
309{ 309{
310 struct list_head *p, *n;
311
312 list_for_each_safe(p, n, &chip->ep_list)
313 snd_usb_endpoint_free(p);
314
310 mutex_destroy(&chip->mutex); 315 mutex_destroy(&chip->mutex);
311 kfree(chip); 316 kfree(chip);
312 return 0; 317 return 0;
@@ -585,7 +590,7 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
585 struct snd_usb_audio *chip) 590 struct snd_usb_audio *chip)
586{ 591{
587 struct snd_card *card; 592 struct snd_card *card;
588 struct list_head *p, *n; 593 struct list_head *p;
589 594
590 if (chip == (void *)-1L) 595 if (chip == (void *)-1L)
591 return; 596 return;
@@ -598,14 +603,16 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
598 mutex_lock(&register_mutex); 603 mutex_lock(&register_mutex);
599 chip->num_interfaces--; 604 chip->num_interfaces--;
600 if (chip->num_interfaces <= 0) { 605 if (chip->num_interfaces <= 0) {
606 struct snd_usb_endpoint *ep;
607
601 snd_card_disconnect(card); 608 snd_card_disconnect(card);
602 /* release the pcm resources */ 609 /* release the pcm resources */
603 list_for_each(p, &chip->pcm_list) { 610 list_for_each(p, &chip->pcm_list) {
604 snd_usb_stream_disconnect(p); 611 snd_usb_stream_disconnect(p);
605 } 612 }
606 /* release the endpoint resources */ 613 /* release the endpoint resources */
607 list_for_each_safe(p, n, &chip->ep_list) { 614 list_for_each_entry(ep, &chip->ep_list, list) {
608 snd_usb_endpoint_free(p); 615 snd_usb_endpoint_release(ep);
609 } 616 }
610 /* release the midi resources */ 617 /* release the midi resources */
611 list_for_each(p, &chip->midi_list) { 618 list_for_each(p, &chip->midi_list) {
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index 289f582c9130..114e3e7ff511 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -987,19 +987,30 @@ void snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep)
 }
 
 /**
+ * snd_usb_endpoint_release: Tear down an snd_usb_endpoint
+ *
+ * @ep: the endpoint to release
+ *
+ * This function does not care for the endpoint's use count but will tear
+ * down all the streaming URBs immediately.
+ */
+void snd_usb_endpoint_release(struct snd_usb_endpoint *ep)
+{
+	release_urbs(ep, 1);
+}
+
+/**
  * snd_usb_endpoint_free: Free the resources of an snd_usb_endpoint
  *
  * @ep: the list header of the endpoint to free
  *
- * This function does not care for the endpoint's use count but will tear
- * down all the streaming URBs immediately and free all resources.
+ * This frees all resources of the given ep.
  */
 void snd_usb_endpoint_free(struct list_head *head)
 {
 	struct snd_usb_endpoint *ep;
 
 	ep = list_entry(head, struct snd_usb_endpoint, list);
-	release_urbs(ep, 1);
 	kfree(ep);
 }
 
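The endpoint teardown is now split in two: release stops the streaming URBs at disconnect time, while free only returns the memory once nothing can reach the endpoint any more. A minimal sketch of such a two-phase teardown, with invented types in place of the driver's:

#include <stdlib.h>

struct demo_endpoint {
	int urbs_active;
};

static void demo_endpoint_release(struct demo_endpoint *ep)
{
	ep->urbs_active = 0;	/* corresponds to release_urbs(ep, 1) */
}

static void demo_endpoint_free(struct demo_endpoint *ep)
{
	free(ep);		/* memory only; no URB work here */
}

int main(void)
{
	struct demo_endpoint *ep = calloc(1, sizeof(*ep));

	if (!ep)
		return 1;
	ep->urbs_active = 1;
	demo_endpoint_release(ep);	/* at disconnect */
	demo_endpoint_free(ep);		/* later, when the card goes away */
	return 0;
}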
diff --git a/sound/usb/endpoint.h b/sound/usb/endpoint.h
index 1c7e8ee48abc..e61ee5c356a3 100644
--- a/sound/usb/endpoint.h
+++ b/sound/usb/endpoint.h
@@ -23,6 +23,7 @@ void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep);
 void snd_usb_endpoint_sync_pending_stop(struct snd_usb_endpoint *ep);
 int snd_usb_endpoint_activate(struct snd_usb_endpoint *ep);
 void snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep);
+void snd_usb_endpoint_release(struct snd_usb_endpoint *ep);
 void snd_usb_endpoint_free(struct list_head *head);
 
 int snd_usb_endpoint_implicit_feedback_sink(struct snd_usb_endpoint *ep);
diff --git a/tools/testing/selftests/powerpc/tm/Makefile b/tools/testing/selftests/powerpc/tm/Makefile
index 51267f4184a6..2cede239a074 100644
--- a/tools/testing/selftests/powerpc/tm/Makefile
+++ b/tools/testing/selftests/powerpc/tm/Makefile
@@ -2,7 +2,7 @@ PROGS := tm-resched-dscr
 
 all: $(PROGS)
 
-$(PROGS):
+$(PROGS): ../harness.c
 
 run_tests: all
	@-for PROG in $(PROGS); do \
diff --git a/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c b/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c
index ee98e3886af2..42d4c8caad81 100644
--- a/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c
+++ b/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c
@@ -28,6 +28,8 @@
 #include <assert.h>
 #include <asm/tm.h>
 
+#include "utils.h"
+
 #define TBEGIN          ".long 0x7C00051D ;"
 #define TEND            ".long 0x7C00055D ;"
 #define TCHECK          ".long 0x7C00059C ;"
@@ -36,7 +38,8 @@
 #define SPRN_TEXASR     0x82
 #define SPRN_DSCR       0x03
 
-int main(void) {
+int test_body(void)
+{
 	uint64_t rv, dscr1 = 1, dscr2, texasr;
 
 	printf("Check DSCR TM context switch: ");
@@ -81,10 +84,15 @@ int main(void) {
 		}
 		if (dscr2 != dscr1) {
 			printf(" FAIL\n");
-			exit(EXIT_FAILURE);
+			return 1;
 		} else {
 			printf(" OK\n");
-			exit(EXIT_SUCCESS);
+			return 0;
 		}
 	}
 }
+
+int main(void)
+{
+	return test_harness(test_body, "tm_resched_dscr");
+}
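After this conversion, a powerpc selftest body returns 0 or 1 instead of calling exit(), and main() merely hands the body to test_harness(). A sketch of another test following the same shape, assuming the selftests' utils.h and test_harness() from tools/testing/selftests/powerpc are available as in the tree above; the test name and body are invented:

#include <stdio.h>
#include "utils.h"

static int demo_test_body(void)
{
	printf("Checking something trivial: ");
	if (1 + 1 != 2) {
		printf(" FAIL\n");
		return 1;	/* harness reports failure */
	}
	printf(" OK\n");
	return 0;		/* harness reports success */
}

int main(void)
{
	return test_harness(demo_test_body, "demo_test");
}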