path: root/arch
author     Thomas Gleixner <tglx@linutronix.de>    2016-01-12 05:01:12 -0500
committer  Thomas Gleixner <tglx@linutronix.de>    2016-01-12 05:01:12 -0500
commit     1f16f116b01c110db20ab808562c8b8bc3ee3d6e (patch)
tree       44db563f64cf5f8d62af8f99a61e2b248c44ea3a /arch
parent     03724ac3d48f8f0e3caf1d30fa134f8fd96c94e2 (diff)
parent     f9eccf24615672896dc13251410c3f2f33a14f95 (diff)
Merge branches 'clockevents/4.4-fixes' and 'clockevents/4.5-fixes' of http://git.linaro.org/people/daniel.lezcano/linux into timers/urgent
Pull in fixes from Daniel Lezcano:

 - Fix the vt8500 timer, which could lock up the system when handed a
   too-small delta (Roman Volkov)

 - Select CLKSRC_MMIO when the fsl_ftm_timer is enabled with
   COMPILE_TEST (Daniel Lezcano)

 - Prevent timers that use the 'iomem' API from being compiled when the
   architecture does not have HAS_IOMEM set (Richard Weinberger)
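For context on the first item: the HAS_IOMEM and CLKSRC_MMIO entries are one-line Kconfig dependency fixes, while the vt8500 lockup is the classic clockevents pitfall of programming a match value the hardware can no longer hit. The sketch below is illustrative only and is not the actual vt8500 driver code; the register offsets, MIN_DELTA value and timer_base mapping are made-up assumptions. It shows the usual defensive pattern: a ->set_next_event() handler returns -ETIME for deltas the match unit cannot latch, so the clockevents core retries with a larger delta instead of spinning forever.

  #include <linux/clockchips.h>
  #include <linux/errno.h>
  #include <linux/io.h>

  /* Hypothetical register layout, for illustration only */
  #define TIMER_COUNT   0x00    /* free-running counter */
  #define TIMER_MATCH   0x04    /* match/compare register */
  #define MIN_DELTA     16      /* smallest delta the match unit can latch */

  static void __iomem *timer_base;  /* mapped by (not shown) probe code */

  static int example_set_next_event(unsigned long delta,
                                    struct clock_event_device *evt)
  {
          unsigned long cnt = readl(timer_base + TIMER_COUNT);

          /* Refuse deltas the hardware cannot latch; the core will retry */
          if (delta < MIN_DELTA)
                  return -ETIME;

          writel(cnt + delta, timer_base + TIMER_MATCH);
          return 0;
  }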
Diffstat (limited to 'arch')
-rw-r--r-- arch/arc/Kconfig | 1
-rw-r--r-- arch/arc/Makefile | 2
-rw-r--r-- arch/arc/boot/dts/axs10x_mb.dtsi | 1
-rw-r--r-- arch/arc/boot/dts/nsim_hs.dts | 3
-rw-r--r-- arch/arc/configs/axs101_defconfig | 2
-rw-r--r-- arch/arc/configs/axs103_defconfig | 2
-rw-r--r-- arch/arc/configs/axs103_smp_defconfig | 2
-rw-r--r-- arch/arc/configs/nsim_hs_defconfig | 2
-rw-r--r-- arch/arc/configs/nsim_hs_smp_defconfig | 2
-rw-r--r-- arch/arc/configs/nsimosci_hs_defconfig | 2
-rw-r--r-- arch/arc/configs/nsimosci_hs_smp_defconfig | 2
-rw-r--r-- arch/arc/configs/vdk_hs38_defconfig | 2
-rw-r--r-- arch/arc/configs/vdk_hs38_smp_defconfig | 2
-rw-r--r-- arch/arc/include/asm/cache.h | 2
-rw-r--r-- arch/arc/include/asm/irqflags-arcv2.h | 3
-rw-r--r-- arch/arc/include/asm/irqflags-compact.h | 2
-rw-r--r-- arch/arc/include/asm/mach_desc.h | 4
-rw-r--r-- arch/arc/include/asm/smp.h | 4
-rw-r--r-- arch/arc/include/asm/unwind.h | 4
-rw-r--r-- arch/arc/kernel/ctx_sw.c | 2
-rw-r--r-- arch/arc/kernel/ctx_sw_asm.S | 3
-rw-r--r-- arch/arc/kernel/intc-arcv2.c | 15
-rw-r--r-- arch/arc/kernel/irq.c | 33
-rw-r--r-- arch/arc/kernel/mcip.c | 2
-rw-r--r-- arch/arc/kernel/perf_event.c | 32
-rw-r--r-- arch/arc/kernel/process.c | 9
-rw-r--r-- arch/arc/kernel/setup.c | 1
-rw-r--r-- arch/arc/kernel/smp.c | 8
-rw-r--r-- arch/arc/kernel/unwind.c | 94
-rw-r--r-- arch/arc/mm/highmem.c | 4
-rw-r--r-- arch/arc/mm/init.c | 4
-rw-r--r-- arch/arc/mm/tlb.c | 4
-rw-r--r-- arch/arm/Kconfig | 4
-rw-r--r-- arch/arm/boot/dts/am4372.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/am43xx-clocks.dtsi | 8
-rw-r--r-- arch/arm/boot/dts/am57xx-beagle-x15.dts | 1
-rw-r--r-- arch/arm/boot/dts/animeo_ip.dts | 6
-rw-r--r-- arch/arm/boot/dts/armada-38x.dtsi | 1
-rw-r--r-- arch/arm/boot/dts/at91-foxg20.dts | 2
-rw-r--r-- arch/arm/boot/dts/at91-kizbox.dts | 13
-rw-r--r-- arch/arm/boot/dts/at91-kizbox2.dts | 6
-rw-r--r-- arch/arm/boot/dts/at91-kizboxmini.dts | 4
-rw-r--r-- arch/arm/boot/dts/at91-qil_a9260.dts | 2
-rw-r--r-- arch/arm/boot/dts/at91-sama5d2_xplained.dts | 116
-rw-r--r-- arch/arm/boot/dts/at91-sama5d3_xplained.dts | 2
-rw-r--r-- arch/arm/boot/dts/at91-sama5d4_xplained.dts | 12
-rw-r--r-- arch/arm/boot/dts/at91-sama5d4ek.dts | 12
-rw-r--r-- arch/arm/boot/dts/at91rm9200ek.dts | 9
-rw-r--r-- arch/arm/boot/dts/at91sam9261ek.dts | 19
-rw-r--r-- arch/arm/boot/dts/at91sam9263ek.dts | 13
-rw-r--r-- arch/arm/boot/dts/at91sam9g20ek_common.dtsi | 13
-rw-r--r-- arch/arm/boot/dts/at91sam9m10g45ek.dts | 13
-rw-r--r-- arch/arm/boot/dts/at91sam9n12ek.dts | 11
-rw-r--r-- arch/arm/boot/dts/at91sam9rlek.dts | 13
-rw-r--r-- arch/arm/boot/dts/at91sam9x5cm.dtsi | 11
-rw-r--r-- arch/arm/boot/dts/berlin2q.dtsi | 8
-rw-r--r-- arch/arm/boot/dts/dm816x.dtsi | 8
-rw-r--r-- arch/arm/boot/dts/dra7.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/imx6q-gw5400-a.dts | 2
-rw-r--r-- arch/arm/boot/dts/imx6qdl-gw51xx.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/imx6qdl-gw52xx.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/imx6qdl-gw53xx.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/imx6qdl-gw54xx.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/imx6qdl-sabreauto.dtsi | 6
-rw-r--r-- arch/arm/boot/dts/k2l-netcp.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/kirkwood-ts219.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/omap4-duovero-parlor.dts | 4
-rw-r--r-- arch/arm/boot/dts/rk3288-veyron-minnie.dts | 4
-rw-r--r-- arch/arm/boot/dts/rk3288.dtsi | 10
-rw-r--r-- arch/arm/boot/dts/sama5d35ek.dts | 2
-rw-r--r-- arch/arm/boot/dts/sama5d4.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/sun6i-a31s-primo81.dts | 1
-rw-r--r-- arch/arm/boot/dts/tegra124-nyan.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/usb_a9260_common.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/usb_a9263.dts | 2
-rw-r--r-- arch/arm/boot/dts/vf610-colibri.dtsi | 5
-rw-r--r-- arch/arm/boot/dts/vf610.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/vfxxx.dtsi | 14
-rw-r--r-- arch/arm/configs/at91_dt_defconfig | 1
-rw-r--r-- arch/arm/configs/sama5_defconfig | 1
-rw-r--r-- arch/arm/include/asm/arch_gicv3.h | 1
-rw-r--r-- arch/arm/include/asm/irq.h | 5
-rw-r--r-- arch/arm/include/asm/kvm_emulate.h | 12
-rw-r--r-- arch/arm/include/asm/uaccess.h | 4
-rw-r--r-- arch/arm/include/uapi/asm/unistd.h | 1
-rw-r--r-- arch/arm/kernel/bios32.c | 19
-rw-r--r-- arch/arm/kernel/calls.S | 1
-rw-r--r-- arch/arm/kernel/process.c | 33
-rw-r--r-- arch/arm/kernel/swp_emulate.c | 6
-rw-r--r-- arch/arm/kvm/arm.c | 7
-rw-r--r-- arch/arm/kvm/mmio.c | 5
-rw-r--r-- arch/arm/kvm/mmu.c | 15
-rw-r--r-- arch/arm/kvm/psci.c | 20
-rw-r--r-- arch/arm/lib/uaccess_with_memcpy.c | 29
-rw-r--r-- arch/arm/mach-at91/Kconfig | 6
-rw-r--r-- arch/arm/mach-at91/pm.c | 7
-rw-r--r-- arch/arm/mach-dove/include/mach/entry-macro.S | 4
-rw-r--r-- arch/arm/mach-exynos/pmu.c | 6
-rw-r--r-- arch/arm/mach-imx/gpc.c | 1
-rw-r--r-- arch/arm/mach-ixp4xx/include/mach/io.h | 12
-rw-r--r-- arch/arm/mach-omap2/Kconfig | 4
-rw-r--r-- arch/arm/mach-omap2/omap-smp.c | 6
-rw-r--r-- arch/arm/mach-omap2/omap_hwmod.c | 66
-rw-r--r-- arch/arm/mach-omap2/omap_hwmod.h | 3
-rw-r--r-- arch/arm/mach-omap2/omap_hwmod_7xx_data.c | 56
-rw-r--r-- arch/arm/mach-omap2/omap_hwmod_81xx_data.c | 3
-rw-r--r-- arch/arm/mach-omap2/pdata-quirks.c | 29
-rw-r--r-- arch/arm/mach-omap2/pm34xx.c | 4
-rw-r--r-- arch/arm/mach-omap2/timer.c | 6
-rw-r--r-- arch/arm/mach-orion5x/include/mach/entry-macro.S | 2
-rw-r--r-- arch/arm/mach-pxa/ezx.c | 5
-rw-r--r-- arch/arm/mach-pxa/palm27x.c | 2
-rw-r--r-- arch/arm/mach-pxa/palmtc.c | 2
-rw-r--r-- arch/arm/mach-s3c24xx/pll-s3c2440-12000000.c | 2
-rw-r--r-- arch/arm/mach-s3c24xx/pll-s3c2440-16934400.c | 2
-rw-r--r-- arch/arm/mach-shmobile/setup-r8a7793.c | 2
-rw-r--r-- arch/arm/mach-zx/Kconfig | 2
-rw-r--r-- arch/arm/mm/context.c | 38
-rw-r--r-- arch/arm/mm/dma-mapping.c | 2
-rw-r--r-- arch/arm/mm/init.c | 92
-rw-r--r-- arch/arm/mm/proc-v7.S | 4
-rw-r--r-- arch/arm64/Kconfig | 23
-rw-r--r-- arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi | 5
-rw-r--r-- arch/arm64/include/asm/arch_gicv3.h | 1
-rw-r--r-- arch/arm64/include/asm/cpufeature.h | 25
-rw-r--r-- arch/arm64/include/asm/hw_breakpoint.h | 6
-rw-r--r-- arch/arm64/include/asm/irq.h | 5
-rw-r--r-- arch/arm64/include/asm/kvm_emulate.h | 18
-rw-r--r-- arch/arm64/include/asm/pgtable.h | 12
-rw-r--r-- arch/arm64/kernel/cpu_errata.c | 9
-rw-r--r-- arch/arm64/kernel/cpufeature.c | 37
-rw-r--r-- arch/arm64/kernel/efi.c | 33
-rw-r--r-- arch/arm64/kernel/vmlinux.lds.S | 5
-rw-r--r-- arch/arm64/kvm/handle_exit.c | 2
-rw-r--r-- arch/arm64/kvm/hyp.S | 14
-rw-r--r-- arch/arm64/kvm/inject_fault.c | 2
-rw-r--r-- arch/arm64/kvm/sys_regs.c | 123
-rw-r--r-- arch/arm64/kvm/sys_regs.h | 8
-rw-r--r-- arch/arm64/kvm/sys_regs_generic_v8.c | 4
-rw-r--r-- arch/arm64/mm/context.c | 38
-rw-r--r-- arch/arm64/mm/fault.c | 28
-rw-r--r-- arch/arm64/mm/mmu.c | 77
-rw-r--r-- arch/arm64/net/bpf_jit_comp.c | 47
-rw-r--r-- arch/blackfin/kernel/perf_event.c | 2
-rw-r--r-- arch/ia64/include/asm/unistd.h | 2
-rw-r--r-- arch/ia64/include/uapi/asm/unistd.h | 1
-rw-r--r-- arch/ia64/kernel/entry.S | 1
-rw-r--r-- arch/m68k/coldfire/m54xx.c | 2
-rw-r--r-- arch/m68k/include/asm/unistd.h | 2
-rw-r--r-- arch/m68k/include/uapi/asm/unistd.h | 1
-rw-r--r-- arch/m68k/kernel/setup_no.c | 9
-rw-r--r-- arch/m68k/kernel/syscalltable.S | 1
-rw-r--r-- arch/m68k/mm/motorola.c | 2
-rw-r--r-- arch/m68k/sun3/config.c | 4
-rw-r--r-- arch/microblaze/kernel/dma.c | 3
-rw-r--r-- arch/mips/include/asm/uaccess.h | 52
-rw-r--r-- arch/mips/kernel/cps-vec.S | 2
-rw-r--r-- arch/mips/kernel/mips_ksyms.c | 2
-rw-r--r-- arch/mips/kvm/emulate.c | 2
-rw-r--r-- arch/mips/kvm/locore.S | 16
-rw-r--r-- arch/mips/kvm/mips.c | 5
-rw-r--r-- arch/mips/lib/memset.S | 2
-rw-r--r-- arch/mips/mm/dma-default.c | 2
-rw-r--r-- arch/mips/pci/pci-rt2880.c | 5
-rw-r--r-- arch/mips/pmcs-msp71xx/msp_setup.c | 5
-rw-r--r-- arch/mips/sni/reset.c | 8
-rw-r--r-- arch/mips/vdso/Makefile | 4
-rw-r--r-- arch/mn10300/Kconfig | 4
-rw-r--r-- arch/nios2/mm/cacheflush.c | 24
-rw-r--r-- arch/parisc/include/asm/pgtable.h | 3
-rw-r--r-- arch/parisc/include/uapi/asm/unistd.h | 3
-rw-r--r-- arch/parisc/kernel/pci.c | 18
-rw-r--r-- arch/parisc/kernel/signal.c | 64
-rw-r--r-- arch/parisc/kernel/syscall_table.S | 1
-rw-r--r-- arch/powerpc/boot/dts/sbc8641d.dts | 8
-rw-r--r-- arch/powerpc/include/asm/reg.h | 1
-rw-r--r-- arch/powerpc/include/asm/systbl.h | 24
-rw-r--r-- arch/powerpc/include/uapi/asm/unistd.h | 12
-rw-r--r-- arch/powerpc/kernel/eeh_driver.c | 14
-rw-r--r-- arch/powerpc/kernel/process.c | 18
-rw-r--r-- arch/powerpc/kernel/signal_32.c | 14
-rw-r--r-- arch/powerpc/kernel/signal_64.c | 4
-rw-r--r-- arch/powerpc/kvm/book3s_hv.c | 6
-rw-r--r-- arch/powerpc/platforms/powernv/opal-irqchip.c | 64
-rw-r--r-- arch/powerpc/platforms/powernv/opal.c | 2
-rw-r--r-- arch/s390/kernel/dis.c | 17
-rw-r--r-- arch/s390/kvm/interrupt.c | 7
-rw-r--r-- arch/s390/kvm/kvm-s390.c | 6
-rw-r--r-- arch/s390/kvm/priv.c | 2
-rw-r--r-- arch/s390/kvm/sigp.c | 8
-rw-r--r-- arch/sh/include/uapi/asm/unistd_64.h | 2
-rw-r--r-- arch/sh/kernel/perf_event.c | 2
-rw-r--r-- arch/sparc/include/asm/elf_64.h | 1
-rw-r--r-- arch/sparc/include/uapi/asm/unistd.h | 3
-rw-r--r-- arch/sparc/kernel/head_64.S | 13
-rw-r--r-- arch/sparc/kernel/perf_event.c | 13
-rw-r--r-- arch/sparc/kernel/rtrap_64.S | 8
-rw-r--r-- arch/sparc/kernel/setup_64.c | 9
-rw-r--r-- arch/sparc/kernel/systbls_32.S | 2
-rw-r--r-- arch/sparc/kernel/systbls_64.S | 4
-rw-r--r-- arch/sparc/lib/NG2copy_from_user.S | 8
-rw-r--r-- arch/sparc/lib/NG2copy_to_user.S | 8
-rw-r--r-- arch/sparc/lib/NG2memcpy.S | 118
-rw-r--r-- arch/sparc/lib/NG4copy_from_user.S | 8
-rw-r--r-- arch/sparc/lib/NG4copy_to_user.S | 8
-rw-r--r-- arch/sparc/lib/NG4memcpy.S | 40
-rw-r--r-- arch/sparc/lib/U1copy_from_user.S | 8
-rw-r--r-- arch/sparc/lib/U1copy_to_user.S | 8
-rw-r--r-- arch/sparc/lib/U1memcpy.S | 48
-rw-r--r-- arch/sparc/lib/U3copy_from_user.S | 8
-rw-r--r-- arch/sparc/lib/U3copy_to_user.S | 8
-rw-r--r-- arch/sparc/lib/U3memcpy.S | 86
-rw-r--r-- arch/tile/kernel/perf_event.c | 2
-rw-r--r-- arch/um/Makefile | 2
-rw-r--r-- arch/um/drivers/net_user.c | 10
-rw-r--r-- arch/um/kernel/signal.c | 2
-rw-r--r-- arch/x86/boot/boot.h | 1
-rw-r--r-- arch/x86/boot/video-mode.c | 2
-rw-r--r-- arch/x86/boot/video.c | 2
-rw-r--r-- arch/x86/entry/entry_64.S | 19
-rw-r--r-- arch/x86/include/asm/page_types.h | 16
-rw-r--r-- arch/x86/include/asm/pgtable_types.h | 14
-rw-r--r-- arch/x86/include/asm/x86_init.h | 1
-rw-r--r-- arch/x86/kernel/cpu/microcode/core.c | 1
-rw-r--r-- arch/x86/kernel/cpu/perf_event.c | 2
-rw-r--r-- arch/x86/kernel/cpu/perf_event.h | 5
-rw-r--r-- arch/x86/kernel/cpu/perf_event_intel.c | 2
-rw-r--r-- arch/x86/kernel/cpu/perf_event_intel_cqm.c | 2
-rw-r--r-- arch/x86/kernel/cpu/perf_event_intel_lbr.c | 4
-rw-r--r-- arch/x86/kernel/irq_work.c | 2
-rw-r--r-- arch/x86/kernel/pmem.c | 12
-rw-r--r-- arch/x86/kernel/setup.c | 2
-rw-r--r-- arch/x86/kernel/signal.c | 17
-rw-r--r-- arch/x86/kernel/smpboot.c | 9
-rw-r--r-- arch/x86/kvm/cpuid.h | 8
-rw-r--r-- arch/x86/kvm/mtrr.c | 25
-rw-r--r-- arch/x86/kvm/svm.c | 4
-rw-r--r-- arch/x86/kvm/vmx.c | 12
-rw-r--r-- arch/x86/kvm/x86.c | 73
-rw-r--r-- arch/x86/mm/dump_pagetables.c | 2
-rw-r--r-- arch/x86/mm/mpx.c | 6
-rw-r--r-- arch/x86/pci/bus_numa.c | 13
-rw-r--r-- arch/x86/um/signal.c | 18
-rw-r--r-- arch/x86/xen/mmu.c | 9
-rw-r--r-- arch/x86/xen/suspend.c | 20
245 files changed, 1685 insertions(+), 1196 deletions(-)
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 2c2ac3f3ff80..6312f607932f 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -445,6 +445,7 @@ config LINUX_LINK_BASE
 	  However some customers have peripherals mapped at this addr, so
 	  Linux needs to be scooted a bit.
 	  If you don't know what the above means, leave this setting alone.
+	  This needs to match memory start address specified in Device Tree
 
 config HIGHMEM
 	bool "High Memory Support"
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index cf0cf34eeb24..aeb19021099e 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -81,7 +81,7 @@ endif
 LIBGCC	:= $(shell $(CC) $(cflags-y) --print-libgcc-file-name)
 
 # Modules with short calls might break for calls into builtin-kernel
-KBUILD_CFLAGS_MODULE	+= -mlong-calls
+KBUILD_CFLAGS_MODULE	+= -mlong-calls -mno-millicode
 
 # Finally dump eveything into kernel build system
 KBUILD_CFLAGS	+= $(cflags-y)
diff --git a/arch/arc/boot/dts/axs10x_mb.dtsi b/arch/arc/boot/dts/axs10x_mb.dtsi
index f3db32154973..44a578c10732 100644
--- a/arch/arc/boot/dts/axs10x_mb.dtsi
+++ b/arch/arc/boot/dts/axs10x_mb.dtsi
@@ -46,6 +46,7 @@
 			snps,pbl = < 32 >;
 			clocks = <&apbclk>;
 			clock-names = "stmmaceth";
+			max-speed = <100>;
 		};
 
 		ehci@0x40000 {
diff --git a/arch/arc/boot/dts/nsim_hs.dts b/arch/arc/boot/dts/nsim_hs.dts
index b0eb0e7fe21d..fc81879bc1f5 100644
--- a/arch/arc/boot/dts/nsim_hs.dts
+++ b/arch/arc/boot/dts/nsim_hs.dts
@@ -17,7 +17,8 @@
 
 	memory {
 		device_type = "memory";
-		reg = <0x0 0x80000000 0x0 0x40000000	/* 1 GB low mem */
+		/* CONFIG_LINUX_LINK_BASE needs to match low mem start */
+		reg = <0x0 0x80000000 0x0 0x20000000	/* 512 MB low mem */
 		       0x1 0x00000000 0x0 0x40000000>;	/* 1 GB highmem */
 	};
 
diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig
index c92c0ef1e9d2..f1ac9818b751 100644
--- a/arch/arc/configs/axs101_defconfig
+++ b/arch/arc/configs/axs101_defconfig
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_CROSS_COMPILE="arc-linux-"
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig
index cfac24e0e7b6..323486d6ee83 100644
--- a/arch/arc/configs/axs103_defconfig
+++ b/arch/arc/configs/axs103_defconfig
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_CROSS_COMPILE="arc-linux-"
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig
index 9922a118a15a..66191cd0447e 100644
--- a/arch/arc/configs/axs103_smp_defconfig
+++ b/arch/arc/configs/axs103_smp_defconfig
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_CROSS_COMPILE="arc-linux-"
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
diff --git a/arch/arc/configs/nsim_hs_defconfig b/arch/arc/configs/nsim_hs_defconfig
index f761a7c70761..f68838e8068a 100644
--- a/arch/arc/configs/nsim_hs_defconfig
+++ b/arch/arc/configs/nsim_hs_defconfig
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_CROSS_COMPILE="arc-linux-"
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
diff --git a/arch/arc/configs/nsim_hs_smp_defconfig b/arch/arc/configs/nsim_hs_smp_defconfig
index dc6f74f41283..96bd1c20fb0b 100644
--- a/arch/arc/configs/nsim_hs_smp_defconfig
+++ b/arch/arc/configs/nsim_hs_smp_defconfig
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_CROSS_COMPILE="arc-linux-"
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig
index 3fef0a210c56..fcae66683ca0 100644
--- a/arch/arc/configs/nsimosci_hs_defconfig
+++ b/arch/arc/configs/nsimosci_hs_defconfig
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_CROSS_COMPILE="arc-linux-"
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig
index 51784837daae..b01b659168ea 100644
--- a/arch/arc/configs/nsimosci_hs_smp_defconfig
+++ b/arch/arc/configs/nsimosci_hs_smp_defconfig
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_CROSS_COMPILE="arc-linux-"
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
diff --git a/arch/arc/configs/vdk_hs38_defconfig b/arch/arc/configs/vdk_hs38_defconfig
index ef35ef3923dd..a07f20de221b 100644
--- a/arch/arc/configs/vdk_hs38_defconfig
+++ b/arch/arc/configs/vdk_hs38_defconfig
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_CROSS_COMPILE="arc-linux-"
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_CROSS_MEMORY_ATTACH is not set
diff --git a/arch/arc/configs/vdk_hs38_smp_defconfig b/arch/arc/configs/vdk_hs38_smp_defconfig
index 634509e5e572..f36c047b33ca 100644
--- a/arch/arc/configs/vdk_hs38_smp_defconfig
+++ b/arch/arc/configs/vdk_hs38_smp_defconfig
@@ -1,4 +1,4 @@
-CONFIG_CROSS_COMPILE="arc-linux-uclibc-"
+CONFIG_CROSS_COMPILE="arc-linux-"
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_DEFAULT_HOSTNAME="ARCLinux"
 # CONFIG_CROSS_MEMORY_ATTACH is not set
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index abf06e81c929..210ef3e72332 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -62,9 +62,7 @@ extern int ioc_exists;
 #define ARC_REG_IC_IVIC		0x10
 #define ARC_REG_IC_CTRL		0x11
 #define ARC_REG_IC_IVIL		0x19
-#if defined(CONFIG_ARC_MMU_V3) || defined(CONFIG_ARC_MMU_V4)
 #define ARC_REG_IC_PTAG		0x1E
-#endif
 #define ARC_REG_IC_PTAG_HI	0x1F
 
 /* Bit val in IC_CTRL */
diff --git a/arch/arc/include/asm/irqflags-arcv2.h b/arch/arc/include/asm/irqflags-arcv2.h
index ad481c24070d..258b0e5ad332 100644
--- a/arch/arc/include/asm/irqflags-arcv2.h
+++ b/arch/arc/include/asm/irqflags-arcv2.h
@@ -37,6 +37,9 @@
 #define ISA_INIT_STATUS_BITS	(STATUS_IE_MASK | STATUS_AD_MASK | \
 					(ARCV2_IRQ_DEF_PRIO << 1))
 
+/* SLEEP needs default irq priority (<=) which can interrupt the doze */
+#define ISA_SLEEP_ARG		(0x10 | ARCV2_IRQ_DEF_PRIO)
+
 #ifndef __ASSEMBLY__
 
 /*
diff --git a/arch/arc/include/asm/irqflags-compact.h b/arch/arc/include/asm/irqflags-compact.h
index d8c608174617..c1d36458bfb7 100644
--- a/arch/arc/include/asm/irqflags-compact.h
+++ b/arch/arc/include/asm/irqflags-compact.h
@@ -43,6 +43,8 @@
 
 #define ISA_INIT_STATUS_BITS	STATUS_IE_MASK
 
+#define ISA_SLEEP_ARG		0x3
+
 #ifndef __ASSEMBLY__
 
 /******************************************************************
diff --git a/arch/arc/include/asm/mach_desc.h b/arch/arc/include/asm/mach_desc.h
index 6ff657a904b6..c28e6c347b49 100644
--- a/arch/arc/include/asm/mach_desc.h
+++ b/arch/arc/include/asm/mach_desc.h
@@ -23,7 +23,7 @@
  * @dt_compat:		Array of device tree 'compatible' strings
  *			(XXX: although only 1st entry is looked at)
  * @init_early:		Very early callback [called from setup_arch()]
- * @init_cpu_smp:	for each CPU as it is coming up (SMP as well as UP)
+ * @init_per_cpu:	for each CPU as it is coming up (SMP as well as UP)
  *			[(M):init_IRQ(), (o):start_kernel_secondary()]
  * @init_machine:	arch initcall level callback (e.g. populate static
  *			platform devices or parse Devicetree)
@@ -35,7 +35,7 @@ struct machine_desc {
 	const char		**dt_compat;
 	void			(*init_early)(void);
 #ifdef CONFIG_SMP
-	void			(*init_cpu_smp)(unsigned int);
+	void			(*init_per_cpu)(unsigned int);
 #endif
 	void			(*init_machine)(void);
 	void			(*init_late)(void);
diff --git a/arch/arc/include/asm/smp.h b/arch/arc/include/asm/smp.h
index 133c867d15af..991380438d6b 100644
--- a/arch/arc/include/asm/smp.h
+++ b/arch/arc/include/asm/smp.h
@@ -48,7 +48,7 @@ extern int smp_ipi_irq_setup(int cpu, int irq);
  * @init_early_smp:	A SMP specific h/w block can init itself
  *			Could be common across platforms so not covered by
  *			mach_desc->init_early()
- * @init_irq_cpu:	Called for each core so SMP h/w block driver can do
+ * @init_per_cpu:	Called for each core so SMP h/w block driver can do
  *			any needed setup per cpu (e.g. IPI request)
  * @cpu_kick:		For Master to kickstart a cpu (optionally at a PC)
  * @ipi_send:		To send IPI to a @cpu
@@ -57,7 +57,7 @@ extern int smp_ipi_irq_setup(int cpu, int irq);
 struct plat_smp_ops {
 	const char	*info;
 	void		(*init_early_smp)(void);
-	void		(*init_irq_cpu)(int cpu);
+	void		(*init_per_cpu)(int cpu);
 	void		(*cpu_kick)(int cpu, unsigned long pc);
 	void		(*ipi_send)(int cpu);
 	void		(*ipi_clear)(int irq);
diff --git a/arch/arc/include/asm/unwind.h b/arch/arc/include/asm/unwind.h
index 7ca628b6ee2a..c11a25bb8158 100644
--- a/arch/arc/include/asm/unwind.h
+++ b/arch/arc/include/asm/unwind.h
@@ -112,7 +112,6 @@ struct unwind_frame_info {
 
 extern int arc_unwind(struct unwind_frame_info *frame);
 extern void arc_unwind_init(void);
-extern void arc_unwind_setup(void);
 extern void *unwind_add_table(struct module *module, const void *table_start,
 			      unsigned long table_size);
 extern void unwind_remove_table(void *handle, int init_only);
@@ -152,9 +151,6 @@ static inline void arc_unwind_init(void)
 {
 }
 
-static inline void arc_unwind_setup(void)
-{
-}
 #define unwind_add_table(a, b, c)
 #define unwind_remove_table(a, b)
 
diff --git a/arch/arc/kernel/ctx_sw.c b/arch/arc/kernel/ctx_sw.c
index c14a5bea0c76..5d446df2c413 100644
--- a/arch/arc/kernel/ctx_sw.c
+++ b/arch/arc/kernel/ctx_sw.c
@@ -58,8 +58,6 @@ __switch_to(struct task_struct *prev_task, struct task_struct *next_task)
 		"st      sp, [r24]       \n\t"
 #endif
 
-		"sync   \n\t"
-
 		/*
 		 * setup _current_task with incoming tsk.
 		 * optionally, set r25 to that as well
diff --git a/arch/arc/kernel/ctx_sw_asm.S b/arch/arc/kernel/ctx_sw_asm.S
index e248594097e7..e6890b1f8650 100644
--- a/arch/arc/kernel/ctx_sw_asm.S
+++ b/arch/arc/kernel/ctx_sw_asm.S
@@ -44,9 +44,6 @@ __switch_to:
 	 * don't need to do anything special to return it
 	 */
 
-	/* hardware memory barrier */
-	sync
-
 	/*
 	 * switch to new task, contained in r1
 	 * Temp reg r3 is required to get the ptr to store val
diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c
index 26c156827479..0394f9f61b46 100644
--- a/arch/arc/kernel/intc-arcv2.c
+++ b/arch/arc/kernel/intc-arcv2.c
@@ -106,10 +106,21 @@ static struct irq_chip arcv2_irq_chip = {
 static int arcv2_irq_map(struct irq_domain *d, unsigned int irq,
 			 irq_hw_number_t hw)
 {
-	if (irq == TIMER0_IRQ || irq == IPI_IRQ)
+	/*
+	 * core intc IRQs [16, 23]:
+	 * Statically assigned always private-per-core (Timers, WDT, IPI, PCT)
+	 */
+	if (hw < 24) {
+		/*
+		 * A subsequent request_percpu_irq() fails if percpu_devid is
+		 * not set. That in turns sets NOAUTOEN, meaning each core needs
+		 * to call enable_percpu_irq()
+		 */
+		irq_set_percpu_devid(irq);
 		irq_set_chip_and_handler(irq, &arcv2_irq_chip, handle_percpu_irq);
-	else
+	} else {
 		irq_set_chip_and_handler(irq, &arcv2_irq_chip, handle_level_irq);
+	}
 
 	return 0;
 }
diff --git a/arch/arc/kernel/irq.c b/arch/arc/kernel/irq.c
index 2ee226546c6a..ba17f85285cf 100644
--- a/arch/arc/kernel/irq.c
+++ b/arch/arc/kernel/irq.c
@@ -29,11 +29,11 @@ void __init init_IRQ(void)
 
 #ifdef CONFIG_SMP
 	/* a SMP H/w block could do IPI IRQ request here */
-	if (plat_smp_ops.init_irq_cpu)
-		plat_smp_ops.init_irq_cpu(smp_processor_id());
+	if (plat_smp_ops.init_per_cpu)
+		plat_smp_ops.init_per_cpu(smp_processor_id());
 
-	if (machine_desc->init_cpu_smp)
-		machine_desc->init_cpu_smp(smp_processor_id());
+	if (machine_desc->init_per_cpu)
+		machine_desc->init_per_cpu(smp_processor_id());
 #endif
 }
 
@@ -51,6 +51,18 @@ void arch_do_IRQ(unsigned int irq, struct pt_regs *regs)
 	set_irq_regs(old_regs);
 }
 
+/*
+ * API called for requesting percpu interrupts - called by each CPU
+ *  - For boot CPU, actually request the IRQ with genirq core + enables
+ *  - For subsequent callers only enable called locally
+ *
+ * Relies on being called by boot cpu first (i.e. request called ahead) of
+ * any enable as expected by genirq. Hence Suitable only for TIMER, IPI
+ * which are guaranteed to be setup on boot core first.
+ * Late probed peripherals such as perf can't use this as there no guarantee
+ * of being called on boot CPU first.
+ */
+
 void arc_request_percpu_irq(int irq, int cpu,
 			    irqreturn_t (*isr)(int irq, void *dev),
 			    const char *irq_nm,
@@ -60,14 +72,17 @@ void arc_request_percpu_irq(int irq, int cpu,
 	if (!cpu) {
 		int rc;
 
+#ifdef CONFIG_ISA_ARCOMPACT
 		/*
-		 * These 2 calls are essential to making percpu IRQ APIs work
-		 * Ideally these details could be hidden in irq chip map function
-		 * but the issue is IPIs IRQs being static (non-DT) and platform
-		 * specific, so we can't identify them there.
+		 * A subsequent request_percpu_irq() fails if percpu_devid is
+		 * not set. That in turns sets NOAUTOEN, meaning each core needs
+		 * to call enable_percpu_irq()
+		 *
+		 * For ARCv2, this is done in irq map function since we know
+		 * which irqs are strictly per cpu
 		 */
 		irq_set_percpu_devid(irq);
-		irq_modify_status(irq, IRQ_NOAUTOEN, 0);	/* @irq, @clr, @set */
+#endif
 
 		rc = request_percpu_irq(irq, isr, irq_nm, percpu_dev);
 		if (rc)
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index 74a9b074ac3e..bd237acdf4f2 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -132,7 +132,7 @@ static void mcip_probe_n_setup(void)
 struct plat_smp_ops plat_smp_ops = {
 	.info		= smp_cpuinfo_buf,
 	.init_early_smp	= mcip_probe_n_setup,
-	.init_irq_cpu	= mcip_setup_per_cpu,
+	.init_per_cpu	= mcip_setup_per_cpu,
 	.ipi_send	= mcip_ipi_send,
 	.ipi_clear	= mcip_ipi_clear,
 };
diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
index 0c08bb1ce15a..8b134cfe5e1f 100644
--- a/arch/arc/kernel/perf_event.c
+++ b/arch/arc/kernel/perf_event.c
@@ -428,12 +428,11 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev)
 
 #endif /* CONFIG_ISA_ARCV2 */
 
-void arc_cpu_pmu_irq_init(void)
+static void arc_cpu_pmu_irq_init(void *data)
 {
-	struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu);
+	int irq = *(int *)data;
 
-	arc_request_percpu_irq(arc_pmu->irq, smp_processor_id(), arc_pmu_intr,
-			       "ARC perf counters", pmu_cpu);
+	enable_percpu_irq(irq, IRQ_TYPE_NONE);
 
 	/* Clear all pending interrupt flags */
 	write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff);
@@ -515,7 +514,6 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
 
 	if (has_interrupts) {
 		int irq = platform_get_irq(pdev, 0);
-		unsigned long flags;
 
 		if (irq < 0) {
 			pr_err("Cannot get IRQ number for the platform\n");
@@ -524,24 +522,12 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
 
 		arc_pmu->irq = irq;
 
-		/*
-		 * arc_cpu_pmu_irq_init() needs to be called on all cores for
-		 * their respective local PMU.
-		 * However we use opencoded on_each_cpu() to ensure it is called
-		 * on core0 first, so that arc_request_percpu_irq() sets up
-		 * AUTOEN etc. Otherwise enable_percpu_irq() fails to enable
-		 * perf IRQ on non master cores.
-		 * see arc_request_percpu_irq()
-		 */
-		preempt_disable();
-		local_irq_save(flags);
-		arc_cpu_pmu_irq_init();
-		local_irq_restore(flags);
-		smp_call_function((smp_call_func_t)arc_cpu_pmu_irq_init, 0, 1);
-		preempt_enable();
-
-		/* Clean all pending interrupt flags */
-		write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff);
+		/* intc map function ensures irq_set_percpu_devid() called */
+		request_percpu_irq(irq, arc_pmu_intr, "ARC perf counters",
+				   this_cpu_ptr(&arc_pmu_cpu));
+
+		on_each_cpu(arc_cpu_pmu_irq_init, &irq, 1);
+
 	} else
 		arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
 
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index 91d5a0f1f3f7..a3f750e76b68 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -44,11 +44,10 @@ SYSCALL_DEFINE0(arc_gettls)
 void arch_cpu_idle(void)
 {
 	/* sleep, but enable all interrupts before committing */
-	if (is_isa_arcompact()) {
-		__asm__("sleep 0x3");
-	} else {
-		__asm__("sleep 0x10");
-	}
+	__asm__ __volatile__(
+		"sleep %0	\n"
+		:
+		:"I"(ISA_SLEEP_ARG)); /* can't be "r" has to be embedded const */
 }
 
 asmlinkage void ret_from_fork(void);
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index c33e77c0ad3e..e1b87444ea9a 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -429,7 +429,6 @@ void __init setup_arch(char **cmdline_p)
 #endif
 
 	arc_unwind_init();
-	arc_unwind_setup();
 }
 
 static int __init customize_machine(void)
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index 580587805fa3..ef6e9e15b82a 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -132,11 +132,11 @@ void start_kernel_secondary(void)
 	pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);
 
 	/* Some SMP H/w setup - for each cpu */
-	if (plat_smp_ops.init_irq_cpu)
-		plat_smp_ops.init_irq_cpu(cpu);
+	if (plat_smp_ops.init_per_cpu)
+		plat_smp_ops.init_per_cpu(cpu);
 
-	if (machine_desc->init_cpu_smp)
-		machine_desc->init_cpu_smp(cpu);
+	if (machine_desc->init_per_cpu)
+		machine_desc->init_per_cpu(cpu);
 
 	arc_local_timer_setup();
 
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
index 93c6ea52b671..5eb707640e9c 100644
--- a/arch/arc/kernel/unwind.c
+++ b/arch/arc/kernel/unwind.c
@@ -170,6 +170,23 @@ static struct unwind_table *find_table(unsigned long pc)
 
 static unsigned long read_pointer(const u8 **pLoc,
 				  const void *end, signed ptrType);
+static void init_unwind_hdr(struct unwind_table *table,
+			    void *(*alloc) (unsigned long));
+
+/*
+ * wrappers for header alloc (vs. calling one vs. other at call site)
+ * to elide section mismatches warnings
+ */
+static void *__init unw_hdr_alloc_early(unsigned long sz)
+{
+	return __alloc_bootmem_nopanic(sz, sizeof(unsigned int),
+				       MAX_DMA_ADDRESS);
+}
+
+static void *unw_hdr_alloc(unsigned long sz)
+{
+	return kmalloc(sz, GFP_KERNEL);
+}
 
 static void init_unwind_table(struct unwind_table *table, const char *name,
 			      const void *core_start, unsigned long core_size,
@@ -209,6 +226,8 @@ void __init arc_unwind_init(void)
 			  __start_unwind, __end_unwind - __start_unwind,
 			  NULL, 0);
 	  /*__start_unwind_hdr, __end_unwind_hdr - __start_unwind_hdr);*/
+
+	init_unwind_hdr(&root_table, unw_hdr_alloc_early);
 }
 
 static const u32 bad_cie, not_fde;
@@ -241,8 +260,8 @@ static void swap_eh_frame_hdr_table_entries(void *p1, void *p2, int size)
 	e2->fde = v;
 }
 
-static void __init setup_unwind_table(struct unwind_table *table,
-				      void *(*alloc) (unsigned long))
+static void init_unwind_hdr(struct unwind_table *table,
+			    void *(*alloc) (unsigned long))
 {
 	const u8 *ptr;
 	unsigned long tableSize = table->size, hdrSize;
@@ -277,10 +296,10 @@ static void __init setup_unwind_table(struct unwind_table *table,
 		if (cie == &not_fde)
 			continue;
 		if (cie == NULL || cie == &bad_cie)
-			return;
+			goto ret_err;
 		ptrType = fde_pointer_type(cie);
 		if (ptrType < 0)
-			return;
+			goto ret_err;
 
 		ptr = (const u8 *)(fde + 2);
 		if (!read_pointer(&ptr, (const u8 *)(fde + 1) + *fde,
@@ -296,13 +315,15 @@ static void __init setup_unwind_table(struct unwind_table *table,
 	}
 
 	if (tableSize || !n)
-		return;
+		goto ret_err;
 
 	hdrSize = 4 + sizeof(unsigned long) + sizeof(unsigned int)
 		+ 2 * n * sizeof(unsigned long);
+
 	header = alloc(hdrSize);
 	if (!header)
-		return;
+		goto ret_err;
+
 	header->version = 1;
 	header->eh_frame_ptr_enc = DW_EH_PE_abs | DW_EH_PE_native;
 	header->fde_count_enc = DW_EH_PE_abs | DW_EH_PE_data4;
@@ -340,18 +361,10 @@ static void __init setup_unwind_table(struct unwind_table *table,
 	table->hdrsz = hdrSize;
 	smp_wmb();
 	table->header = (const void *)header;
-}
-
-static void *__init balloc(unsigned long sz)
-{
-	return __alloc_bootmem_nopanic(sz,
-				       sizeof(unsigned int),
-				       __pa(MAX_DMA_ADDRESS));
-}
+	return;
 
-void __init arc_unwind_setup(void)
-{
-	setup_unwind_table(&root_table, balloc);
+ret_err:
+	panic("Attention !!! Dwarf FDE parsing errors\n");;
 }
 
 #ifdef CONFIG_MODULES
@@ -377,6 +390,8 @@ void *unwind_add_table(struct module *module, const void *table_start,
 				  table_start, table_size,
 				  NULL, 0);
 
+	init_unwind_hdr(table, unw_hdr_alloc);
+
 #ifdef UNWIND_DEBUG
 	unw_debug("Table added for [%s] %lx %lx\n",
 		  module->name, table->core.pc, table->core.range);
@@ -439,6 +454,7 @@ void unwind_remove_table(void *handle, int init_only)
 	info.init_only = init_only;
 
 	unlink_table(&info);	/* XXX: SMP */
+	kfree(table->header);
 	kfree(table);
 }
 
@@ -588,9 +604,6 @@ static signed fde_pointer_type(const u32 *cie)
 	const u8 *ptr = (const u8 *)(cie + 2);
 	unsigned version = *ptr;
 
-	if (version != 1)
-		return -1;	/* unsupported */
-
 	if (*++ptr) {
 		const char *aug;
 		const u8 *end = (const u8 *)(cie + 1) + *cie;
@@ -986,42 +999,13 @@ int arc_unwind(struct unwind_frame_info *frame)
 							    (const u8 *)(fde +
 									 1) +
 							    *fde, ptrType);
-			if (pc >= endLoc)
+			if (pc >= endLoc) {
 				fde = NULL;
-		} else
-			fde = NULL;
-	}
-	if (fde == NULL) {
-		for (fde = table->address, tableSize = table->size;
-		     cie = NULL, tableSize > sizeof(*fde)
-		     && tableSize - sizeof(*fde) >= *fde;
-		     tableSize -= sizeof(*fde) + *fde,
-		     fde += 1 + *fde / sizeof(*fde)) {
-			cie = cie_for_fde(fde, table);
-			if (cie == &bad_cie) {
 				cie = NULL;
-				break;
 			}
-			if (cie == NULL
-			    || cie == &not_fde
-			    || (ptrType = fde_pointer_type(cie)) < 0)
-				continue;
-			ptr = (const u8 *)(fde + 2);
-			startLoc = read_pointer(&ptr,
-						(const u8 *)(fde + 1) +
-						*fde, ptrType);
-			if (!startLoc)
-				continue;
-			if (!(ptrType & DW_EH_PE_indirect))
-				ptrType &=
-				    DW_EH_PE_FORM | DW_EH_PE_signed;
-			endLoc =
-			    startLoc + read_pointer(&ptr,
-						    (const u8 *)(fde +
-								 1) +
-						    *fde, ptrType);
-			if (pc >= startLoc && pc < endLoc)
-				break;
+		} else {
+			fde = NULL;
+			cie = NULL;
 		}
 	}
 }
@@ -1031,9 +1015,7 @@ int arc_unwind(struct unwind_frame_info *frame)
 		ptr = (const u8 *)(cie + 2);
 		end = (const u8 *)(cie + 1) + *cie;
 		frame->call_frame = 1;
-		if ((state.version = *ptr) != 1)
-			cie = NULL;	/* unsupported version */
-		else if (*++ptr) {
+		if (*++ptr) {
 			/* check if augmentation size is first (thus present) */
 			if (*ptr == 'z') {
 				while (++ptr < end && *ptr) {
diff --git a/arch/arc/mm/highmem.c b/arch/arc/mm/highmem.c
index 065ee6bfa82a..92dd92cad7f9 100644
--- a/arch/arc/mm/highmem.c
+++ b/arch/arc/mm/highmem.c
@@ -111,7 +111,7 @@ void __kunmap_atomic(void *kv)
 }
 EXPORT_SYMBOL(__kunmap_atomic);
 
-noinline pte_t *alloc_kmap_pgtable(unsigned long kvaddr)
+static noinline pte_t * __init alloc_kmap_pgtable(unsigned long kvaddr)
 {
 	pgd_t *pgd_k;
 	pud_t *pud_k;
@@ -127,7 +127,7 @@ noinline pte_t *alloc_kmap_pgtable(unsigned long kvaddr)
 	return pte_k;
 }
 
-void kmap_init(void)
+void __init kmap_init(void)
 {
 	/* Due to recursive include hell, we can't do this in processor.h */
 	BUILD_BUG_ON(PAGE_OFFSET < (VMALLOC_END + FIXMAP_SIZE + PKMAP_SIZE));
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index a9305b5a2cd4..7d2c4fbf4f22 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -51,7 +51,9 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
 	int in_use = 0;
 
 	if (!low_mem_sz) {
-		BUG_ON(base != low_mem_start);
+		if (base != low_mem_start)
+			panic("CONFIG_LINUX_LINK_BASE != DT memory { }");
+
 		low_mem_sz = size;
 		in_use = 1;
 	} else {
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index 0ee739846847..daf2bf52b984 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -619,10 +619,10 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
 
 		int dirty = !test_and_set_bit(PG_dc_clean, &page->flags);
 		if (dirty) {
-			/* wback + inv dcache lines */
+			/* wback + inv dcache lines (K-mapping) */
 			__flush_dcache_page(paddr, paddr);
 
-			/* invalidate any existing icache lines */
+			/* invalidate any existing icache lines (U-mapping) */
 			if (vma->vm_flags & VM_EXEC)
 				__inv_icache_page(paddr, vaddr);
 		}
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index a5d416ec1d01..688dc7b0d951 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -76,6 +76,8 @@ config ARM
 	select IRQ_FORCED_THREADING
 	select MODULES_USE_ELF_REL
 	select NO_BOOTMEM
+	select OF_EARLY_FLATTREE if OF
+	select OF_RESERVED_MEM if OF
 	select OLD_SIGACTION
 	select OLD_SIGSUSPEND3
 	select PERF_USE_VMALLOC
@@ -1825,8 +1827,6 @@ config USE_OF
 	bool "Flattened Device Tree support"
 	select IRQ_DOMAIN
 	select OF
-	select OF_EARLY_FLATTREE
-	select OF_RESERVED_MEM
 	help
 	  Include support for flattened device tree machine descriptions.
 
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index d83ff9c9701e..de8791a4d131 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -74,7 +74,7 @@
 			reg = <0x48240200 0x100>;
 			interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-parent = <&gic>;
-			clocks = <&dpll_mpu_m2_ck>;
+			clocks = <&mpu_periphclk>;
 		};
 
 		local_timer: timer@48240600 {
@@ -82,7 +82,7 @@
 			reg = <0x48240600 0x100>;
 			interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>;
 			interrupt-parent = <&gic>;
-			clocks = <&dpll_mpu_m2_ck>;
+			clocks = <&mpu_periphclk>;
 		};
 
 		l2-cache-controller@48242000 {
diff --git a/arch/arm/boot/dts/am43xx-clocks.dtsi b/arch/arm/boot/dts/am43xx-clocks.dtsi
index cc88728d751d..a38af2bfbfcf 100644
--- a/arch/arm/boot/dts/am43xx-clocks.dtsi
+++ b/arch/arm/boot/dts/am43xx-clocks.dtsi
@@ -259,6 +259,14 @@
 		ti,invert-autoidle-bit;
 	};
 
+	mpu_periphclk: mpu_periphclk {
+		#clock-cells = <0>;
+		compatible = "fixed-factor-clock";
+		clocks = <&dpll_mpu_m2_ck>;
+		clock-mult = <1>;
+		clock-div = <2>;
+	};
+
 	dpll_ddr_ck: dpll_ddr_ck {
 		#clock-cells = <0>;
 		compatible = "ti,am3-dpll-clock";
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts
index d9ba6b879fc1..00352e761b8c 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15.dts
+++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts
@@ -604,6 +604,7 @@
 		reg = <0x6f>;
 		interrupts-extended = <&crossbar_mpu GIC_SPI 2 IRQ_TYPE_EDGE_RISING>,
 				      <&dra7_pmx_core 0x424>;
+		interrupt-names = "irq", "wakeup";
 
 		pinctrl-names = "default";
 		pinctrl-0 = <&mcp79410_pins_default>;
diff --git a/arch/arm/boot/dts/animeo_ip.dts b/arch/arm/boot/dts/animeo_ip.dts
index 4e0ad3b82796..0962f2fa3f6e 100644
--- a/arch/arm/boot/dts/animeo_ip.dts
+++ b/arch/arm/boot/dts/animeo_ip.dts
@@ -155,21 +155,21 @@
 			label = "keyswitch_in";
 			gpios = <&pioB 1 GPIO_ACTIVE_HIGH>;
 			linux,code = <28>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 
 		error_in {
 			label = "error_in";
 			gpios = <&pioB 2 GPIO_ACTIVE_HIGH>;
 			linux,code = <29>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 
 		btn {
 			label = "btn";
 			gpios = <&pioC 23 GPIO_ACTIVE_HIGH>;
 			linux,code = <31>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
 };
diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi
index c6a0e9d7f1a9..e8b7f6726772 100644
--- a/arch/arm/boot/dts/armada-38x.dtsi
+++ b/arch/arm/boot/dts/armada-38x.dtsi
@@ -498,6 +498,7 @@
 				reg = <0x70000 0x4000>;
 				interrupts-extended = <&mpic 8>;
 				clocks = <&gateclk 4>;
+				tx-csum-limit = <9800>;
 				status = "disabled";
 			};
 
diff --git a/arch/arm/boot/dts/at91-foxg20.dts b/arch/arm/boot/dts/at91-foxg20.dts
index f89598af4c2b..6bf873e7d96c 100644
--- a/arch/arm/boot/dts/at91-foxg20.dts
+++ b/arch/arm/boot/dts/at91-foxg20.dts
@@ -159,7 +159,7 @@
 			label = "Button";
 			gpios = <&pioC 4 GPIO_ACTIVE_LOW>;
 			linux,code = <0x103>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
 };
diff --git a/arch/arm/boot/dts/at91-kizbox.dts b/arch/arm/boot/dts/at91-kizbox.dts
index bf18ece0c027..229e989eb60d 100644
--- a/arch/arm/boot/dts/at91-kizbox.dts
+++ b/arch/arm/boot/dts/at91-kizbox.dts
@@ -24,15 +24,6 @@
 	};
 
 	clocks {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		ranges;
-
-		main_clock: clock@0 {
-			compatible = "atmel,osc", "fixed-clock";
-			clock-frequency = <18432000>;
-		};
-
 		main_xtal {
 			clock-frequency = <18432000>;
 		};
@@ -94,14 +85,14 @@
 			label = "PB_RST";
 			gpios = <&pioB 30 GPIO_ACTIVE_HIGH>;
 			linux,code = <0x100>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 
 		user {
 			label = "PB_USER";
 			gpios = <&pioB 31 GPIO_ACTIVE_HIGH>;
 			linux,code = <0x101>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
 
diff --git a/arch/arm/boot/dts/at91-kizbox2.dts b/arch/arm/boot/dts/at91-kizbox2.dts
index f0b1563cb3f1..50a14568f094 100644
--- a/arch/arm/boot/dts/at91-kizbox2.dts
+++ b/arch/arm/boot/dts/at91-kizbox2.dts
@@ -171,21 +171,21 @@
 			label = "PB_PROG";
 			gpios = <&pioE 27 GPIO_ACTIVE_LOW>;
 			linux,code = <0x102>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 
 		reset {
 			label = "PB_RST";
 			gpios = <&pioE 29 GPIO_ACTIVE_LOW>;
 			linux,code = <0x100>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 
 		user {
 			label = "PB_USER";
 			gpios = <&pioE 31 GPIO_ACTIVE_HIGH>;
 			linux,code = <0x101>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
 
diff --git a/arch/arm/boot/dts/at91-kizboxmini.dts b/arch/arm/boot/dts/at91-kizboxmini.dts
index 9f72b4932634..9682d105d4d8 100644
--- a/arch/arm/boot/dts/at91-kizboxmini.dts
+++ b/arch/arm/boot/dts/at91-kizboxmini.dts
@@ -98,14 +98,14 @@
 			label = "PB_PROG";
 			gpios = <&pioC 17 GPIO_ACTIVE_LOW>;
 			linux,code = <0x102>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 
 		reset {
 			label = "PB_RST";
 			gpios = <&pioC 16 GPIO_ACTIVE_LOW>;
 			linux,code = <0x100>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 	};
 
diff --git a/arch/arm/boot/dts/at91-qil_a9260.dts b/arch/arm/boot/dts/at91-qil_a9260.dts
index a9aef53ab764..4f2eebf4a560 100644
--- a/arch/arm/boot/dts/at91-qil_a9260.dts
+++ b/arch/arm/boot/dts/at91-qil_a9260.dts
@@ -183,7 +183,7 @@
 				label = "user_pb";
 				gpios = <&pioB 10 GPIO_ACTIVE_LOW>;
 				linux,code = <28>;
-				gpio-key,wakeup;
+				wakeup-source;
 			};
 		};
 
diff --git a/arch/arm/boot/dts/at91-sama5d2_xplained.dts b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
index e07c2b206beb..e74df327cdd3 100644
--- a/arch/arm/boot/dts/at91-sama5d2_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d2_xplained.dts
@@ -45,6 +45,7 @@
 /dts-v1/;
 #include "sama5d2.dtsi"
 #include "sama5d2-pinfunc.h"
+#include <dt-bindings/mfd/atmel-flexcom.h>
 
 / {
 	model = "Atmel SAMA5D2 Xplained";
@@ -59,15 +60,6 @@
 	};
 
 	clocks {
-		#address-cells = <1>;
-		#size-cells = <1>;
-		ranges;
-
-		main_clock: clock@0 {
-			compatible = "atmel,osc", "fixed-clock";
-			clock-frequency = <12000000>;
-		};
-
 		slow_xtal {
 			clock-frequency = <32768>;
 		};
@@ -91,6 +83,22 @@
 		status = "okay";
 	};
 
+	sdmmc0: sdio-host@a0000000 {
+		bus-width = <8>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_sdmmc0_default>;
+		non-removable;
+		mmc-ddr-1_8v;
+		status = "okay";
+	};
+
+	sdmmc1: sdio-host@b0000000 {
+		bus-width = <4>;
+		pinctrl-names = "default";
+		pinctrl-0 = <&pinctrl_sdmmc1_default>;
+		status = "okay"; /* conflict with qspi0 */
+	};
+
 	apb {
 		spi0: spi@f8000000 {
 			pinctrl-names = "default";
@@ -176,17 +184,55 @@
 						regulator-name = "VDD_SDHC_1V8";
 						regulator-min-microvolt = <1800000>;
 						regulator-max-microvolt = <1800000>;
+						regulator-always-on;
 					};
 				};
 			};
 		};
 
+		flx0: flexcom@f8034000 {
+			atmel,flexcom-mode = <ATMEL_FLEXCOM_MODE_USART>;
+			status = "disabled"; /* conflict with ISC_D2 & ISC_D3 data pins */
+
+			uart5: serial@200 {
+				compatible = "atmel,at91sam9260-usart";
+				reg = <0x200 0x200>;
+				interrupts = <19 IRQ_TYPE_LEVEL_HIGH 7>;
+				clocks = <&flx0_clk>;
+				clock-names = "usart";
+				pinctrl-names = "default";
+				pinctrl-0 = <&pinctrl_flx0_default>;
+				atmel,fifo-size = <32>;
+				status = "okay";
+			};
+		};
+
 		uart3: serial@fc008000 {
 			pinctrl-names = "default";
 			pinctrl-0 = <&pinctrl_uart3_default>;
 			status = "okay";
 		};
 
+		flx4: flexcom@fc018000 {
+			atmel,flexcom-mode = <ATMEL_FLEXCOM_MODE_TWI>;
+			status = "okay";
+
+			i2c2: i2c@600 {
+				compatible = "atmel,sama5d2-i2c";
+				reg = <0x600 0x200>;
+				interrupts = <23 IRQ_TYPE_LEVEL_HIGH 7>;
+				dmas = <0>, <0>;
+				dma-names = "tx", "rx";
+				#address-cells = <1>;
+				#size-cells = <0>;
+				clocks = <&flx4_clk>;
+				pinctrl-names = "default";
+				pinctrl-0 = <&pinctrl_flx4_default>;
+				atmel,fifo-size = <16>;
+				status = "okay";
+			};
+		};
+
 		i2c1: i2c@fc028000 {
 			dmas = <0>, <0>;
 			pinctrl-names = "default";
@@ -201,6 +247,18 @@
 		};
 
 		pinctrl@fc038000 {
+			pinctrl_flx0_default: flx0_default {
+				pinmux = <PIN_PB28__FLEXCOM0_IO0>,
+					 <PIN_PB29__FLEXCOM0_IO1>;
+				bias-disable;
+			};
+
+			pinctrl_flx4_default: flx4_default {
+				pinmux = <PIN_PD12__FLEXCOM4_IO0>,
+					 <PIN_PD13__FLEXCOM4_IO1>;
+				bias-disable;
+			};
+
 			pinctrl_i2c0_default: i2c0_default {
 				pinmux = <PIN_PD21__TWD0>,
 					 <PIN_PD22__TWCK0>;
@@ -227,6 +285,46 @@
 				bias-disable;
 			};
 
+			pinctrl_sdmmc0_default: sdmmc0_default {
+				cmd_data {
+					pinmux = <PIN_PA1__SDMMC0_CMD>,
+						 <PIN_PA2__SDMMC0_DAT0>,
+						 <PIN_PA3__SDMMC0_DAT1>,
+						 <PIN_PA4__SDMMC0_DAT2>,
+						 <PIN_PA5__SDMMC0_DAT3>,
+						 <PIN_PA6__SDMMC0_DAT4>,
+						 <PIN_PA7__SDMMC0_DAT5>,
+						 <PIN_PA8__SDMMC0_DAT6>,
+						 <PIN_PA9__SDMMC0_DAT7>;
+					bias-pull-up;
+				};
+
+				ck_cd_rstn_vddsel {
+					pinmux = <PIN_PA0__SDMMC0_CK>,
+						 <PIN_PA10__SDMMC0_RSTN>,
+						 <PIN_PA11__SDMMC0_VDDSEL>,
+						 <PIN_PA13__SDMMC0_CD>;
+					bias-disable;
+				};
+			};
+
+			pinctrl_sdmmc1_default: sdmmc1_default {
+				cmd_data {
+					pinmux = <PIN_PA28__SDMMC1_CMD>,
+						 <PIN_PA18__SDMMC1_DAT0>,
+						 <PIN_PA19__SDMMC1_DAT1>,
+						 <PIN_PA20__SDMMC1_DAT2>,
+						 <PIN_PA21__SDMMC1_DAT3>;
+					bias-pull-up;
+				};
+
+				conf-ck_cd {
+					pinmux = <PIN_PA22__SDMMC1_CK>,
+						 <PIN_PA30__SDMMC1_CD>;
+					bias-disable;
+				};
+			};
+
 			pinctrl_spi0_default: spi0_default {
 				pinmux = <PIN_PA14__SPI0_SPCK>,
 					 <PIN_PA15__SPI0_MOSI>,
diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
index 8488ac53d22d..ff888d21c786 100644
--- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
@@ -315,7 +315,7 @@
315 label = "PB_USER"; 315 label = "PB_USER";
316 gpios = <&pioE 29 GPIO_ACTIVE_LOW>; 316 gpios = <&pioE 29 GPIO_ACTIVE_LOW>;
317 linux,code = <0x104>; 317 linux,code = <0x104>;
318 gpio-key,wakeup; 318 wakeup-source;
319 }; 319 };
320 }; 320 };
321 321
diff --git a/arch/arm/boot/dts/at91-sama5d4_xplained.dts b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
index 45371a1b61b3..131614f28e75 100644
--- a/arch/arm/boot/dts/at91-sama5d4_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
@@ -50,7 +50,6 @@
50 compatible = "atmel,sama5d4-xplained", "atmel,sama5d4", "atmel,sama5"; 50 compatible = "atmel,sama5d4-xplained", "atmel,sama5d4", "atmel,sama5";
51 51
52 chosen { 52 chosen {
53 bootargs = "ignore_loglevel earlyprintk";
54 stdout-path = "serial0:115200n8"; 53 stdout-path = "serial0:115200n8";
55 }; 54 };
56 55
@@ -59,15 +58,6 @@
59 }; 58 };
60 59
61 clocks { 60 clocks {
62 #address-cells = <1>;
63 #size-cells = <1>;
64 ranges;
65
66 main_clock: clock@0 {
67 compatible = "atmel,osc", "fixed-clock";
68 clock-frequency = <12000000>;
69 };
70
71 slow_xtal { 61 slow_xtal {
72 clock-frequency = <32768>; 62 clock-frequency = <32768>;
73 }; 63 };
@@ -235,7 +225,7 @@
235 label = "pb_user1"; 225 label = "pb_user1";
236 gpios = <&pioE 8 GPIO_ACTIVE_HIGH>; 226 gpios = <&pioE 8 GPIO_ACTIVE_HIGH>;
237 linux,code = <0x100>; 227 linux,code = <0x100>;
238 gpio-key,wakeup; 228 wakeup-source;
239 }; 229 };
240 }; 230 };
241 231
diff --git a/arch/arm/boot/dts/at91-sama5d4ek.dts b/arch/arm/boot/dts/at91-sama5d4ek.dts
index 6d272c0125e3..2d4a33100af6 100644
--- a/arch/arm/boot/dts/at91-sama5d4ek.dts
+++ b/arch/arm/boot/dts/at91-sama5d4ek.dts
@@ -50,7 +50,6 @@
50 compatible = "atmel,sama5d4ek", "atmel,sama5d4", "atmel,sama5"; 50 compatible = "atmel,sama5d4ek", "atmel,sama5d4", "atmel,sama5";
51 51
52 chosen { 52 chosen {
53 bootargs = "ignore_loglevel earlyprintk";
54 stdout-path = "serial0:115200n8"; 53 stdout-path = "serial0:115200n8";
55 }; 54 };
56 55
@@ -59,15 +58,6 @@
59 }; 58 };
60 59
61 clocks { 60 clocks {
62 #address-cells = <1>;
63 #size-cells = <1>;
64 ranges;
65
66 main_clock: clock@0 {
67 compatible = "atmel,osc", "fixed-clock";
68 clock-frequency = <12000000>;
69 };
70
71 slow_xtal { 61 slow_xtal {
72 clock-frequency = <32768>; 62 clock-frequency = <32768>;
73 }; 63 };
@@ -304,7 +294,7 @@
304 label = "pb_user1"; 294 label = "pb_user1";
305 gpios = <&pioE 13 GPIO_ACTIVE_HIGH>; 295 gpios = <&pioE 13 GPIO_ACTIVE_HIGH>;
306 linux,code = <0x100>; 296 linux,code = <0x100>;
307 gpio-key,wakeup; 297 wakeup-source;
308 }; 298 };
309 }; 299 };
310 300
diff --git a/arch/arm/boot/dts/at91rm9200ek.dts b/arch/arm/boot/dts/at91rm9200ek.dts
index 8dab4b75ca97..f90e1c2d3caa 100644
--- a/arch/arm/boot/dts/at91rm9200ek.dts
+++ b/arch/arm/boot/dts/at91rm9200ek.dts
@@ -21,15 +21,6 @@
21 }; 21 };
22 22
23 clocks { 23 clocks {
24 #address-cells = <1>;
25 #size-cells = <1>;
26 ranges;
27
28 main_clock: clock@0 {
29 compatible = "atmel,osc", "fixed-clock";
30 clock-frequency = <18432000>;
31 };
32
33 slow_xtal { 24 slow_xtal {
34 clock-frequency = <32768>; 25 clock-frequency = <32768>;
35 }; 26 };
diff --git a/arch/arm/boot/dts/at91sam9261ek.dts b/arch/arm/boot/dts/at91sam9261ek.dts
index 2e92ac020f23..55bd51f07fa6 100644
--- a/arch/arm/boot/dts/at91sam9261ek.dts
+++ b/arch/arm/boot/dts/at91sam9261ek.dts
@@ -22,15 +22,6 @@
22 }; 22 };
23 23
24 clocks { 24 clocks {
25 #address-cells = <1>;
26 #size-cells = <1>;
27 ranges;
28
29 main_clock: clock@0 {
30 compatible = "atmel,osc", "fixed-clock";
31 clock-frequency = <18432000>;
32 };
33
34 slow_xtal { 25 slow_xtal {
35 clock-frequency = <32768>; 26 clock-frequency = <32768>;
36 }; 27 };
@@ -149,7 +140,7 @@
149 ti,debounce-tol = /bits/ 16 <65535>; 140 ti,debounce-tol = /bits/ 16 <65535>;
150 ti,debounce-max = /bits/ 16 <1>; 141 ti,debounce-max = /bits/ 16 <1>;
151 142
152 linux,wakeup; 143 wakeup-source;
153 }; 144 };
154 }; 145 };
155 146
@@ -193,28 +184,28 @@
193 label = "button_0"; 184 label = "button_0";
194 gpios = <&pioA 27 GPIO_ACTIVE_LOW>; 185 gpios = <&pioA 27 GPIO_ACTIVE_LOW>;
195 linux,code = <256>; 186 linux,code = <256>;
196 gpio-key,wakeup; 187 wakeup-source;
197 }; 188 };
198 189
199 button_1 { 190 button_1 {
200 label = "button_1"; 191 label = "button_1";
201 gpios = <&pioA 26 GPIO_ACTIVE_LOW>; 192 gpios = <&pioA 26 GPIO_ACTIVE_LOW>;
202 linux,code = <257>; 193 linux,code = <257>;
203 gpio-key,wakeup; 194 wakeup-source;
204 }; 195 };
205 196
206 button_2 { 197 button_2 {
207 label = "button_2"; 198 label = "button_2";
208 gpios = <&pioA 25 GPIO_ACTIVE_LOW>; 199 gpios = <&pioA 25 GPIO_ACTIVE_LOW>;
209 linux,code = <258>; 200 linux,code = <258>;
210 gpio-key,wakeup; 201 wakeup-source;
211 }; 202 };
212 203
213 button_3 { 204 button_3 {
214 label = "button_3"; 205 label = "button_3";
215 gpios = <&pioA 24 GPIO_ACTIVE_LOW>; 206 gpios = <&pioA 24 GPIO_ACTIVE_LOW>;
216 linux,code = <259>; 207 linux,code = <259>;
217 gpio-key,wakeup; 208 wakeup-source;
218 }; 209 };
219 }; 210 };
220}; 211};
diff --git a/arch/arm/boot/dts/at91sam9263ek.dts b/arch/arm/boot/dts/at91sam9263ek.dts
index 23381276ffb8..59df9d73d276 100644
--- a/arch/arm/boot/dts/at91sam9263ek.dts
+++ b/arch/arm/boot/dts/at91sam9263ek.dts
@@ -22,15 +22,6 @@
22 }; 22 };
23 23
24 clocks { 24 clocks {
25 #address-cells = <1>;
26 #size-cells = <1>;
27 ranges;
28
29 main_clock: clock@0 {
30 compatible = "atmel,osc", "fixed-clock";
31 clock-frequency = <16367660>;
32 };
33
34 slow_xtal { 25 slow_xtal {
35 clock-frequency = <32768>; 26 clock-frequency = <32768>;
36 }; 27 };
@@ -213,14 +204,14 @@
213 label = "left_click"; 204 label = "left_click";
214 gpios = <&pioC 5 GPIO_ACTIVE_LOW>; 205 gpios = <&pioC 5 GPIO_ACTIVE_LOW>;
215 linux,code = <272>; 206 linux,code = <272>;
216 gpio-key,wakeup; 207 wakeup-source;
217 }; 208 };
218 209
219 right_click { 210 right_click {
220 label = "right_click"; 211 label = "right_click";
221 gpios = <&pioC 4 GPIO_ACTIVE_LOW>; 212 gpios = <&pioC 4 GPIO_ACTIVE_LOW>;
222 linux,code = <273>; 213 linux,code = <273>;
223 gpio-key,wakeup; 214 wakeup-source;
224 }; 215 };
225 }; 216 };
226 217
diff --git a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
index 57548a2c5a1e..e9cc99b6353a 100644
--- a/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
+++ b/arch/arm/boot/dts/at91sam9g20ek_common.dtsi
@@ -19,15 +19,6 @@
19 }; 19 };
20 20
21 clocks { 21 clocks {
22 #address-cells = <1>;
23 #size-cells = <1>;
24 ranges;
25
26 main_clock: clock@0 {
27 compatible = "atmel,osc", "fixed-clock";
28 clock-frequency = <18432000>;
29 };
30
31 slow_xtal { 22 slow_xtal {
32 clock-frequency = <32768>; 23 clock-frequency = <32768>;
33 }; 24 };
@@ -206,14 +197,14 @@
206 label = "Button 3"; 197 label = "Button 3";
207 gpios = <&pioA 30 GPIO_ACTIVE_LOW>; 198 gpios = <&pioA 30 GPIO_ACTIVE_LOW>;
208 linux,code = <0x103>; 199 linux,code = <0x103>;
209 gpio-key,wakeup; 200 wakeup-source;
210 }; 201 };
211 202
212 btn4 { 203 btn4 {
213 label = "Button 4"; 204 label = "Button 4";
214 gpios = <&pioA 31 GPIO_ACTIVE_LOW>; 205 gpios = <&pioA 31 GPIO_ACTIVE_LOW>;
215 linux,code = <0x104>; 206 linux,code = <0x104>;
216 gpio-key,wakeup; 207 wakeup-source;
217 }; 208 };
218 }; 209 };
219 210
diff --git a/arch/arm/boot/dts/at91sam9m10g45ek.dts b/arch/arm/boot/dts/at91sam9m10g45ek.dts
index 9d16ef8453c5..2400c99134f7 100644
--- a/arch/arm/boot/dts/at91sam9m10g45ek.dts
+++ b/arch/arm/boot/dts/at91sam9m10g45ek.dts
@@ -24,15 +24,6 @@
24 }; 24 };
25 25
26 clocks { 26 clocks {
27 #address-cells = <1>;
28 #size-cells = <1>;
29 ranges;
30
31 main_clock: clock@0 {
32 compatible = "atmel,osc", "fixed-clock";
33 clock-frequency = <12000000>;
34 };
35
36 slow_xtal { 27 slow_xtal {
37 clock-frequency = <32768>; 28 clock-frequency = <32768>;
38 }; 29 };
@@ -323,14 +314,14 @@
323 label = "left_click"; 314 label = "left_click";
324 gpios = <&pioB 6 GPIO_ACTIVE_LOW>; 315 gpios = <&pioB 6 GPIO_ACTIVE_LOW>;
325 linux,code = <272>; 316 linux,code = <272>;
326 gpio-key,wakeup; 317 wakeup-source;
327 }; 318 };
328 319
329 right_click { 320 right_click {
330 label = "right_click"; 321 label = "right_click";
331 gpios = <&pioB 7 GPIO_ACTIVE_LOW>; 322 gpios = <&pioB 7 GPIO_ACTIVE_LOW>;
332 linux,code = <273>; 323 linux,code = <273>;
333 gpio-key,wakeup; 324 wakeup-source;
334 }; 325 };
335 326
336 left { 327 left {
diff --git a/arch/arm/boot/dts/at91sam9n12ek.dts b/arch/arm/boot/dts/at91sam9n12ek.dts
index acf3451a332d..ca4ddf86817a 100644
--- a/arch/arm/boot/dts/at91sam9n12ek.dts
+++ b/arch/arm/boot/dts/at91sam9n12ek.dts
@@ -23,15 +23,6 @@
23 }; 23 };
24 24
25 clocks { 25 clocks {
26 #address-cells = <1>;
27 #size-cells = <1>;
28 ranges;
29
30 main_clock: clock@0 {
31 compatible = "atmel,osc", "fixed-clock";
32 clock-frequency = <16000000>;
33 };
34
35 slow_xtal { 26 slow_xtal {
36 clock-frequency = <32768>; 27 clock-frequency = <32768>;
37 }; 28 };
@@ -219,7 +210,7 @@
219 label = "Enter"; 210 label = "Enter";
220 gpios = <&pioB 3 GPIO_ACTIVE_LOW>; 211 gpios = <&pioB 3 GPIO_ACTIVE_LOW>;
221 linux,code = <28>; 212 linux,code = <28>;
222 gpio-key,wakeup; 213 wakeup-source;
223 }; 214 };
224 }; 215 };
225 216
diff --git a/arch/arm/boot/dts/at91sam9rlek.dts b/arch/arm/boot/dts/at91sam9rlek.dts
index 558c9f220bed..f10566f759cd 100644
--- a/arch/arm/boot/dts/at91sam9rlek.dts
+++ b/arch/arm/boot/dts/at91sam9rlek.dts
@@ -22,15 +22,6 @@
22 }; 22 };
23 23
24 clocks { 24 clocks {
25 #address-cells = <1>;
26 #size-cells = <1>;
27 ranges;
28
29 main_clock: clock {
30 compatible = "atmel,osc", "fixed-clock";
31 clock-frequency = <12000000>;
32 };
33
34 slow_xtal { 25 slow_xtal {
35 clock-frequency = <32768>; 26 clock-frequency = <32768>;
36 }; 27 };
@@ -225,14 +216,14 @@
225 label = "right_click"; 216 label = "right_click";
226 gpios = <&pioB 0 GPIO_ACTIVE_LOW>; 217 gpios = <&pioB 0 GPIO_ACTIVE_LOW>;
227 linux,code = <273>; 218 linux,code = <273>;
228 gpio-key,wakeup; 219 wakeup-source;
229 }; 220 };
230 221
231 left_click { 222 left_click {
232 label = "left_click"; 223 label = "left_click";
233 gpios = <&pioB 1 GPIO_ACTIVE_LOW>; 224 gpios = <&pioB 1 GPIO_ACTIVE_LOW>;
234 linux,code = <272>; 225 linux,code = <272>;
235 gpio-key,wakeup; 226 wakeup-source;
236 }; 227 };
237 }; 228 };
238 229
diff --git a/arch/arm/boot/dts/at91sam9x5cm.dtsi b/arch/arm/boot/dts/at91sam9x5cm.dtsi
index 26112ebd15fc..b098ad8cd93a 100644
--- a/arch/arm/boot/dts/at91sam9x5cm.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5cm.dtsi
@@ -13,17 +13,6 @@
13 }; 13 };
14 14
15 clocks { 15 clocks {
16 #address-cells = <1>;
17 #size-cells = <1>;
18 ranges;
19
20 main_clock: clock@0 {
21 compatible = "atmel,osc", "fixed-clock";
22 clock-frequency = <12000000>;
23 };
24 };
25
26 clocks {
27 slow_xtal { 16 slow_xtal {
28 clock-frequency = <32768>; 17 clock-frequency = <32768>;
29 }; 18 };
diff --git a/arch/arm/boot/dts/berlin2q.dtsi b/arch/arm/boot/dts/berlin2q.dtsi
index 8ea177f375dd..fb1da99996ea 100644
--- a/arch/arm/boot/dts/berlin2q.dtsi
+++ b/arch/arm/boot/dts/berlin2q.dtsi
@@ -118,7 +118,8 @@
118 sdhci0: sdhci@ab0000 { 118 sdhci0: sdhci@ab0000 {
119 compatible = "mrvl,pxav3-mmc"; 119 compatible = "mrvl,pxav3-mmc";
120 reg = <0xab0000 0x200>; 120 reg = <0xab0000 0x200>;
121 clocks = <&chip_clk CLKID_SDIO1XIN>; 121 clocks = <&chip_clk CLKID_SDIO1XIN>, <&chip_clk CLKID_SDIO>;
122 clock-names = "io", "core";
122 interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>; 123 interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
123 status = "disabled"; 124 status = "disabled";
124 }; 125 };
@@ -126,7 +127,8 @@
126 sdhci1: sdhci@ab0800 { 127 sdhci1: sdhci@ab0800 {
127 compatible = "mrvl,pxav3-mmc"; 128 compatible = "mrvl,pxav3-mmc";
128 reg = <0xab0800 0x200>; 129 reg = <0xab0800 0x200>;
129 clocks = <&chip_clk CLKID_SDIO1XIN>; 130 clocks = <&chip_clk CLKID_SDIO1XIN>, <&chip_clk CLKID_SDIO>;
131 clock-names = "io", "core";
130 interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>; 132 interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
131 status = "disabled"; 133 status = "disabled";
132 }; 134 };
@@ -135,7 +137,7 @@
135 compatible = "mrvl,pxav3-mmc"; 137 compatible = "mrvl,pxav3-mmc";
136 reg = <0xab1000 0x200>; 138 reg = <0xab1000 0x200>;
137 interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>; 139 interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
138 clocks = <&chip_clk CLKID_NFC_ECC>, <&chip_clk CLKID_NFC>; 140 clocks = <&chip_clk CLKID_NFC_ECC>, <&chip_clk CLKID_SDIO>;
139 clock-names = "io", "core"; 141 clock-names = "io", "core";
140 status = "disabled"; 142 status = "disabled";
141 }; 143 };
diff --git a/arch/arm/boot/dts/dm816x.dtsi b/arch/arm/boot/dts/dm816x.dtsi
index 3c99cfa1a876..eee636de4cd8 100644
--- a/arch/arm/boot/dts/dm816x.dtsi
+++ b/arch/arm/boot/dts/dm816x.dtsi
@@ -218,6 +218,7 @@
218 reg = <0x480c8000 0x2000>; 218 reg = <0x480c8000 0x2000>;
219 interrupts = <77>; 219 interrupts = <77>;
220 ti,hwmods = "mailbox"; 220 ti,hwmods = "mailbox";
221 #mbox-cells = <1>;
221 ti,mbox-num-users = <4>; 222 ti,mbox-num-users = <4>;
222 ti,mbox-num-fifos = <12>; 223 ti,mbox-num-fifos = <12>;
223 mbox_dsp: mbox_dsp { 224 mbox_dsp: mbox_dsp {
@@ -279,8 +280,11 @@
279 ti,spi-num-cs = <4>; 280 ti,spi-num-cs = <4>;
280 ti,hwmods = "mcspi1"; 281 ti,hwmods = "mcspi1";
281 dmas = <&edma 16 &edma 17 282 dmas = <&edma 16 &edma 17
282 &edma 18 &edma 19>; 283 &edma 18 &edma 19
283 dma-names = "tx0", "rx0", "tx1", "rx1"; 284 &edma 20 &edma 21
285 &edma 22 &edma 23>;
286 dma-names = "tx0", "rx0", "tx1", "rx1",
287 "tx2", "rx2", "tx3", "rx3";
284 }; 288 };
285 289
286 mmc1: mmc@48060000 { 290 mmc1: mmc@48060000 {
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index bc672fb91466..fe99231cbde5 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -1459,8 +1459,8 @@
1459 interrupt-names = "tx", "rx"; 1459 interrupt-names = "tx", "rx";
1460 dmas = <&sdma_xbar 133>, <&sdma_xbar 132>; 1460 dmas = <&sdma_xbar 133>, <&sdma_xbar 132>;
1461 dma-names = "tx", "rx"; 1461 dma-names = "tx", "rx";
1462 clocks = <&mcasp3_ahclkx_mux>; 1462 clocks = <&mcasp3_aux_gfclk_mux>, <&mcasp3_ahclkx_mux>;
1463 clock-names = "fck"; 1463 clock-names = "fck", "ahclkx";
1464 status = "disabled"; 1464 status = "disabled";
1465 }; 1465 };
1466 1466
diff --git a/arch/arm/boot/dts/imx6q-gw5400-a.dts b/arch/arm/boot/dts/imx6q-gw5400-a.dts
index 58adf176425a..a51834e1dd27 100644
--- a/arch/arm/boot/dts/imx6q-gw5400-a.dts
+++ b/arch/arm/boot/dts/imx6q-gw5400-a.dts
@@ -154,7 +154,7 @@
154&fec { 154&fec {
155 pinctrl-names = "default"; 155 pinctrl-names = "default";
156 pinctrl-0 = <&pinctrl_enet>; 156 pinctrl-0 = <&pinctrl_enet>;
157 phy-mode = "rgmii"; 157 phy-mode = "rgmii-id";
158 phy-reset-gpios = <&gpio1 30 GPIO_ACTIVE_HIGH>; 158 phy-reset-gpios = <&gpio1 30 GPIO_ACTIVE_HIGH>;
159 status = "okay"; 159 status = "okay";
160}; 160};
diff --git a/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi
index 7b31fdb79ced..dc0cebfe22d7 100644
--- a/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi
@@ -94,7 +94,7 @@
94&fec { 94&fec {
95 pinctrl-names = "default"; 95 pinctrl-names = "default";
96 pinctrl-0 = <&pinctrl_enet>; 96 pinctrl-0 = <&pinctrl_enet>;
97 phy-mode = "rgmii"; 97 phy-mode = "rgmii-id";
98 phy-reset-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>; 98 phy-reset-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>;
99 status = "okay"; 99 status = "okay";
100}; 100};
diff --git a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
index 1b66328a8498..18cd4114a23e 100644
--- a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
@@ -154,7 +154,7 @@
154&fec { 154&fec {
155 pinctrl-names = "default"; 155 pinctrl-names = "default";
156 pinctrl-0 = <&pinctrl_enet>; 156 pinctrl-0 = <&pinctrl_enet>;
157 phy-mode = "rgmii"; 157 phy-mode = "rgmii-id";
158 phy-reset-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>; 158 phy-reset-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>;
159 status = "okay"; 159 status = "okay";
160}; 160};
diff --git a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
index 7c51839ff934..eea90f37bbb8 100644
--- a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
@@ -155,7 +155,7 @@
155&fec { 155&fec {
156 pinctrl-names = "default"; 156 pinctrl-names = "default";
157 pinctrl-0 = <&pinctrl_enet>; 157 pinctrl-0 = <&pinctrl_enet>;
158 phy-mode = "rgmii"; 158 phy-mode = "rgmii-id";
159 phy-reset-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>; 159 phy-reset-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>;
160 status = "okay"; 160 status = "okay";
161}; 161};
diff --git a/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
index 929e0b37bd9e..6c11a2ae35ef 100644
--- a/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
@@ -145,7 +145,7 @@
145&fec { 145&fec {
146 pinctrl-names = "default"; 146 pinctrl-names = "default";
147 pinctrl-0 = <&pinctrl_enet>; 147 pinctrl-0 = <&pinctrl_enet>;
148 phy-mode = "rgmii"; 148 phy-mode = "rgmii-id";
149 phy-reset-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>; 149 phy-reset-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>;
150 status = "okay"; 150 status = "okay";
151}; 151};
diff --git a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
index 8263fc18a7d9..d354d406954d 100644
--- a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
@@ -113,14 +113,14 @@
113&clks { 113&clks {
114 assigned-clocks = <&clks IMX6QDL_PLL4_BYPASS_SRC>, 114 assigned-clocks = <&clks IMX6QDL_PLL4_BYPASS_SRC>,
115 <&clks IMX6QDL_PLL4_BYPASS>, 115 <&clks IMX6QDL_PLL4_BYPASS>,
116 <&clks IMX6QDL_CLK_PLL4_POST_DIV>,
117 <&clks IMX6QDL_CLK_LDB_DI0_SEL>, 116 <&clks IMX6QDL_CLK_LDB_DI0_SEL>,
118 <&clks IMX6QDL_CLK_LDB_DI1_SEL>; 117 <&clks IMX6QDL_CLK_LDB_DI1_SEL>,
118 <&clks IMX6QDL_CLK_PLL4_POST_DIV>;
119 assigned-clock-parents = <&clks IMX6QDL_CLK_LVDS2_IN>, 119 assigned-clock-parents = <&clks IMX6QDL_CLK_LVDS2_IN>,
120 <&clks IMX6QDL_PLL4_BYPASS_SRC>, 120 <&clks IMX6QDL_PLL4_BYPASS_SRC>,
121 <&clks IMX6QDL_CLK_PLL3_USB_OTG>, 121 <&clks IMX6QDL_CLK_PLL3_USB_OTG>,
122 <&clks IMX6QDL_CLK_PLL3_USB_OTG>; 122 <&clks IMX6QDL_CLK_PLL3_USB_OTG>;
123 assigned-clock-rates = <0>, <0>, <24576000>; 123 assigned-clock-rates = <0>, <0>, <0>, <0>, <24576000>;
124}; 124};
125 125
126&ecspi1 { 126&ecspi1 {
diff --git a/arch/arm/boot/dts/k2l-netcp.dtsi b/arch/arm/boot/dts/k2l-netcp.dtsi
index 01aef230773d..5acbd0dcc2ab 100644
--- a/arch/arm/boot/dts/k2l-netcp.dtsi
+++ b/arch/arm/boot/dts/k2l-netcp.dtsi
@@ -137,7 +137,7 @@ netcp: netcp@26000000 {
137 /* NetCP address range */ 137 /* NetCP address range */
138 ranges = <0 0x26000000 0x1000000>; 138 ranges = <0 0x26000000 0x1000000>;
139 139
140 clocks = <&papllclk>, <&clkcpgmac>, <&chipclk12>; 140 clocks = <&clkosr>, <&papllclk>, <&clkcpgmac>, <&chipclk12>;
141 dma-coherent; 141 dma-coherent;
142 142
143 ti,navigator-dmas = <&dma_gbe 0>, 143 ti,navigator-dmas = <&dma_gbe 0>,
diff --git a/arch/arm/boot/dts/kirkwood-ts219.dtsi b/arch/arm/boot/dts/kirkwood-ts219.dtsi
index c56ab6bbfe3c..0e46560551f4 100644
--- a/arch/arm/boot/dts/kirkwood-ts219.dtsi
+++ b/arch/arm/boot/dts/kirkwood-ts219.dtsi
@@ -40,7 +40,7 @@
40 }; 40 };
41 poweroff@12100 { 41 poweroff@12100 {
42 compatible = "qnap,power-off"; 42 compatible = "qnap,power-off";
43 reg = <0x12000 0x100>; 43 reg = <0x12100 0x100>;
44 clocks = <&gate_clk 7>; 44 clocks = <&gate_clk 7>;
45 }; 45 };
46 spi@10600 { 46 spi@10600 {
diff --git a/arch/arm/boot/dts/omap4-duovero-parlor.dts b/arch/arm/boot/dts/omap4-duovero-parlor.dts
index 1a78f013f37a..b75f7b2b7c4a 100644
--- a/arch/arm/boot/dts/omap4-duovero-parlor.dts
+++ b/arch/arm/boot/dts/omap4-duovero-parlor.dts
@@ -189,3 +189,7 @@
189 }; 189 };
190}; 190};
191 191
192&uart3 {
193 interrupts-extended = <&wakeupgen GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH
194 &omap4_pmx_core OMAP4_UART3_RX>;
195};
diff --git a/arch/arm/boot/dts/rk3288-veyron-minnie.dts b/arch/arm/boot/dts/rk3288-veyron-minnie.dts
index 8fd8ef2c72da..85f0373df498 100644
--- a/arch/arm/boot/dts/rk3288-veyron-minnie.dts
+++ b/arch/arm/boot/dts/rk3288-veyron-minnie.dts
@@ -86,6 +86,10 @@
86 }; 86 };
87}; 87};
88 88
89&emmc {
90 /delete-property/mmc-hs200-1_8v;
91};
92
89&gpio_keys { 93&gpio_keys {
90 pinctrl-0 = <&pwr_key_l &ap_lid_int_l &volum_down_l &volum_up_l>; 94 pinctrl-0 = <&pwr_key_l &ap_lid_int_l &volum_down_l &volum_up_l>;
91 95
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
index 6a79c9c526b8..04ea209f1737 100644
--- a/arch/arm/boot/dts/rk3288.dtsi
+++ b/arch/arm/boot/dts/rk3288.dtsi
@@ -452,8 +452,10 @@
452 clock-names = "tsadc", "apb_pclk"; 452 clock-names = "tsadc", "apb_pclk";
453 resets = <&cru SRST_TSADC>; 453 resets = <&cru SRST_TSADC>;
454 reset-names = "tsadc-apb"; 454 reset-names = "tsadc-apb";
455 pinctrl-names = "default"; 455 pinctrl-names = "init", "default", "sleep";
456 pinctrl-0 = <&otp_out>; 456 pinctrl-0 = <&otp_gpio>;
457 pinctrl-1 = <&otp_out>;
458 pinctrl-2 = <&otp_gpio>;
457 #thermal-sensor-cells = <1>; 459 #thermal-sensor-cells = <1>;
458 rockchip,hw-tshut-temp = <95000>; 460 rockchip,hw-tshut-temp = <95000>;
459 status = "disabled"; 461 status = "disabled";
@@ -1395,6 +1397,10 @@
1395 }; 1397 };
1396 1398
1397 tsadc { 1399 tsadc {
1400 otp_gpio: otp-gpio {
1401 rockchip,pins = <0 10 RK_FUNC_GPIO &pcfg_pull_none>;
1402 };
1403
1398 otp_out: otp-out { 1404 otp_out: otp-out {
1399 rockchip,pins = <0 10 RK_FUNC_1 &pcfg_pull_none>; 1405 rockchip,pins = <0 10 RK_FUNC_1 &pcfg_pull_none>;
1400 }; 1406 };
diff --git a/arch/arm/boot/dts/sama5d35ek.dts b/arch/arm/boot/dts/sama5d35ek.dts
index d9a9aca1ccfd..e812f5c1bf70 100644
--- a/arch/arm/boot/dts/sama5d35ek.dts
+++ b/arch/arm/boot/dts/sama5d35ek.dts
@@ -49,7 +49,7 @@
49 label = "pb_user1"; 49 label = "pb_user1";
50 gpios = <&pioE 27 GPIO_ACTIVE_HIGH>; 50 gpios = <&pioE 27 GPIO_ACTIVE_HIGH>;
51 linux,code = <0x100>; 51 linux,code = <0x100>;
52 gpio-key,wakeup; 52 wakeup-source;
53 }; 53 };
54 }; 54 };
55}; 55};
diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi
index 15bbaf690047..2193637b9cd2 100644
--- a/arch/arm/boot/dts/sama5d4.dtsi
+++ b/arch/arm/boot/dts/sama5d4.dtsi
@@ -1300,7 +1300,7 @@
1300 }; 1300 };
1301 1301
1302 watchdog@fc068640 { 1302 watchdog@fc068640 {
1303 compatible = "atmel,at91sam9260-wdt"; 1303 compatible = "atmel,sama5d4-wdt";
1304 reg = <0xfc068640 0x10>; 1304 reg = <0xfc068640 0x10>;
1305 clocks = <&clk32k>; 1305 clocks = <&clk32k>;
1306 status = "disabled"; 1306 status = "disabled";
diff --git a/arch/arm/boot/dts/sun6i-a31s-primo81.dts b/arch/arm/boot/dts/sun6i-a31s-primo81.dts
index 2d4250b1faf8..68b479b8772c 100644
--- a/arch/arm/boot/dts/sun6i-a31s-primo81.dts
+++ b/arch/arm/boot/dts/sun6i-a31s-primo81.dts
@@ -83,6 +83,7 @@
83 reg = <0x5d>; 83 reg = <0x5d>;
84 interrupt-parent = <&pio>; 84 interrupt-parent = <&pio>;
85 interrupts = <0 3 IRQ_TYPE_LEVEL_HIGH>; /* PA3 */ 85 interrupts = <0 3 IRQ_TYPE_LEVEL_HIGH>; /* PA3 */
86 touchscreen-swapped-x-y;
86 }; 87 };
87}; 88};
88 89
diff --git a/arch/arm/boot/dts/tegra124-nyan.dtsi b/arch/arm/boot/dts/tegra124-nyan.dtsi
index 40c23a0b7cfc..ec1aa64ded68 100644
--- a/arch/arm/boot/dts/tegra124-nyan.dtsi
+++ b/arch/arm/boot/dts/tegra124-nyan.dtsi
@@ -399,7 +399,7 @@
399 399
400 /* CPU DFLL clock */ 400 /* CPU DFLL clock */
401 clock@0,70110000 { 401 clock@0,70110000 {
402 status = "okay"; 402 status = "disabled";
403 vdd-cpu-supply = <&vdd_cpu>; 403 vdd-cpu-supply = <&vdd_cpu>;
404 nvidia,i2c-fs-rate = <400000>; 404 nvidia,i2c-fs-rate = <400000>;
405 }; 405 };
diff --git a/arch/arm/boot/dts/usb_a9260_common.dtsi b/arch/arm/boot/dts/usb_a9260_common.dtsi
index 12edafefd44a..9beea8976584 100644
--- a/arch/arm/boot/dts/usb_a9260_common.dtsi
+++ b/arch/arm/boot/dts/usb_a9260_common.dtsi
@@ -115,7 +115,7 @@
115 label = "user_pb"; 115 label = "user_pb";
116 gpios = <&pioB 10 GPIO_ACTIVE_LOW>; 116 gpios = <&pioB 10 GPIO_ACTIVE_LOW>;
117 linux,code = <28>; 117 linux,code = <28>;
118 gpio-key,wakeup; 118 wakeup-source;
119 }; 119 };
120 }; 120 };
121 121
diff --git a/arch/arm/boot/dts/usb_a9263.dts b/arch/arm/boot/dts/usb_a9263.dts
index 68c0de36c339..8cc6edb29694 100644
--- a/arch/arm/boot/dts/usb_a9263.dts
+++ b/arch/arm/boot/dts/usb_a9263.dts
@@ -143,7 +143,7 @@
143 label = "user_pb"; 143 label = "user_pb";
144 gpios = <&pioB 10 GPIO_ACTIVE_LOW>; 144 gpios = <&pioB 10 GPIO_ACTIVE_LOW>;
145 linux,code = <28>; 145 linux,code = <28>;
146 gpio-key,wakeup; 146 wakeup-source;
147 }; 147 };
148 }; 148 };
149 149
diff --git a/arch/arm/boot/dts/vf610-colibri.dtsi b/arch/arm/boot/dts/vf610-colibri.dtsi
index 19fe045b8334..2d7eab755210 100644
--- a/arch/arm/boot/dts/vf610-colibri.dtsi
+++ b/arch/arm/boot/dts/vf610-colibri.dtsi
@@ -18,8 +18,3 @@
18 reg = <0x80000000 0x10000000>; 18 reg = <0x80000000 0x10000000>;
19 }; 19 };
20}; 20};
21
22&L2 {
23 arm,data-latency = <2 1 2>;
24 arm,tag-latency = <3 2 3>;
25};
diff --git a/arch/arm/boot/dts/vf610.dtsi b/arch/arm/boot/dts/vf610.dtsi
index 5f8eb1bd782b..58bc6e448be5 100644
--- a/arch/arm/boot/dts/vf610.dtsi
+++ b/arch/arm/boot/dts/vf610.dtsi
@@ -19,7 +19,7 @@
19 reg = <0x40006000 0x1000>; 19 reg = <0x40006000 0x1000>;
20 cache-unified; 20 cache-unified;
21 cache-level = <2>; 21 cache-level = <2>;
22 arm,data-latency = <1 1 1>; 22 arm,data-latency = <3 3 3>;
23 arm,tag-latency = <2 2 2>; 23 arm,tag-latency = <2 2 2>;
24 }; 24 };
25}; 25};
diff --git a/arch/arm/boot/dts/vfxxx.dtsi b/arch/arm/boot/dts/vfxxx.dtsi
index 6736bae43a5b..3cd1b27f2697 100644
--- a/arch/arm/boot/dts/vfxxx.dtsi
+++ b/arch/arm/boot/dts/vfxxx.dtsi
@@ -158,7 +158,7 @@
158 interrupts = <67 IRQ_TYPE_LEVEL_HIGH>; 158 interrupts = <67 IRQ_TYPE_LEVEL_HIGH>;
159 clocks = <&clks VF610_CLK_DSPI0>; 159 clocks = <&clks VF610_CLK_DSPI0>;
160 clock-names = "dspi"; 160 clock-names = "dspi";
161 spi-num-chipselects = <5>; 161 spi-num-chipselects = <6>;
162 status = "disabled"; 162 status = "disabled";
163 }; 163 };
164 164
@@ -170,7 +170,7 @@
170 interrupts = <68 IRQ_TYPE_LEVEL_HIGH>; 170 interrupts = <68 IRQ_TYPE_LEVEL_HIGH>;
171 clocks = <&clks VF610_CLK_DSPI1>; 171 clocks = <&clks VF610_CLK_DSPI1>;
172 clock-names = "dspi"; 172 clock-names = "dspi";
173 spi-num-chipselects = <5>; 173 spi-num-chipselects = <4>;
174 status = "disabled"; 174 status = "disabled";
175 }; 175 };
176 176
@@ -178,8 +178,10 @@
178 compatible = "fsl,vf610-sai"; 178 compatible = "fsl,vf610-sai";
179 reg = <0x40031000 0x1000>; 179 reg = <0x40031000 0x1000>;
180 interrupts = <86 IRQ_TYPE_LEVEL_HIGH>; 180 interrupts = <86 IRQ_TYPE_LEVEL_HIGH>;
181 clocks = <&clks VF610_CLK_SAI2>; 181 clocks = <&clks VF610_CLK_SAI2>,
182 clock-names = "sai"; 182 <&clks VF610_CLK_SAI2_DIV>,
183 <&clks 0>, <&clks 0>;
184 clock-names = "bus", "mclk1", "mclk2", "mclk3";
183 dma-names = "tx", "rx"; 185 dma-names = "tx", "rx";
184 dmas = <&edma0 0 21>, 186 dmas = <&edma0 0 21>,
185 <&edma0 0 20>; 187 <&edma0 0 20>;
@@ -461,6 +463,8 @@
461 clock-names = "adc"; 463 clock-names = "adc";
462 #io-channel-cells = <1>; 464 #io-channel-cells = <1>;
463 status = "disabled"; 465 status = "disabled";
466 fsl,adck-max-frequency = <30000000>, <40000000>,
467 <20000000>;
464 }; 468 };
465 469
466 esdhc0: esdhc@400b1000 { 470 esdhc0: esdhc@400b1000 {
@@ -472,8 +476,6 @@
472 <&clks VF610_CLK_ESDHC0>; 476 <&clks VF610_CLK_ESDHC0>;
473 clock-names = "ipg", "ahb", "per"; 477 clock-names = "ipg", "ahb", "per";
474 status = "disabled"; 478 status = "disabled";
475 fsl,adck-max-frequency = <30000000>, <40000000>,
476 <20000000>;
477 }; 479 };
478 480
479 esdhc1: esdhc@400b2000 { 481 esdhc1: esdhc@400b2000 {
diff --git a/arch/arm/configs/at91_dt_defconfig b/arch/arm/configs/at91_dt_defconfig
index 1b1e5acd76e2..e4b1be66b3f5 100644
--- a/arch/arm/configs/at91_dt_defconfig
+++ b/arch/arm/configs/at91_dt_defconfig
@@ -125,7 +125,6 @@ CONFIG_POWER_RESET=y
125# CONFIG_HWMON is not set 125# CONFIG_HWMON is not set
126CONFIG_WATCHDOG=y 126CONFIG_WATCHDOG=y
127CONFIG_AT91SAM9X_WATCHDOG=y 127CONFIG_AT91SAM9X_WATCHDOG=y
128CONFIG_SSB=m
129CONFIG_MFD_ATMEL_HLCDC=y 128CONFIG_MFD_ATMEL_HLCDC=y
130CONFIG_REGULATOR=y 129CONFIG_REGULATOR=y
131CONFIG_REGULATOR_FIXED_VOLTAGE=y 130CONFIG_REGULATOR_FIXED_VOLTAGE=y
diff --git a/arch/arm/configs/sama5_defconfig b/arch/arm/configs/sama5_defconfig
index a0c57ac88b27..63f7e6ce649a 100644
--- a/arch/arm/configs/sama5_defconfig
+++ b/arch/arm/configs/sama5_defconfig
@@ -129,7 +129,6 @@ CONFIG_GPIO_SYSFS=y
129CONFIG_POWER_SUPPLY=y 129CONFIG_POWER_SUPPLY=y
130CONFIG_POWER_RESET=y 130CONFIG_POWER_RESET=y
131# CONFIG_HWMON is not set 131# CONFIG_HWMON is not set
132CONFIG_SSB=m
133CONFIG_MFD_ATMEL_FLEXCOM=y 132CONFIG_MFD_ATMEL_FLEXCOM=y
134CONFIG_REGULATOR=y 133CONFIG_REGULATOR=y
135CONFIG_REGULATOR_FIXED_VOLTAGE=y 134CONFIG_REGULATOR_FIXED_VOLTAGE=y
diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h
index 6607d976e07d..7da5503c0591 100644
--- a/arch/arm/include/asm/arch_gicv3.h
+++ b/arch/arm/include/asm/arch_gicv3.h
@@ -21,6 +21,7 @@
21#ifndef __ASSEMBLY__ 21#ifndef __ASSEMBLY__
22 22
23#include <linux/io.h> 23#include <linux/io.h>
24#include <asm/barrier.h>
24 25
25#define __ACCESS_CP15(CRn, Op1, CRm, Op2) p15, Op1, %0, CRn, CRm, Op2 26#define __ACCESS_CP15(CRn, Op1, CRm, Op2) p15, Op1, %0, CRn, CRm, Op2
26#define __ACCESS_CP15_64(Op1, CRm) p15, Op1, %Q0, %R0, CRm 27#define __ACCESS_CP15_64(Op1, CRm) p15, Op1, %Q0, %R0, CRm
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index be1d07d59ee9..1bd9510de1b9 100644
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -40,6 +40,11 @@ extern void arch_trigger_all_cpu_backtrace(bool);
40#define arch_trigger_all_cpu_backtrace(x) arch_trigger_all_cpu_backtrace(x) 40#define arch_trigger_all_cpu_backtrace(x) arch_trigger_all_cpu_backtrace(x)
41#endif 41#endif
42 42
43static inline int nr_legacy_irqs(void)
44{
45 return NR_IRQS_LEGACY;
46}
47
43#endif 48#endif
44 49
45#endif 50#endif
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index a9c80a2ea1a7..3095df091ff8 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -28,6 +28,18 @@
28unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num); 28unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
29unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu); 29unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu);
30 30
31static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu,
32 u8 reg_num)
33{
34 return *vcpu_reg(vcpu, reg_num);
35}
36
37static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
38 unsigned long val)
39{
40 *vcpu_reg(vcpu, reg_num) = val;
41}
42
31bool kvm_condition_valid(struct kvm_vcpu *vcpu); 43bool kvm_condition_valid(struct kvm_vcpu *vcpu);
32void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr); 44void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr);
33void kvm_inject_undefined(struct kvm_vcpu *vcpu); 45void kvm_inject_undefined(struct kvm_vcpu *vcpu);
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 8cc85a4ebec2..35c9db857ebe 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -510,10 +510,14 @@ __copy_to_user_std(void __user *to, const void *from, unsigned long n);
510static inline unsigned long __must_check 510static inline unsigned long __must_check
511__copy_to_user(void __user *to, const void *from, unsigned long n) 511__copy_to_user(void __user *to, const void *from, unsigned long n)
512{ 512{
513#ifndef CONFIG_UACCESS_WITH_MEMCPY
513 unsigned int __ua_flags = uaccess_save_and_enable(); 514 unsigned int __ua_flags = uaccess_save_and_enable();
514 n = arm_copy_to_user(to, from, n); 515 n = arm_copy_to_user(to, from, n);
515 uaccess_restore(__ua_flags); 516 uaccess_restore(__ua_flags);
516 return n; 517 return n;
518#else
519 return arm_copy_to_user(to, from, n);
520#endif
517} 521}
518 522
519extern unsigned long __must_check 523extern unsigned long __must_check
diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h
index 7a2a32a1d5a8..ede692ffa32e 100644
--- a/arch/arm/include/uapi/asm/unistd.h
+++ b/arch/arm/include/uapi/asm/unistd.h
@@ -416,6 +416,7 @@
416#define __NR_execveat (__NR_SYSCALL_BASE+387) 416#define __NR_execveat (__NR_SYSCALL_BASE+387)
417#define __NR_userfaultfd (__NR_SYSCALL_BASE+388) 417#define __NR_userfaultfd (__NR_SYSCALL_BASE+388)
418#define __NR_membarrier (__NR_SYSCALL_BASE+389) 418#define __NR_membarrier (__NR_SYSCALL_BASE+389)
419#define __NR_mlock2 (__NR_SYSCALL_BASE+390)
419 420
420/* 421/*
421 * The following SWIs are ARM private. 422 * The following SWIs are ARM private.
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index 6551d28c27e6..066f7f9ba411 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -17,11 +17,6 @@
17#include <asm/mach/pci.h> 17#include <asm/mach/pci.h>
18 18
19static int debug_pci; 19static int debug_pci;
20static resource_size_t (*align_resource)(struct pci_dev *dev,
21 const struct resource *res,
22 resource_size_t start,
23 resource_size_t size,
24 resource_size_t align) = NULL;
25 20
26/* 21/*
27 * We can't use pci_get_device() here since we are 22 * We can't use pci_get_device() here since we are
@@ -461,7 +456,6 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
461 sys->busnr = busnr; 456 sys->busnr = busnr;
462 sys->swizzle = hw->swizzle; 457 sys->swizzle = hw->swizzle;
463 sys->map_irq = hw->map_irq; 458 sys->map_irq = hw->map_irq;
464 align_resource = hw->align_resource;
465 INIT_LIST_HEAD(&sys->resources); 459 INIT_LIST_HEAD(&sys->resources);
466 460
467 if (hw->private_data) 461 if (hw->private_data)
@@ -470,6 +464,8 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
470 ret = hw->setup(nr, sys); 464 ret = hw->setup(nr, sys);
471 465
472 if (ret > 0) { 466 if (ret > 0) {
467 struct pci_host_bridge *host_bridge;
468
473 ret = pcibios_init_resources(nr, sys); 469 ret = pcibios_init_resources(nr, sys);
474 if (ret) { 470 if (ret) {
475 kfree(sys); 471 kfree(sys);
@@ -491,6 +487,9 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
491 busnr = sys->bus->busn_res.end + 1; 487 busnr = sys->bus->busn_res.end + 1;
492 488
493 list_add(&sys->node, head); 489 list_add(&sys->node, head);
490
491 host_bridge = pci_find_host_bridge(sys->bus);
492 host_bridge->align_resource = hw->align_resource;
494 } else { 493 } else {
495 kfree(sys); 494 kfree(sys);
496 if (ret < 0) 495 if (ret < 0)
@@ -578,14 +577,18 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
578{ 577{
579 struct pci_dev *dev = data; 578 struct pci_dev *dev = data;
580 resource_size_t start = res->start; 579 resource_size_t start = res->start;
580 struct pci_host_bridge *host_bridge;
581 581
582 if (res->flags & IORESOURCE_IO && start & 0x300) 582 if (res->flags & IORESOURCE_IO && start & 0x300)
583 start = (start + 0x3ff) & ~0x3ff; 583 start = (start + 0x3ff) & ~0x3ff;
584 584
585 start = (start + align - 1) & ~(align - 1); 585 start = (start + align - 1) & ~(align - 1);
586 586
587 if (align_resource) 587 host_bridge = pci_find_host_bridge(dev->bus);
588 return align_resource(dev, res, start, size, align); 588
589 if (host_bridge->align_resource)
590 return host_bridge->align_resource(dev, res,
591 start, size, align);
589 592
590 return start; 593 return start;
591} 594}
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index fde6c88d560c..ac368bb068d1 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -399,6 +399,7 @@
399 CALL(sys_execveat) 399 CALL(sys_execveat)
400 CALL(sys_userfaultfd) 400 CALL(sys_userfaultfd)
401 CALL(sys_membarrier) 401 CALL(sys_membarrier)
402 CALL(sys_mlock2)
402#ifndef syscalls_counted 403#ifndef syscalls_counted
403.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls 404.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
404#define syscalls_counted 405#define syscalls_counted
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 7a7c4cea5523..4adfb46e3ee9 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -95,6 +95,22 @@ void __show_regs(struct pt_regs *regs)
95{ 95{
96 unsigned long flags; 96 unsigned long flags;
97 char buf[64]; 97 char buf[64];
98#ifndef CONFIG_CPU_V7M
99 unsigned int domain;
100#ifdef CONFIG_CPU_SW_DOMAIN_PAN
101 /*
102 * Get the domain register for the parent context. In user
103 * mode, we don't save the DACR, so let's use what it should
104 * be. For other modes, we place it after the pt_regs struct.
105 */
106 if (user_mode(regs))
107 domain = DACR_UACCESS_ENABLE;
108 else
109 domain = *(unsigned int *)(regs + 1);
110#else
111 domain = get_domain();
112#endif
113#endif
98 114
99 show_regs_print_info(KERN_DEFAULT); 115 show_regs_print_info(KERN_DEFAULT);
100 116
@@ -123,21 +139,8 @@ void __show_regs(struct pt_regs *regs)
123 139
124#ifndef CONFIG_CPU_V7M 140#ifndef CONFIG_CPU_V7M
125 { 141 {
126 unsigned int domain = get_domain();
127 const char *segment; 142 const char *segment;
128 143
129#ifdef CONFIG_CPU_SW_DOMAIN_PAN
130 /*
131 * Get the domain register for the parent context. In user
132 * mode, we don't save the DACR, so let's use what it should
133 * be. For other modes, we place it after the pt_regs struct.
134 */
135 if (user_mode(regs))
136 domain = DACR_UACCESS_ENABLE;
137 else
138 domain = *(unsigned int *)(regs + 1);
139#endif
140
141 if ((domain & domain_mask(DOMAIN_USER)) == 144 if ((domain & domain_mask(DOMAIN_USER)) ==
142 domain_val(DOMAIN_USER, DOMAIN_NOACCESS)) 145 domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
143 segment = "none"; 146 segment = "none";
@@ -163,11 +166,11 @@ void __show_regs(struct pt_regs *regs)
163 buf[0] = '\0'; 166 buf[0] = '\0';
164#ifdef CONFIG_CPU_CP15_MMU 167#ifdef CONFIG_CPU_CP15_MMU
165 { 168 {
166 unsigned int transbase, dac = get_domain(); 169 unsigned int transbase;
167 asm("mrc p15, 0, %0, c2, c0\n\t" 170 asm("mrc p15, 0, %0, c2, c0\n\t"
168 : "=r" (transbase)); 171 : "=r" (transbase));
169 snprintf(buf, sizeof(buf), " Table: %08x DAC: %08x", 172 snprintf(buf, sizeof(buf), " Table: %08x DAC: %08x",
170 transbase, dac); 173 transbase, domain);
171 } 174 }
172#endif 175#endif
173 asm("mrc p15, 0, %0, c1, c0\n" : "=r" (ctrl)); 176 asm("mrc p15, 0, %0, c1, c0\n" : "=r" (ctrl));
diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
index 5b26e7efa9ea..c3fe769d7558 100644
--- a/arch/arm/kernel/swp_emulate.c
+++ b/arch/arm/kernel/swp_emulate.c
@@ -36,10 +36,10 @@
36 */ 36 */
37#define __user_swpX_asm(data, addr, res, temp, B) \ 37#define __user_swpX_asm(data, addr, res, temp, B) \
38 __asm__ __volatile__( \ 38 __asm__ __volatile__( \
39 " mov %2, %1\n" \ 39 "0: ldrex"B" %2, [%3]\n" \
40 "0: ldrex"B" %1, [%3]\n" \ 40 "1: strex"B" %0, %1, [%3]\n" \
41 "1: strex"B" %0, %2, [%3]\n" \
42 " cmp %0, #0\n" \ 41 " cmp %0, #0\n" \
42 " moveq %1, %2\n" \
43 " movne %0, %4\n" \ 43 " movne %0, %4\n" \
44 "2:\n" \ 44 "2:\n" \
45 " .section .text.fixup,\"ax\"\n" \ 45 " .section .text.fixup,\"ax\"\n" \
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index eab83b2435b8..e06fd299de08 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -564,17 +564,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
564 vcpu_sleep(vcpu); 564 vcpu_sleep(vcpu);
565 565
566 /* 566 /*
567 * Disarming the background timer must be done in a
568 * preemptible context, as this call may sleep.
569 */
570 kvm_timer_flush_hwstate(vcpu);
571
572 /*
573 * Preparing the interrupts to be injected also 567 * Preparing the interrupts to be injected also
574 * involves poking the GIC, which must be done in a 568 * involves poking the GIC, which must be done in a
575 * non-preemptible context. 569 * non-preemptible context.
576 */ 570 */
577 preempt_disable(); 571 preempt_disable();
572 kvm_timer_flush_hwstate(vcpu);
578 kvm_vgic_flush_hwstate(vcpu); 573 kvm_vgic_flush_hwstate(vcpu);
579 574
580 local_irq_disable(); 575 local_irq_disable();
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index 974b1c606d04..3a10c9f1d0a4 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -115,7 +115,7 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
115 trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr, 115 trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
116 data); 116 data);
117 data = vcpu_data_host_to_guest(vcpu, data, len); 117 data = vcpu_data_host_to_guest(vcpu, data, len);
118 *vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt) = data; 118 vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data);
119 } 119 }
120 120
121 return 0; 121 return 0;
@@ -186,7 +186,8 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
186 rt = vcpu->arch.mmio_decode.rt; 186 rt = vcpu->arch.mmio_decode.rt;
187 187
188 if (is_write) { 188 if (is_write) {
189 data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt), len); 189 data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt),
190 len);
190 191
191 trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, data); 192 trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, data);
192 mmio_write_buf(data_buf, len, data); 193 mmio_write_buf(data_buf, len, data);
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 6984342da13d..61d96a645ff3 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -98,6 +98,11 @@ static void kvm_flush_dcache_pud(pud_t pud)
98 __kvm_flush_dcache_pud(pud); 98 __kvm_flush_dcache_pud(pud);
99} 99}
100 100
101static bool kvm_is_device_pfn(unsigned long pfn)
102{
103 return !pfn_valid(pfn);
104}
105
101/** 106/**
102 * stage2_dissolve_pmd() - clear and flush huge PMD entry 107 * stage2_dissolve_pmd() - clear and flush huge PMD entry
103 * @kvm: pointer to kvm structure. 108 * @kvm: pointer to kvm structure.
@@ -213,7 +218,7 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
213 kvm_tlb_flush_vmid_ipa(kvm, addr); 218 kvm_tlb_flush_vmid_ipa(kvm, addr);
214 219
215 /* No need to invalidate the cache for device mappings */ 220 /* No need to invalidate the cache for device mappings */
216 if ((pte_val(old_pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE) 221 if (!kvm_is_device_pfn(pte_pfn(old_pte)))
217 kvm_flush_dcache_pte(old_pte); 222 kvm_flush_dcache_pte(old_pte);
218 223
219 put_page(virt_to_page(pte)); 224 put_page(virt_to_page(pte));
@@ -305,8 +310,7 @@ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
305 310
306 pte = pte_offset_kernel(pmd, addr); 311 pte = pte_offset_kernel(pmd, addr);
307 do { 312 do {
308 if (!pte_none(*pte) && 313 if (!pte_none(*pte) && !kvm_is_device_pfn(pte_pfn(*pte)))
309 (pte_val(*pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
310 kvm_flush_dcache_pte(*pte); 314 kvm_flush_dcache_pte(*pte);
311 } while (pte++, addr += PAGE_SIZE, addr != end); 315 } while (pte++, addr += PAGE_SIZE, addr != end);
312} 316}
@@ -1037,11 +1041,6 @@ static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
1037 return kvm_vcpu_dabt_iswrite(vcpu); 1041 return kvm_vcpu_dabt_iswrite(vcpu);
1038} 1042}
1039 1043
1040static bool kvm_is_device_pfn(unsigned long pfn)
1041{
1042 return !pfn_valid(pfn);
1043}
1044
1045/** 1044/**
1046 * stage2_wp_ptes - write protect PMD range 1045 * stage2_wp_ptes - write protect PMD range
1047 * @pmd: pointer to pmd entry 1046 * @pmd: pointer to pmd entry
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
index 0b556968a6da..a9b3b905e661 100644
--- a/arch/arm/kvm/psci.c
+++ b/arch/arm/kvm/psci.c
@@ -75,7 +75,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
75 unsigned long context_id; 75 unsigned long context_id;
76 phys_addr_t target_pc; 76 phys_addr_t target_pc;
77 77
78 cpu_id = *vcpu_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK; 78 cpu_id = vcpu_get_reg(source_vcpu, 1) & MPIDR_HWID_BITMASK;
79 if (vcpu_mode_is_32bit(source_vcpu)) 79 if (vcpu_mode_is_32bit(source_vcpu))
80 cpu_id &= ~((u32) 0); 80 cpu_id &= ~((u32) 0);
81 81
@@ -94,8 +94,8 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
94 return PSCI_RET_INVALID_PARAMS; 94 return PSCI_RET_INVALID_PARAMS;
95 } 95 }
96 96
97 target_pc = *vcpu_reg(source_vcpu, 2); 97 target_pc = vcpu_get_reg(source_vcpu, 2);
98 context_id = *vcpu_reg(source_vcpu, 3); 98 context_id = vcpu_get_reg(source_vcpu, 3);
99 99
100 kvm_reset_vcpu(vcpu); 100 kvm_reset_vcpu(vcpu);
101 101
@@ -114,7 +114,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
114 * NOTE: We always update r0 (or x0) because for PSCI v0.1 114 * NOTE: We always update r0 (or x0) because for PSCI v0.1
115 * the general purpose registers are undefined upon CPU_ON. 115 * the general purpose registers are undefined upon CPU_ON.
116 */ 116 */
117 *vcpu_reg(vcpu, 0) = context_id; 117 vcpu_set_reg(vcpu, 0, context_id);
118 vcpu->arch.power_off = false; 118 vcpu->arch.power_off = false;
119 smp_mb(); /* Make sure the above is visible */ 119 smp_mb(); /* Make sure the above is visible */
120 120
@@ -134,8 +134,8 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
134 struct kvm *kvm = vcpu->kvm; 134 struct kvm *kvm = vcpu->kvm;
135 struct kvm_vcpu *tmp; 135 struct kvm_vcpu *tmp;
136 136
137 target_affinity = *vcpu_reg(vcpu, 1); 137 target_affinity = vcpu_get_reg(vcpu, 1);
138 lowest_affinity_level = *vcpu_reg(vcpu, 2); 138 lowest_affinity_level = vcpu_get_reg(vcpu, 2);
139 139
140 /* Determine target affinity mask */ 140 /* Determine target affinity mask */
141 target_affinity_mask = psci_affinity_mask(lowest_affinity_level); 141 target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
@@ -209,7 +209,7 @@ int kvm_psci_version(struct kvm_vcpu *vcpu)
209static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu) 209static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
210{ 210{
211 int ret = 1; 211 int ret = 1;
212 unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0); 212 unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
213 unsigned long val; 213 unsigned long val;
214 214
215 switch (psci_fn) { 215 switch (psci_fn) {
@@ -273,13 +273,13 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
273 break; 273 break;
274 } 274 }
275 275
276 *vcpu_reg(vcpu, 0) = val; 276 vcpu_set_reg(vcpu, 0, val);
277 return ret; 277 return ret;
278} 278}
279 279
280static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu) 280static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
281{ 281{
282 unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0); 282 unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
283 unsigned long val; 283 unsigned long val;
284 284
285 switch (psci_fn) { 285 switch (psci_fn) {
@@ -295,7 +295,7 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
295 break; 295 break;
296 } 296 }
297 297
298 *vcpu_reg(vcpu, 0) = val; 298 vcpu_set_reg(vcpu, 0, val);
299 return 1; 299 return 1;
300} 300}
301 301
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index d72b90905132..588bbc288396 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -88,6 +88,7 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
88static unsigned long noinline 88static unsigned long noinline
89__copy_to_user_memcpy(void __user *to, const void *from, unsigned long n) 89__copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
90{ 90{
91 unsigned long ua_flags;
91 int atomic; 92 int atomic;
92 93
93 if (unlikely(segment_eq(get_fs(), KERNEL_DS))) { 94 if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
@@ -118,7 +119,9 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
118 if (tocopy > n) 119 if (tocopy > n)
119 tocopy = n; 120 tocopy = n;
120 121
122 ua_flags = uaccess_save_and_enable();
121 memcpy((void *)to, from, tocopy); 123 memcpy((void *)to, from, tocopy);
124 uaccess_restore(ua_flags);
122 to += tocopy; 125 to += tocopy;
123 from += tocopy; 126 from += tocopy;
124 n -= tocopy; 127 n -= tocopy;
@@ -145,14 +148,21 @@ arm_copy_to_user(void __user *to, const void *from, unsigned long n)
145 * With frame pointer disabled, tail call optimization kicks in 148 * With frame pointer disabled, tail call optimization kicks in
146 * as well making this test almost invisible. 149 * as well making this test almost invisible.
147 */ 150 */
148 if (n < 64) 151 if (n < 64) {
149 return __copy_to_user_std(to, from, n); 152 unsigned long ua_flags = uaccess_save_and_enable();
150 return __copy_to_user_memcpy(to, from, n); 153 n = __copy_to_user_std(to, from, n);
154 uaccess_restore(ua_flags);
155 } else {
156 n = __copy_to_user_memcpy(to, from, n);
157 }
158 return n;
151} 159}
152 160
153static unsigned long noinline 161static unsigned long noinline
154__clear_user_memset(void __user *addr, unsigned long n) 162__clear_user_memset(void __user *addr, unsigned long n)
155{ 163{
164 unsigned long ua_flags;
165
156 if (unlikely(segment_eq(get_fs(), KERNEL_DS))) { 166 if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
157 memset((void *)addr, 0, n); 167 memset((void *)addr, 0, n);
158 return 0; 168 return 0;
@@ -175,7 +185,9 @@ __clear_user_memset(void __user *addr, unsigned long n)
175 if (tocopy > n) 185 if (tocopy > n)
176 tocopy = n; 186 tocopy = n;
177 187
188 ua_flags = uaccess_save_and_enable();
178 memset((void *)addr, 0, tocopy); 189 memset((void *)addr, 0, tocopy);
190 uaccess_restore(ua_flags);
179 addr += tocopy; 191 addr += tocopy;
180 n -= tocopy; 192 n -= tocopy;
181 193
@@ -193,9 +205,14 @@ out:
193unsigned long arm_clear_user(void __user *addr, unsigned long n) 205unsigned long arm_clear_user(void __user *addr, unsigned long n)
194{ 206{
195 /* See rationale for this in __copy_to_user() above. */ 207 /* See rationale for this in __copy_to_user() above. */
196 if (n < 64) 208 if (n < 64) {
197 return __clear_user_std(addr, n); 209 unsigned long ua_flags = uaccess_save_and_enable();
198 return __clear_user_memset(addr, n); 210 n = __clear_user_std(addr, n);
211 uaccess_restore(ua_flags);
212 } else {
213 n = __clear_user_memset(addr, n);
214 }
215 return n;
199} 216}
200 217
201#if 0 218#if 0
diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig
index 92673006e55c..28656c2b54a0 100644
--- a/arch/arm/mach-at91/Kconfig
+++ b/arch/arm/mach-at91/Kconfig
@@ -4,7 +4,6 @@ menuconfig ARCH_AT91
4 select ARCH_REQUIRE_GPIOLIB 4 select ARCH_REQUIRE_GPIOLIB
5 select COMMON_CLK_AT91 5 select COMMON_CLK_AT91
6 select PINCTRL 6 select PINCTRL
7 select PINCTRL_AT91
8 select SOC_BUS 7 select SOC_BUS
9 8
10if ARCH_AT91 9if ARCH_AT91
@@ -17,6 +16,7 @@ config SOC_SAMA5D2
17 select HAVE_AT91_USB_CLK 16 select HAVE_AT91_USB_CLK
18 select HAVE_AT91_H32MX 17 select HAVE_AT91_H32MX
19 select HAVE_AT91_GENERATED_CLK 18 select HAVE_AT91_GENERATED_CLK
19 select PINCTRL_AT91PIO4
20 help 20 help
21 Select this if you are using one of Atmel's SAMA5D2 family SoC. 21 Select this if you are using one of Atmel's SAMA5D2 family SoC.
22 22
@@ -27,6 +27,7 @@ config SOC_SAMA5D3
27 select HAVE_AT91_UTMI 27 select HAVE_AT91_UTMI
28 select HAVE_AT91_SMD 28 select HAVE_AT91_SMD
29 select HAVE_AT91_USB_CLK 29 select HAVE_AT91_USB_CLK
30 select PINCTRL_AT91
30 help 31 help
31 Select this if you are using one of Atmel's SAMA5D3 family SoC. 32 Select this if you are using one of Atmel's SAMA5D3 family SoC.
32 This support covers SAMA5D31, SAMA5D33, SAMA5D34, SAMA5D35, SAMA5D36. 33 This support covers SAMA5D31, SAMA5D33, SAMA5D34, SAMA5D35, SAMA5D36.
@@ -40,6 +41,7 @@ config SOC_SAMA5D4
40 select HAVE_AT91_SMD 41 select HAVE_AT91_SMD
41 select HAVE_AT91_USB_CLK 42 select HAVE_AT91_USB_CLK
42 select HAVE_AT91_H32MX 43 select HAVE_AT91_H32MX
44 select PINCTRL_AT91
43 help 45 help
44 Select this if you are using one of Atmel's SAMA5D4 family SoC. 46 Select this if you are using one of Atmel's SAMA5D4 family SoC.
45 47
@@ -50,6 +52,7 @@ config SOC_AT91RM9200
50 select CPU_ARM920T 52 select CPU_ARM920T
51 select HAVE_AT91_USB_CLK 53 select HAVE_AT91_USB_CLK
52 select MIGHT_HAVE_PCI 54 select MIGHT_HAVE_PCI
55 select PINCTRL_AT91
53 select SOC_SAM_V4_V5 56 select SOC_SAM_V4_V5
54 select SRAM if PM 57 select SRAM if PM
55 help 58 help
@@ -65,6 +68,7 @@ config SOC_AT91SAM9
65 select HAVE_AT91_UTMI 68 select HAVE_AT91_UTMI
66 select HAVE_FB_ATMEL 69 select HAVE_FB_ATMEL
67 select MEMORY 70 select MEMORY
71 select PINCTRL_AT91
68 select SOC_SAM_V4_V5 72 select SOC_SAM_V4_V5
69 select SRAM if PM 73 select SRAM if PM
70 help 74 help
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index 80e277cfcc8b..23726fb31741 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -41,8 +41,10 @@
41 * implementation should be moved down into the pinctrl driver and get 41 * implementation should be moved down into the pinctrl driver and get
42 * called as part of the generic suspend/resume path. 42 * called as part of the generic suspend/resume path.
43 */ 43 */
44#ifdef CONFIG_PINCTRL_AT91
44extern void at91_pinctrl_gpio_suspend(void); 45extern void at91_pinctrl_gpio_suspend(void);
45extern void at91_pinctrl_gpio_resume(void); 46extern void at91_pinctrl_gpio_resume(void);
47#endif
46 48
47static struct { 49static struct {
48 unsigned long uhp_udp_mask; 50 unsigned long uhp_udp_mask;
@@ -151,8 +153,9 @@ static void at91_pm_suspend(suspend_state_t state)
151 153
152static int at91_pm_enter(suspend_state_t state) 154static int at91_pm_enter(suspend_state_t state)
153{ 155{
156#ifdef CONFIG_PINCTRL_AT91
154 at91_pinctrl_gpio_suspend(); 157 at91_pinctrl_gpio_suspend();
155 158#endif
156 switch (state) { 159 switch (state) {
157 /* 160 /*
158 * Suspend-to-RAM is like STANDBY plus slow clock mode, so 161 * Suspend-to-RAM is like STANDBY plus slow clock mode, so
@@ -192,7 +195,9 @@ static int at91_pm_enter(suspend_state_t state)
192error: 195error:
193 target_state = PM_SUSPEND_ON; 196 target_state = PM_SUSPEND_ON;
194 197
198#ifdef CONFIG_PINCTRL_AT91
195 at91_pinctrl_gpio_resume(); 199 at91_pinctrl_gpio_resume();
200#endif
196 return 0; 201 return 0;
197} 202}
198 203
diff --git a/arch/arm/mach-dove/include/mach/entry-macro.S b/arch/arm/mach-dove/include/mach/entry-macro.S
index 72d622baaad3..df1d44bdc375 100644
--- a/arch/arm/mach-dove/include/mach/entry-macro.S
+++ b/arch/arm/mach-dove/include/mach/entry-macro.S
@@ -18,13 +18,13 @@
18 @ check low interrupts 18 @ check low interrupts
19 ldr \irqstat, [\base, #IRQ_CAUSE_LOW_OFF] 19 ldr \irqstat, [\base, #IRQ_CAUSE_LOW_OFF]
20 ldr \tmp, [\base, #IRQ_MASK_LOW_OFF] 20 ldr \tmp, [\base, #IRQ_MASK_LOW_OFF]
21 mov \irqnr, #31 21 mov \irqnr, #32
22 ands \irqstat, \irqstat, \tmp 22 ands \irqstat, \irqstat, \tmp
23 23
24 @ if no low interrupts set, check high interrupts 24 @ if no low interrupts set, check high interrupts
25 ldreq \irqstat, [\base, #IRQ_CAUSE_HIGH_OFF] 25 ldreq \irqstat, [\base, #IRQ_CAUSE_HIGH_OFF]
26 ldreq \tmp, [\base, #IRQ_MASK_HIGH_OFF] 26 ldreq \tmp, [\base, #IRQ_MASK_HIGH_OFF]
27 moveq \irqnr, #63 27 moveq \irqnr, #64
28 andeqs \irqstat, \irqstat, \tmp 28 andeqs \irqstat, \irqstat, \tmp
29 29
30 @ find first active interrupt source 30 @ find first active interrupt source
diff --git a/arch/arm/mach-exynos/pmu.c b/arch/arm/mach-exynos/pmu.c
index de68938ee6aa..c21e41dad19c 100644
--- a/arch/arm/mach-exynos/pmu.c
+++ b/arch/arm/mach-exynos/pmu.c
@@ -748,8 +748,12 @@ static void exynos5_powerdown_conf(enum sys_powerdown mode)
748void exynos_sys_powerdown_conf(enum sys_powerdown mode) 748void exynos_sys_powerdown_conf(enum sys_powerdown mode)
749{ 749{
750 unsigned int i; 750 unsigned int i;
751 const struct exynos_pmu_data *pmu_data;
752
753 if (!pmu_context)
754 return;
751 755
752 const struct exynos_pmu_data *pmu_data = pmu_context->pmu_data; 756 pmu_data = pmu_context->pmu_data;
753 757
754 if (pmu_data->powerdown_conf) 758 if (pmu_data->powerdown_conf)
755 pmu_data->powerdown_conf(mode); 759 pmu_data->powerdown_conf(mode);
diff --git a/arch/arm/mach-imx/gpc.c b/arch/arm/mach-imx/gpc.c
index 8e7976a4c3e7..cfc696b972f3 100644
--- a/arch/arm/mach-imx/gpc.c
+++ b/arch/arm/mach-imx/gpc.c
@@ -177,6 +177,7 @@ static struct irq_chip imx_gpc_chip = {
 	.irq_unmask		= imx_gpc_irq_unmask,
 	.irq_retrigger		= irq_chip_retrigger_hierarchy,
 	.irq_set_wake		= imx_gpc_irq_set_wake,
+	.irq_set_type		= irq_chip_set_type_parent,
 #ifdef CONFIG_SMP
 	.irq_set_affinity	= irq_chip_set_affinity_parent,
 #endif
diff --git a/arch/arm/mach-ixp4xx/include/mach/io.h b/arch/arm/mach-ixp4xx/include/mach/io.h
index b02439019963..7a0c13bf4269 100644
--- a/arch/arm/mach-ixp4xx/include/mach/io.h
+++ b/arch/arm/mach-ixp4xx/include/mach/io.h
@@ -143,7 +143,7 @@ static inline void __indirect_writesl(volatile void __iomem *bus_addr,
 		writel(*vaddr++, bus_addr);
 }
 
-static inline unsigned char __indirect_readb(const volatile void __iomem *p)
+static inline u8 __indirect_readb(const volatile void __iomem *p)
 {
 	u32 addr = (u32)p;
 	u32 n, byte_enables, data;
@@ -166,7 +166,7 @@ static inline void __indirect_readsb(const volatile void __iomem *bus_addr,
 		*vaddr++ = readb(bus_addr);
 }
 
-static inline unsigned short __indirect_readw(const volatile void __iomem *p)
+static inline u16 __indirect_readw(const volatile void __iomem *p)
 {
 	u32 addr = (u32)p;
 	u32 n, byte_enables, data;
@@ -189,7 +189,7 @@ static inline void __indirect_readsw(const volatile void __iomem *bus_addr,
 		*vaddr++ = readw(bus_addr);
 }
 
-static inline unsigned long __indirect_readl(const volatile void __iomem *p)
+static inline u32 __indirect_readl(const volatile void __iomem *p)
 {
 	u32 addr = (__force u32)p;
 	u32 data;
@@ -350,7 +350,7 @@ static inline void insl(u32 io_addr, void *p, u32 count)
 	((unsigned long)p <= (PIO_MASK + PIO_OFFSET)))
 
 #define ioread8(p)			ioread8(p)
-static inline unsigned int ioread8(const void __iomem *addr)
+static inline u8 ioread8(const void __iomem *addr)
 {
 	unsigned long port = (unsigned long __force)addr;
 	if (__is_io_address(port))
@@ -378,7 +378,7 @@ static inline void ioread8_rep(const void __iomem *addr, void *vaddr, u32 count)
 }
 
 #define ioread16(p)			ioread16(p)
-static inline unsigned int ioread16(const void __iomem *addr)
+static inline u16 ioread16(const void __iomem *addr)
 {
 	unsigned long port = (unsigned long __force)addr;
 	if (__is_io_address(port))
@@ -407,7 +407,7 @@ static inline void ioread16_rep(const void __iomem *addr, void *vaddr,
 }
 
 #define ioread32(p)			ioread32(p)
-static inline unsigned int ioread32(const void __iomem *addr)
+static inline u32 ioread32(const void __iomem *addr)
 {
 	unsigned long port = (unsigned long __force)addr;
 	if (__is_io_address(port))
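
The accessor changes above are purely about prototypes: returning the fixed-width kernel types u8/u16/u32 instead of unsigned char/short/long/int keeps these indirect accessors in line with the generic readb/ioread* signatures. A user-space sketch of the same idea, using <stdint.h> in place of the kernel typedefs (illustrative only, not the ixp4xx code):

#include <stdint.h>
#include <stdio.h>

/* Fixed-width return types make the value range explicit at the call site. */
static uint8_t mmio_read8(const volatile void *addr)
{
	return *(const volatile uint8_t *)addr;
}

static uint32_t mmio_read32(const volatile void *addr)
{
	return *(const volatile uint32_t *)addr;
}

int main(void)
{
	uint32_t reg = 0xdeadbeef;	/* stand-in for a memory-mapped register */

	printf("byte 0x%02x, word 0x%08x\n", mmio_read8(&reg), mmio_read32(&reg));
	return 0;
}
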
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 5076d3f334d2..0517f0c1581a 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -65,6 +65,8 @@ config SOC_AM43XX
 	select MACH_OMAP_GENERIC
 	select MIGHT_HAVE_CACHE_L2X0
 	select HAVE_ARM_SCU
+	select GENERIC_CLOCKEVENTS_BROADCAST
+	select HAVE_ARM_TWD
 
 config SOC_DRA7XX
 	bool "TI DRA7XX"
@@ -121,6 +123,7 @@ config ARCH_OMAP2PLUS_TYPICAL
 	select NEON if CPU_V7
 	select PM
 	select REGULATOR
+	select REGULATOR_FIXED_VOLTAGE
 	select TWL4030_CORE if ARCH_OMAP3 || ARCH_OMAP4
 	select TWL4030_POWER if ARCH_OMAP3 || ARCH_OMAP4
 	select VFP
@@ -201,7 +204,6 @@ config MACH_OMAP3_PANDORA
 	depends on ARCH_OMAP3
 	default y
 	select OMAP_PACKAGE_CBB
-	select REGULATOR_FIXED_VOLTAGE if REGULATOR
 
 config MACH_NOKIA_N810
 	bool
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index 5305ec7341ec..79e1f876d1c9 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -143,9 +143,9 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
 	 * Ensure that CPU power state is set to ON to avoid CPU
 	 * powerdomain transition on wfi
 	 */
-	clkdm_wakeup(cpu1_clkdm);
-	omap_set_pwrdm_state(cpu1_pwrdm, PWRDM_POWER_ON);
-	clkdm_allow_idle(cpu1_clkdm);
+	clkdm_wakeup_nolock(cpu1_clkdm);
+	pwrdm_set_next_pwrst(cpu1_pwrdm, PWRDM_POWER_ON);
+	clkdm_allow_idle_nolock(cpu1_clkdm);
 
 	if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD)) {
 		while (gic_dist_disabled()) {
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index cc8a987149e2..48495ad82aba 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -890,6 +890,36 @@ static int _init_opt_clks(struct omap_hwmod *oh)
 	return ret;
 }
 
+static void _enable_optional_clocks(struct omap_hwmod *oh)
+{
+	struct omap_hwmod_opt_clk *oc;
+	int i;
+
+	pr_debug("omap_hwmod: %s: enabling optional clocks\n", oh->name);
+
+	for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
+		if (oc->_clk) {
+			pr_debug("omap_hwmod: enable %s:%s\n", oc->role,
+				 __clk_get_name(oc->_clk));
+			clk_enable(oc->_clk);
+		}
+}
+
+static void _disable_optional_clocks(struct omap_hwmod *oh)
+{
+	struct omap_hwmod_opt_clk *oc;
+	int i;
+
+	pr_debug("omap_hwmod: %s: disabling optional clocks\n", oh->name);
+
+	for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
+		if (oc->_clk) {
+			pr_debug("omap_hwmod: disable %s:%s\n", oc->role,
+				 __clk_get_name(oc->_clk));
+			clk_disable(oc->_clk);
+		}
+}
+
 /**
  * _enable_clocks - enable hwmod main clock and interface clocks
  * @oh: struct omap_hwmod *
@@ -917,6 +947,9 @@ static int _enable_clocks(struct omap_hwmod *oh)
 			clk_enable(os->_clk);
 	}
 
+	if (oh->flags & HWMOD_OPT_CLKS_NEEDED)
+		_enable_optional_clocks(oh);
+
 	/* The opt clocks are controlled by the device driver. */
 
 	return 0;
@@ -948,41 +981,14 @@ static int _disable_clocks(struct omap_hwmod *oh)
 			clk_disable(os->_clk);
 	}
 
+	if (oh->flags & HWMOD_OPT_CLKS_NEEDED)
+		_disable_optional_clocks(oh);
+
 	/* The opt clocks are controlled by the device driver. */
 
 	return 0;
 }
 
-static void _enable_optional_clocks(struct omap_hwmod *oh)
-{
-	struct omap_hwmod_opt_clk *oc;
-	int i;
-
-	pr_debug("omap_hwmod: %s: enabling optional clocks\n", oh->name);
-
-	for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
-		if (oc->_clk) {
-			pr_debug("omap_hwmod: enable %s:%s\n", oc->role,
-				 __clk_get_name(oc->_clk));
-			clk_enable(oc->_clk);
-		}
-}
-
-static void _disable_optional_clocks(struct omap_hwmod *oh)
-{
-	struct omap_hwmod_opt_clk *oc;
-	int i;
-
-	pr_debug("omap_hwmod: %s: disabling optional clocks\n", oh->name);
-
-	for (i = oh->opt_clks_cnt, oc = oh->opt_clks; i > 0; i--, oc++)
-		if (oc->_clk) {
-			pr_debug("omap_hwmod: disable %s:%s\n", oc->role,
-				 __clk_get_name(oc->_clk));
-			clk_disable(oc->_clk);
-		}
-}
-
 /**
  * _omap4_enable_module - enable CLKCTRL modulemode on OMAP4
  * @oh: struct omap_hwmod *
diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h
index ca6df1a73475..76bce11c85a4 100644
--- a/arch/arm/mach-omap2/omap_hwmod.h
+++ b/arch/arm/mach-omap2/omap_hwmod.h
@@ -523,6 +523,8 @@ struct omap_hwmod_omap4_prcm {
  * HWMOD_RECONFIG_IO_CHAIN: omap_hwmod code needs to reconfigure wake-up
  * events by calling _reconfigure_io_chain() when a device is enabled
  * or idled.
+ * HWMOD_OPT_CLKS_NEEDED: The optional clocks are needed for the module to
+ * operate and they need to be handled at the same time as the main_clk.
  */
 #define HWMOD_SWSUP_SIDLE			(1 << 0)
 #define HWMOD_SWSUP_MSTANDBY			(1 << 1)
@@ -538,6 +540,7 @@ struct omap_hwmod_omap4_prcm {
 #define HWMOD_FORCE_MSTANDBY			(1 << 11)
 #define HWMOD_SWSUP_SIDLE_ACT			(1 << 12)
 #define HWMOD_RECONFIG_IO_CHAIN			(1 << 13)
+#define HWMOD_OPT_CLKS_NEEDED			(1 << 14)
 
 /*
  * omap_hwmod._int_flags definitions
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index 51d1ecb384bd..ee4e04434a94 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -1298,6 +1298,44 @@ static struct omap_hwmod dra7xx_mcspi4_hwmod = {
 };
 
 /*
+ * 'mcasp' class
+ *
+ */
+static struct omap_hwmod_class_sysconfig dra7xx_mcasp_sysc = {
+	.sysc_offs	= 0x0004,
+	.sysc_flags	= SYSC_HAS_SIDLEMODE,
+	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+	.sysc_fields	= &omap_hwmod_sysc_type3,
+};
+
+static struct omap_hwmod_class dra7xx_mcasp_hwmod_class = {
+	.name	= "mcasp",
+	.sysc	= &dra7xx_mcasp_sysc,
+};
+
+/* mcasp3 */
+static struct omap_hwmod_opt_clk mcasp3_opt_clks[] = {
+	{ .role = "ahclkx", .clk = "mcasp3_ahclkx_mux" },
+};
+
+static struct omap_hwmod dra7xx_mcasp3_hwmod = {
+	.name		= "mcasp3",
+	.class		= &dra7xx_mcasp_hwmod_class,
+	.clkdm_name	= "l4per2_clkdm",
+	.main_clk	= "mcasp3_aux_gfclk_mux",
+	.flags		= HWMOD_OPT_CLKS_NEEDED,
+	.prcm = {
+		.omap4 = {
+			.clkctrl_offs = DRA7XX_CM_L4PER2_MCASP3_CLKCTRL_OFFSET,
+			.context_offs = DRA7XX_RM_L4PER2_MCASP3_CONTEXT_OFFSET,
+			.modulemode   = MODULEMODE_SWCTRL,
+		},
+	},
+	.opt_clks	= mcasp3_opt_clks,
+	.opt_clks_cnt	= ARRAY_SIZE(mcasp3_opt_clks),
+};
+
+/*
  * 'mmc' class
  *
  */
@@ -2566,6 +2604,22 @@ static struct omap_hwmod_ocp_if dra7xx_l3_main_1__hdmi = {
 	.user		= OCP_USER_MPU | OCP_USER_SDMA,
 };
 
+/* l4_per2 -> mcasp3 */
+static struct omap_hwmod_ocp_if dra7xx_l4_per2__mcasp3 = {
+	.master		= &dra7xx_l4_per2_hwmod,
+	.slave		= &dra7xx_mcasp3_hwmod,
+	.clk		= "l4_root_clk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+/* l3_main_1 -> mcasp3 */
+static struct omap_hwmod_ocp_if dra7xx_l3_main_1__mcasp3 = {
+	.master		= &dra7xx_l3_main_1_hwmod,
+	.slave		= &dra7xx_mcasp3_hwmod,
+	.clk		= "l3_iclk_div",
+	.user		= OCP_USER_MPU | OCP_USER_SDMA,
+};
+
 /* l4_per1 -> elm */
 static struct omap_hwmod_ocp_if dra7xx_l4_per1__elm = {
 	.master		= &dra7xx_l4_per1_hwmod,
@@ -3308,6 +3362,8 @@ static struct omap_hwmod_ocp_if *dra7xx_hwmod_ocp_ifs[] __initdata = {
 	&dra7xx_l4_wkup__dcan1,
 	&dra7xx_l4_per2__dcan2,
 	&dra7xx_l4_per2__cpgmac0,
+	&dra7xx_l4_per2__mcasp3,
+	&dra7xx_l3_main_1__mcasp3,
 	&dra7xx_gmac__mdio,
 	&dra7xx_l4_cfg__dma_system,
 	&dra7xx_l3_main_1__dss,
diff --git a/arch/arm/mach-omap2/omap_hwmod_81xx_data.c b/arch/arm/mach-omap2/omap_hwmod_81xx_data.c
index b1288f56d509..6256052893ec 100644
--- a/arch/arm/mach-omap2/omap_hwmod_81xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_81xx_data.c
@@ -144,6 +144,7 @@ static struct omap_hwmod dm81xx_l4_ls_hwmod = {
 	.name		= "l4_ls",
 	.clkdm_name	= "alwon_l3s_clkdm",
 	.class		= &l4_hwmod_class,
+	.flags		= HWMOD_NO_IDLEST,
 };
 
 /*
@@ -155,6 +156,7 @@ static struct omap_hwmod dm81xx_l4_hs_hwmod = {
 	.name		= "l4_hs",
 	.clkdm_name	= "alwon_l3_med_clkdm",
 	.class		= &l4_hwmod_class,
+	.flags		= HWMOD_NO_IDLEST,
 };
 
 /* L3 slow -> L4 ls peripheral interface running at 125MHz */
@@ -850,6 +852,7 @@ static struct omap_hwmod dm816x_emac0_hwmod = {
 	.name		= "emac0",
 	.clkdm_name	= "alwon_ethernet_clkdm",
 	.class		= &dm816x_emac_hwmod_class,
+	.flags		= HWMOD_NO_IDLEST,
 };
 
 static struct omap_hwmod_ocp_if dm81xx_l4_hs__emac0 = {
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
index 1dfe34654c43..58144779dec4 100644
--- a/arch/arm/mach-omap2/pdata-quirks.c
+++ b/arch/arm/mach-omap2/pdata-quirks.c
@@ -24,9 +24,6 @@
 #include <linux/platform_data/iommu-omap.h>
 #include <linux/platform_data/wkup_m3.h>
 
-#include <asm/siginfo.h>
-#include <asm/signal.h>
-
 #include "common.h"
 #include "common-board-devices.h"
 #include "dss-common.h"
@@ -385,29 +382,6 @@ static void __init omap3_pandora_legacy_init(void)
 }
 #endif /* CONFIG_ARCH_OMAP3 */
 
-#ifdef CONFIG_SOC_TI81XX
-static int fault_fixed_up;
-
-static int t410_abort_handler(unsigned long addr, unsigned int fsr,
-			      struct pt_regs *regs)
-{
-	if ((fsr == 0x406 || fsr == 0xc06) && !fault_fixed_up) {
-		pr_warn("External imprecise Data abort at addr=%#lx, fsr=%#x ignored.\n",
-			addr, fsr);
-		fault_fixed_up = 1;
-		return 0;
-	}
-
-	return 1;
-}
-
-static void __init t410_abort_init(void)
-{
-	hook_fault_code(16 + 6, t410_abort_handler, SIGBUS, BUS_OBJERR,
-			"imprecise external abort");
-}
-#endif
-
 #if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5)
 static struct iommu_platform_data omap4_iommu_pdata = {
 	.reset_name = "mmu_cache",
@@ -536,9 +510,6 @@ static struct pdata_init pdata_quirks[] __initdata = {
 	{ "openpandora,omap3-pandora-600mhz", omap3_pandora_legacy_init, },
 	{ "openpandora,omap3-pandora-1ghz", omap3_pandora_legacy_init, },
 #endif
-#ifdef CONFIG_SOC_TI81XX
-	{ "hp,t410", t410_abort_init, },
-#endif
 #ifdef CONFIG_SOC_OMAP5
 	{ "ti,omap5-uevm", omap5_uevm_legacy_init, },
 #endif
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 87b98bf92366..2dbd3785ee6f 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -301,11 +301,11 @@ static void omap3_pm_idle(void)
 	if (omap_irq_pending())
 		return;
 
-	trace_cpu_idle(1, smp_processor_id());
+	trace_cpu_idle_rcuidle(1, smp_processor_id());
 
 	omap_sram_idle();
 
-	trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
+	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 }
 
 #ifdef CONFIG_SUSPEND
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index b18ebbefae09..f86692dbcfd5 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -320,6 +320,12 @@ static int __init omap_dm_timer_init_one(struct omap_dm_timer *timer,
 	return r;
 }
 
+#if !defined(CONFIG_SMP) && defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
+void tick_broadcast(const struct cpumask *mask)
+{
+}
+#endif
+
 static void __init omap2_gp_clockevent_init(int gptimer_id,
 						const char *fck_source,
 						const char *property)
diff --git a/arch/arm/mach-orion5x/include/mach/entry-macro.S b/arch/arm/mach-orion5x/include/mach/entry-macro.S
index 79eb502a1e64..73919a36b577 100644
--- a/arch/arm/mach-orion5x/include/mach/entry-macro.S
+++ b/arch/arm/mach-orion5x/include/mach/entry-macro.S
@@ -21,5 +21,5 @@
 	@ find cause bits that are unmasked
 	ands	\irqstat, \irqstat, \tmp	@ clear Z flag if any
 	clzne	\irqnr,	\irqstat		@ calc irqnr
-	rsbne	\irqnr, \irqnr, #31
+	rsbne	\irqnr, \irqnr, #32
 	.endm
diff --git a/arch/arm/mach-pxa/ezx.c b/arch/arm/mach-pxa/ezx.c
index 9a9c15bfcd34..7c0d5618be5e 100644
--- a/arch/arm/mach-pxa/ezx.c
+++ b/arch/arm/mach-pxa/ezx.c
@@ -889,6 +889,7 @@ static void __init e680_init(void)
 
 	pxa_set_keypad_info(&e680_keypad_platform_data);
 
+	pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
 	platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
 	platform_add_devices(ARRAY_AND_SIZE(e680_devices));
 }
@@ -956,6 +957,7 @@ static void __init a1200_init(void)
 
 	pxa_set_keypad_info(&a1200_keypad_platform_data);
 
+	pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
 	platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
 	platform_add_devices(ARRAY_AND_SIZE(a1200_devices));
 }
@@ -1148,6 +1150,7 @@ static void __init a910_init(void)
 		platform_device_register(&a910_camera);
 	}
 
+	pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
 	platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
 	platform_add_devices(ARRAY_AND_SIZE(a910_devices));
 }
@@ -1215,6 +1218,7 @@ static void __init e6_init(void)
 
 	pxa_set_keypad_info(&e6_keypad_platform_data);
 
+	pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
 	platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
 	platform_add_devices(ARRAY_AND_SIZE(e6_devices));
 }
@@ -1256,6 +1260,7 @@ static void __init e2_init(void)
 
 	pxa_set_keypad_info(&e2_keypad_platform_data);
 
+	pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
 	platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
 	platform_add_devices(ARRAY_AND_SIZE(e2_devices));
 }
diff --git a/arch/arm/mach-pxa/palm27x.c b/arch/arm/mach-pxa/palm27x.c
index 13eba2b26e0a..8fbfb10047ec 100644
--- a/arch/arm/mach-pxa/palm27x.c
+++ b/arch/arm/mach-pxa/palm27x.c
@@ -344,7 +344,7 @@ void __init palm27x_pwm_init(int bl, int lcd)
 {
 	palm_bl_power	= bl;
 	palm_lcd_power	= lcd;
-	pwm_add_lookup(palm27x_pwm_lookup, ARRAY_SIZE(palm27x_pwm_lookup));
+	pwm_add_table(palm27x_pwm_lookup, ARRAY_SIZE(palm27x_pwm_lookup));
 	platform_device_register(&palm27x_backlight);
 }
 #endif
diff --git a/arch/arm/mach-pxa/palmtc.c b/arch/arm/mach-pxa/palmtc.c
index aebf6de62468..0b5c3876720c 100644
--- a/arch/arm/mach-pxa/palmtc.c
+++ b/arch/arm/mach-pxa/palmtc.c
@@ -169,7 +169,7 @@ static inline void palmtc_keys_init(void) {}
 #if defined(CONFIG_BACKLIGHT_PWM) || defined(CONFIG_BACKLIGHT_PWM_MODULE)
 static struct pwm_lookup palmtc_pwm_lookup[] = {
 	PWM_LOOKUP("pxa25x-pwm.1", 0, "pwm-backlight.0", NULL, PALMTC_PERIOD_NS,
-		   PWM_PERIOD_NORMAL),
+		   PWM_POLARITY_NORMAL),
 };
 
 static struct platform_pwm_backlight_data palmtc_backlight_data = {
diff --git a/arch/arm/mach-s3c24xx/pll-s3c2440-12000000.c b/arch/arm/mach-s3c24xx/pll-s3c2440-12000000.c
index a19460e6e7b0..b355fca6cc2e 100644
--- a/arch/arm/mach-s3c24xx/pll-s3c2440-12000000.c
+++ b/arch/arm/mach-s3c24xx/pll-s3c2440-12000000.c
@@ -20,7 +20,7 @@
 #include <plat/cpu.h>
 #include <plat/cpu-freq-core.h>
 
-static struct cpufreq_frequency_table s3c2440_plls_12[] __initdata = {
+static struct cpufreq_frequency_table s3c2440_plls_12[] = {
 	{ .frequency = 75000000, .driver_data = PLLVAL(0x75, 3, 3),  },	/* FVco 600.000000 */
 	{ .frequency = 80000000, .driver_data = PLLVAL(0x98, 4, 3),  },	/* FVco 640.000000 */
 	{ .frequency = 90000000, .driver_data = PLLVAL(0x70, 2, 3),  },	/* FVco 720.000000 */
diff --git a/arch/arm/mach-s3c24xx/pll-s3c2440-16934400.c b/arch/arm/mach-s3c24xx/pll-s3c2440-16934400.c
index 1191b2905625..be9a248b5ce9 100644
--- a/arch/arm/mach-s3c24xx/pll-s3c2440-16934400.c
+++ b/arch/arm/mach-s3c24xx/pll-s3c2440-16934400.c
@@ -20,7 +20,7 @@
 #include <plat/cpu.h>
 #include <plat/cpu-freq-core.h>
 
-static struct cpufreq_frequency_table s3c2440_plls_169344[] __initdata = {
+static struct cpufreq_frequency_table s3c2440_plls_169344[] = {
 	{ .frequency = 78019200, .driver_data = PLLVAL(121, 5, 3),  },	/* FVco 624.153600 */
 	{ .frequency = 84067200, .driver_data = PLLVAL(131, 5, 3),  },	/* FVco 672.537600 */
 	{ .frequency = 90115200, .driver_data = PLLVAL(141, 5, 3),  },	/* FVco 720.921600 */
diff --git a/arch/arm/mach-shmobile/setup-r8a7793.c b/arch/arm/mach-shmobile/setup-r8a7793.c
index 1d2825cb7a65..5fce87f7f254 100644
--- a/arch/arm/mach-shmobile/setup-r8a7793.c
+++ b/arch/arm/mach-shmobile/setup-r8a7793.c
@@ -19,7 +19,7 @@
19#include "common.h" 19#include "common.h"
20#include "rcar-gen2.h" 20#include "rcar-gen2.h"
21 21
22static const char *r8a7793_boards_compat_dt[] __initconst = { 22static const char * const r8a7793_boards_compat_dt[] __initconst = {
23 "renesas,r8a7793", 23 "renesas,r8a7793",
24 NULL, 24 NULL,
25}; 25};
diff --git a/arch/arm/mach-zx/Kconfig b/arch/arm/mach-zx/Kconfig
index 7fdc5bf24f9b..446334a25cf5 100644
--- a/arch/arm/mach-zx/Kconfig
+++ b/arch/arm/mach-zx/Kconfig
@@ -13,7 +13,7 @@ config SOC_ZX296702
 	select ARM_GLOBAL_TIMER
 	select HAVE_ARM_SCU if SMP
 	select HAVE_ARM_TWD if SMP
-	select PM_GENERIC_DOMAINS
+	select PM_GENERIC_DOMAINS if PM
 	help
 	  Support for ZTE ZX296702 SoC which is a dual core CortexA9MP
 endif
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 845769e41332..c8c8b9ed02e0 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -165,13 +165,28 @@ static void flush_context(unsigned int cpu)
 	__flush_icache_all();
 }
 
-static int is_reserved_asid(u64 asid)
+static bool check_update_reserved_asid(u64 asid, u64 newasid)
 {
 	int cpu;
-	for_each_possible_cpu(cpu)
-		if (per_cpu(reserved_asids, cpu) == asid)
-			return 1;
-	return 0;
+	bool hit = false;
+
+	/*
+	 * Iterate over the set of reserved ASIDs looking for a match.
+	 * If we find one, then we can update our mm to use newasid
+	 * (i.e. the same ASID in the current generation) but we can't
+	 * exit the loop early, since we need to ensure that all copies
+	 * of the old ASID are updated to reflect the mm. Failure to do
+	 * so could result in us missing the reserved ASID in a future
+	 * generation.
+	 */
+	for_each_possible_cpu(cpu) {
+		if (per_cpu(reserved_asids, cpu) == asid) {
+			hit = true;
+			per_cpu(reserved_asids, cpu) = newasid;
+		}
+	}
+
+	return hit;
 }
 
 static u64 new_context(struct mm_struct *mm, unsigned int cpu)
@@ -181,12 +196,14 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 	u64 generation = atomic64_read(&asid_generation);
 
 	if (asid != 0) {
+		u64 newasid = generation | (asid & ~ASID_MASK);
+
 		/*
 		 * If our current ASID was active during a rollover, we
 		 * can continue to use it and this was just a false alarm.
 		 */
-		if (is_reserved_asid(asid))
-			return generation | (asid & ~ASID_MASK);
+		if (check_update_reserved_asid(asid, newasid))
+			return newasid;
 
 		/*
 		 * We had a valid ASID in a previous life, so try to re-use
@@ -194,7 +211,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 		 */
 		asid &= ~ASID_MASK;
 		if (!__test_and_set_bit(asid, asid_map))
-			goto bump_gen;
+			return newasid;
 	}
 
 	/*
@@ -216,11 +233,8 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 
 	__set_bit(asid, asid_map);
 	cur_idx = asid;
-
-bump_gen:
-	asid |= generation;
 	cpumask_clear(mm_cpumask(mm));
-	return asid;
+	return asid | generation;
 }
 
 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
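
For readers unfamiliar with the ASID allocator touched above: each 64-bit software ASID combines a generation counter in the upper bits with the hardware ASID in the lower bits, and on rollover the ASIDs still live on other CPUs are kept "reserved". The fix re-stamps every reserved copy with the new generation so later rollovers still recognise them. Below is a simplified, user-space model of that bookkeeping only (made-up constants and a plain array in place of the per-CPU data), not the kernel code itself.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ASID_BITS	8
#define ASID_MASK	((1ULL << ASID_BITS) - 1)	/* low bits: hardware ASID */
#define NR_CPUS		4

static uint64_t reserved_asids[NR_CPUS];		/* stand-in for the per-CPU data */

/* Mirror of check_update_reserved_asid(): match on the old value and
 * rewrite every copy so future rollovers still see it as reserved. */
static bool check_update_reserved_asid(uint64_t asid, uint64_t newasid)
{
	bool hit = false;

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		if (reserved_asids[cpu] == asid) {
			hit = true;
			reserved_asids[cpu] = newasid;
		}
	}
	return hit;
}

int main(void)
{
	uint64_t generation = 2ULL << ASID_BITS;	/* pretend we just rolled over */
	uint64_t old = (1ULL << ASID_BITS) | 0x2a;	/* generation 1, hw ASID 0x2a */
	uint64_t newasid = generation | (old & ASID_MASK);

	reserved_asids[1] = old;			/* CPU1 was running with it */
	reserved_asids[3] = old;			/* ...and so was CPU3 */

	if (check_update_reserved_asid(old, newasid))
		printf("kept hw ASID 0x%llx in generation %llu\n",
		       (unsigned long long)(newasid & ASID_MASK),
		       (unsigned long long)(newasid >> ASID_BITS));
	return 0;
}
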
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index e62400e5fb99..534a60ae282e 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1521,7 +1521,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
 		return -ENOMEM;
 
 	for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
-		phys_addr_t phys = sg_phys(s) & PAGE_MASK;
+		phys_addr_t phys = page_to_phys(sg_page(s));
 		unsigned int len = PAGE_ALIGN(s->offset + s->length);
 
 		if (!is_coherent &&
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 8a63b4cdc0f2..7f8cd1b3557f 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -22,6 +22,7 @@
 #include <linux/memblock.h>
 #include <linux/dma-contiguous.h>
 #include <linux/sizes.h>
+#include <linux/stop_machine.h>
 
 #include <asm/cp15.h>
 #include <asm/mach-types.h>
@@ -627,12 +628,10 @@ static struct section_perm ro_perms[] = {
  * safe to be called with preemption disabled, as under stop_machine().
  */
 static inline void section_update(unsigned long addr, pmdval_t mask,
-				  pmdval_t prot)
+				  pmdval_t prot, struct mm_struct *mm)
 {
-	struct mm_struct *mm;
 	pmd_t *pmd;
 
-	mm = current->active_mm;
 	pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);
 
 #ifdef CONFIG_ARM_LPAE
@@ -656,49 +655,82 @@ static inline bool arch_has_strict_perms(void)
 	return !!(get_cr() & CR_XP);
 }
 
-#define set_section_perms(perms, field)	{			\
-	size_t i;						\
-	unsigned long addr;					\
-								\
-	if (!arch_has_strict_perms())				\
-		return;						\
-								\
-	for (i = 0; i < ARRAY_SIZE(perms); i++) {		\
-		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||	\
-		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {	\
-			pr_err("BUG: section %lx-%lx not aligned to %lx\n", \
-				perms[i].start, perms[i].end,	\
-				SECTION_SIZE);			\
-			continue;				\
-		}						\
-								\
-		for (addr = perms[i].start;			\
-		     addr < perms[i].end;			\
-		     addr += SECTION_SIZE)			\
-			section_update(addr, perms[i].mask,	\
-				perms[i].field);		\
-	}							\
+void set_section_perms(struct section_perm *perms, int n, bool set,
+			struct mm_struct *mm)
+{
+	size_t i;
+	unsigned long addr;
+
+	if (!arch_has_strict_perms())
+		return;
+
+	for (i = 0; i < n; i++) {
+		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
+		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
+			pr_err("BUG: section %lx-%lx not aligned to %lx\n",
+				perms[i].start, perms[i].end,
+				SECTION_SIZE);
+			continue;
+		}
+
+		for (addr = perms[i].start;
+		     addr < perms[i].end;
+		     addr += SECTION_SIZE)
+			section_update(addr, perms[i].mask,
+				       set ? perms[i].prot : perms[i].clear, mm);
+	}
+
 }
 
-static inline void fix_kernmem_perms(void)
+static void update_sections_early(struct section_perm perms[], int n)
 {
-	set_section_perms(nx_perms, prot);
+	struct task_struct *t, *s;
+
+	read_lock(&tasklist_lock);
+	for_each_process(t) {
+		if (t->flags & PF_KTHREAD)
+			continue;
+		for_each_thread(t, s)
+			set_section_perms(perms, n, true, s->mm);
+	}
+	read_unlock(&tasklist_lock);
+	set_section_perms(perms, n, true, current->active_mm);
+	set_section_perms(perms, n, true, &init_mm);
+}
+
+int __fix_kernmem_perms(void *unused)
+{
+	update_sections_early(nx_perms, ARRAY_SIZE(nx_perms));
+	return 0;
+}
+
+void fix_kernmem_perms(void)
+{
+	stop_machine(__fix_kernmem_perms, NULL, NULL);
 }
 
 #ifdef CONFIG_DEBUG_RODATA
+int __mark_rodata_ro(void *unused)
+{
+	update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
+	return 0;
+}
+
 void mark_rodata_ro(void)
 {
-	set_section_perms(ro_perms, prot);
+	stop_machine(__mark_rodata_ro, NULL, NULL);
 }
 
 void set_kernel_text_rw(void)
 {
-	set_section_perms(ro_perms, clear);
+	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
+				current->active_mm);
 }
 
 void set_kernel_text_ro(void)
 {
-	set_section_perms(ro_perms, prot);
+	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
+				current->active_mm);
 }
 #endif /* CONFIG_DEBUG_RODATA */
 
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index de2b246fed38..8e1ea433c3f1 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -95,7 +95,7 @@ ENDPROC(cpu_v7_dcache_clean_area)
 .equ	cpu_v7_suspend_size, 4 * 9
 #ifdef CONFIG_ARM_CPU_SUSPEND
 ENTRY(cpu_v7_do_suspend)
-	stmfd	sp!, {r4 - r10, lr}
+	stmfd	sp!, {r4 - r11, lr}
 	mrc	p15, 0, r4, c13, c0, 0	@ FCSE/PID
 	mrc	p15, 0, r5, c13, c0, 3	@ User r/o thread ID
 	stmia	r0!, {r4 - r5}
@@ -112,7 +112,7 @@ ENTRY(cpu_v7_do_suspend)
 	mrc	p15, 0, r9, c1, c0, 1	@ Auxiliary control register
 	mrc	p15, 0, r10, c1, c0, 2	@ Co-processor access control
 	stmia	r0, {r5 - r11}
-	ldmfd	sp!, {r4 - r10, pc}
+	ldmfd	sp!, {r4 - r11, pc}
 ENDPROC(cpu_v7_do_suspend)
 
 ENTRY(cpu_v7_do_resume)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 9ac16a482ff1..871f21783866 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -49,7 +49,7 @@ config ARM64
 	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_ARCH_BITREVERSE
 	select HAVE_ARCH_JUMP_LABEL
-	select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP
+	select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP && !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ARCH_TRACEHOOK
@@ -316,6 +316,27 @@ config ARM64_ERRATUM_832075
 
 	  If unsure, say Y.
 
+config ARM64_ERRATUM_834220
+	bool "Cortex-A57: 834220: Stage 2 translation fault might be incorrectly reported in presence of a Stage 1 fault"
+	depends on KVM
+	default y
+	help
+	  This option adds an alternative code sequence to work around ARM
+	  erratum 834220 on Cortex-A57 parts up to r1p2.
+
+	  Affected Cortex-A57 parts might report a Stage 2 translation
+	  fault as the result of a Stage 1 fault for load crossing a
+	  page boundary when there is a permission or device memory
+	  alignment fault at Stage 1 and a translation fault at Stage 2.
+
+	  The workaround is to verify that the Stage 1 translation
+	  doesn't generate a fault before handling the Stage 2 fault.
+	  Please note that this does not necessarily enable the workaround,
+	  as it depends on the alternative framework, which will only patch
+	  the kernel if an affected CPU is detected.
+
+	  If unsure, say Y.
+
 config ARM64_ERRATUM_845719
 	bool "Cortex-A53: 845719: a load might read incorrect data"
 	depends on COMPAT
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
index e81cd48d6245..925552e7b4f3 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls2080a.dtsi
@@ -269,6 +269,7 @@
 			clock-frequency = <0>;	/* Updated by bootloader */
 			voltage-ranges = <1800 1800 3300 3300>;
 			sdhci,auto-cmd12;
+			little-endian;
 			bus-width = <4>;
 		};
 
@@ -277,6 +278,7 @@
 			reg = <0x0 0x2300000 0x0 0x10000>;
 			interrupts = <0 36 0x4>; /* Level high type */
 			gpio-controller;
+			little-endian;
 			#gpio-cells = <2>;
 			interrupt-controller;
 			#interrupt-cells = <2>;
@@ -287,6 +289,7 @@
 			reg = <0x0 0x2310000 0x0 0x10000>;
 			interrupts = <0 36 0x4>; /* Level high type */
 			gpio-controller;
+			little-endian;
 			#gpio-cells = <2>;
 			interrupt-controller;
 			#interrupt-cells = <2>;
@@ -297,6 +300,7 @@
 			reg = <0x0 0x2320000 0x0 0x10000>;
 			interrupts = <0 37 0x4>; /* Level high type */
 			gpio-controller;
+			little-endian;
 			#gpio-cells = <2>;
 			interrupt-controller;
 			#interrupt-cells = <2>;
@@ -307,6 +311,7 @@
 			reg = <0x0 0x2330000 0x0 0x10000>;
 			interrupts = <0 37 0x4>; /* Level high type */
 			gpio-controller;
+			little-endian;
 			#gpio-cells = <2>;
 			interrupt-controller;
 			#interrupt-cells = <2>;
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index 030cdcb46c6b..2731d3b25ed2 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -77,6 +77,7 @@
 #ifndef __ASSEMBLY__
 
 #include <linux/stringify.h>
+#include <asm/barrier.h>
 
 /*
  * Low-level accessors
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 11d5bb0fdd54..8f271b83f910 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -29,8 +29,9 @@
 #define ARM64_HAS_PAN				4
 #define ARM64_HAS_LSE_ATOMICS			5
 #define ARM64_WORKAROUND_CAVIUM_23154		6
+#define ARM64_WORKAROUND_834220			7
 
-#define ARM64_NCAPS				7
+#define ARM64_NCAPS				8
 
 #ifndef __ASSEMBLY__
 
@@ -46,8 +47,12 @@ enum ftr_type {
 #define FTR_STRICT	true	/* SANITY check strict matching required */
 #define FTR_NONSTRICT	false	/* SANITY check ignored */
 
+#define FTR_SIGNED	true	/* Value should be treated as signed */
+#define FTR_UNSIGNED	false	/* Value should be treated as unsigned */
+
 struct arm64_ftr_bits {
-	bool		strict;	/* CPU Sanity check: strict matching required ? */
+	bool		sign;	/* Value is signed ? */
+	bool		strict;	/* CPU Sanity check: strict matching required ? */
 	enum ftr_type	type;
 	u8		shift;
 	u8		width;
@@ -123,6 +128,18 @@ cpuid_feature_extract_field(u64 features, int field)
 	return cpuid_feature_extract_field_width(features, field, 4);
 }
 
+static inline unsigned int __attribute_const__
+cpuid_feature_extract_unsigned_field_width(u64 features, int field, int width)
+{
+	return (u64)(features << (64 - width - field)) >> (64 - width);
+}
+
+static inline unsigned int __attribute_const__
+cpuid_feature_extract_unsigned_field(u64 features, int field)
+{
+	return cpuid_feature_extract_unsigned_field_width(features, field, 4);
+}
+
 static inline u64 arm64_ftr_mask(struct arm64_ftr_bits *ftrp)
 {
 	return (u64)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift);
@@ -130,7 +147,9 @@ static inline u64 arm64_ftr_mask(struct arm64_ftr_bits *ftrp)
 
 static inline s64 arm64_ftr_value(struct arm64_ftr_bits *ftrp, u64 val)
 {
-	return cpuid_feature_extract_field_width(val, ftrp->shift, ftrp->width);
+	return ftrp->sign ?
+		cpuid_feature_extract_field_width(val, ftrp->shift, ftrp->width) :
+		cpuid_feature_extract_unsigned_field_width(val, ftrp->shift, ftrp->width);
 }
 
 static inline bool id_aa64mmfr0_mixed_endian_el0(u64 mmfr0)
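
The unsigned extractor added above is the usual shift-up/shift-down trick: move the field to the top of the 64-bit word, then shift back down with a logical shift so no sign bits are smeared in (the signed variant does the same with an arithmetic shift). A standalone C sketch of both behaviours on a sample ID-register value; it mirrors the expressions in the patch but is not the kernel header itself.

#include <stdint.h>
#include <stdio.h>

/* Logical shifts on uint64_t: zero-extend the extracted field. */
static unsigned int extract_unsigned(uint64_t features, int field, int width)
{
	return (uint64_t)(features << (64 - width - field)) >> (64 - width);
}

/* Cast to int64_t before the right shift: sign-extend the field.
 * Like the kernel helper, this relies on the compiler implementing
 * right shift of negative values as an arithmetic shift. */
static int extract_signed(uint64_t features, int field, int width)
{
	return (int64_t)(features << (64 - width - field)) >> (64 - width);
}

int main(void)
{
	uint64_t reg = 0xf0;	/* 4-bit field at shift 4 holds 0xf */

	printf("unsigned: %u\n", extract_unsigned(reg, 4, 4));	/* 15 */
	printf("signed:   %d\n", extract_signed(reg, 4, 4));	/* -1 */
	return 0;
}
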
diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h
index e54415ec6935..9732908bfc8a 100644
--- a/arch/arm64/include/asm/hw_breakpoint.h
+++ b/arch/arm64/include/asm/hw_breakpoint.h
@@ -138,16 +138,18 @@ extern struct pmu perf_ops_bp;
 /* Determine number of BRP registers available. */
 static inline int get_num_brps(void)
 {
+	u64 dfr0 = read_system_reg(SYS_ID_AA64DFR0_EL1);
 	return 1 +
-		cpuid_feature_extract_field(read_system_reg(SYS_ID_AA64DFR0_EL1),
+		cpuid_feature_extract_unsigned_field(dfr0,
 						ID_AA64DFR0_BRPS_SHIFT);
 }
 
 /* Determine number of WRP registers available. */
 static inline int get_num_wrps(void)
 {
+	u64 dfr0 = read_system_reg(SYS_ID_AA64DFR0_EL1);
 	return 1 +
-		cpuid_feature_extract_field(read_system_reg(SYS_ID_AA64DFR0_EL1),
+		cpuid_feature_extract_unsigned_field(dfr0,
 						ID_AA64DFR0_WRPS_SHIFT);
 }
 
diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h
index 23eb450b820b..8e8d30684392 100644
--- a/arch/arm64/include/asm/irq.h
+++ b/arch/arm64/include/asm/irq.h
@@ -7,4 +7,9 @@ struct pt_regs;
 
 extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
 
+static inline int nr_legacy_irqs(void)
+{
+	return 0;
+}
+
 #endif
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 17e92f05b1fe..25a40213bd9b 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -99,12 +99,22 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
 	*vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
 }
 
-static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num)
+/*
+ * vcpu_get_reg and vcpu_set_reg should always be passed a register number
+ * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
+ * AArch32 with banked registers.
+ */
+static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
+					 u8 reg_num)
 {
-	if (vcpu_mode_is_32bit(vcpu))
-		return vcpu_reg32(vcpu, reg_num);
+	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
+}
 
-	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num];
+static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
+				unsigned long val)
+{
+	if (reg_num != 31)
+		vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
 }
 
 /* Get vcpu SPSR for current mode */
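
The new accessors encode the AArch64 convention that register number 31 in an ESR-encoded instruction is the zero register: reads yield 0 and writes are discarded. A tiny user-space model of a guest register file with that behaviour is sketched below (hypothetical struct, not the KVM one).

#include <stdio.h>

struct vcpu_regs {
	unsigned long regs[31];		/* x0..x30; index 31 is the zero register */
};

static unsigned long vcpu_get_reg(const struct vcpu_regs *r, unsigned char n)
{
	return (n == 31) ? 0 : r->regs[n];	/* reads of XZR always return 0 */
}

static void vcpu_set_reg(struct vcpu_regs *r, unsigned char n, unsigned long v)
{
	if (n != 31)				/* writes to XZR are silently dropped */
		r->regs[n] = v;
}

int main(void)
{
	struct vcpu_regs r = { .regs = { 0 } };

	vcpu_set_reg(&r, 0, 0x1234);
	vcpu_set_reg(&r, 31, 0xdead);	/* no effect */
	printf("x0=%lx xzr=%lx\n", vcpu_get_reg(&r, 0), vcpu_get_reg(&r, 31));
	return 0;
}
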
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 7e074f93f383..63f52b55defe 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -276,10 +276,14 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 	 * hardware updates of the pte (ptep_set_access_flags safely changes
 	 * valid ptes without going through an invalid entry).
 	 */
-	if (IS_ENABLED(CONFIG_DEBUG_VM) && IS_ENABLED(CONFIG_ARM64_HW_AFDBM) &&
-	    pte_valid(*ptep)) {
-		BUG_ON(!pte_young(pte));
-		BUG_ON(pte_write(*ptep) && !pte_dirty(pte));
+	if (IS_ENABLED(CONFIG_ARM64_HW_AFDBM) &&
+	    pte_valid(*ptep) && pte_valid(pte)) {
+		VM_WARN_ONCE(!pte_young(pte),
+			     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
+			     __func__, pte_val(*ptep), pte_val(pte));
+		VM_WARN_ONCE(pte_write(*ptep) && !pte_dirty(pte),
+			     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
+			     __func__, pte_val(*ptep), pte_val(pte));
 	}
 
 	set_pte(ptep, pte);
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 24926f2504f7..feb6b4efa641 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -75,6 +75,15 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 			   (1 << MIDR_VARIANT_SHIFT) | 2),
 	},
 #endif
+#ifdef CONFIG_ARM64_ERRATUM_834220
+	{
+	/* Cortex-A57 r0p0 - r1p2 */
+		.desc = "ARM erratum 834220",
+		.capability = ARM64_WORKAROUND_834220,
+		MIDR_RANGE(MIDR_CORTEX_A57, 0x00,
+			   (1 << MIDR_VARIANT_SHIFT) | 2),
+	},
+#endif
 #ifdef CONFIG_ARM64_ERRATUM_845719
 	{
 	/* Cortex-A53 r0p[01234] */
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index c8cf89223b5a..0669c63281ea 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -44,8 +44,9 @@ unsigned int compat_elf_hwcap2 __read_mostly;
 
 DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
 
-#define ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
+#define __ARM64_FTR_BITS(SIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
 	{						\
+		.sign = SIGNED,				\
 		.strict = STRICT,			\
 		.type = TYPE,				\
 		.shift = SHIFT,				\
@@ -53,6 +54,14 @@ DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
 		.safe_val = SAFE_VAL,			\
 	}
 
+/* Define a feature with signed values */
+#define ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
+	__ARM64_FTR_BITS(FTR_SIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)
+
+/* Define a feature with unsigned value */
+#define U_ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
+	__ARM64_FTR_BITS(FTR_UNSIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)
+
 #define ARM64_FTR_END					\
 	{						\
 		.width = 0,				\
@@ -99,7 +108,7 @@ static struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
 	 * Differing PARange is fine as long as all peripherals and memory are mapped
 	 * within the minimum PARange of all CPUs
 	 */
-	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0),
+	U_ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0),
 	ARM64_FTR_END,
 };
 
@@ -115,18 +124,18 @@ static struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
 };
 
 static struct arm64_ftr_bits ftr_ctr[] = {
-	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1),	/* RAO */
+	U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1),	/* RAO */
 	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 3, 0),
-	ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0),	/* CWG */
-	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),	/* ERG */
-	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1),	/* DminLine */
+	U_ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0),	/* CWG */
+	U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),	/* ERG */
+	U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1),	/* DminLine */
 	/*
 	 * Linux can handle differing I-cache policies. Userspace JITs will
 	 * make use of *minLine
 	 */
-	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, 14, 2, 0),	/* L1Ip */
+	U_ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, 14, 2, 0),	/* L1Ip */
 	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 10, 0),	/* RAZ */
-	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),	/* IminLine */
+	U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),	/* IminLine */
 	ARM64_FTR_END,
 };
 
@@ -144,12 +153,12 @@ static struct arm64_ftr_bits ftr_id_mmfr0[] = {
 
 static struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
 	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
-	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
+	U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
+	U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
+	U_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0),
+	U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
+	U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0),
+	U_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
 	ARM64_FTR_END,
 };
 
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index fc5508e0df57..4eeb17198cfa 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -127,7 +127,11 @@ static int __init uefi_init(void)
127 table_size = sizeof(efi_config_table_64_t) * efi.systab->nr_tables; 127 table_size = sizeof(efi_config_table_64_t) * efi.systab->nr_tables;
128 config_tables = early_memremap(efi_to_phys(efi.systab->tables), 128 config_tables = early_memremap(efi_to_phys(efi.systab->tables),
129 table_size); 129 table_size);
130 130 if (config_tables == NULL) {
131 pr_warn("Unable to map EFI config table array.\n");
132 retval = -ENOMEM;
133 goto out;
134 }
131 retval = efi_config_parse_tables(config_tables, efi.systab->nr_tables, 135 retval = efi_config_parse_tables(config_tables, efi.systab->nr_tables,
132 sizeof(efi_config_table_64_t), NULL); 136 sizeof(efi_config_table_64_t), NULL);
133 137
@@ -209,6 +213,14 @@ void __init efi_init(void)
209 PAGE_ALIGN(params.mmap_size + (params.mmap & ~PAGE_MASK))); 213 PAGE_ALIGN(params.mmap_size + (params.mmap & ~PAGE_MASK)));
210 memmap.phys_map = params.mmap; 214 memmap.phys_map = params.mmap;
211 memmap.map = early_memremap(params.mmap, params.mmap_size); 215 memmap.map = early_memremap(params.mmap, params.mmap_size);
216 if (memmap.map == NULL) {
217 /*
218 * If we are booting via UEFI, the UEFI memory map is the only
219 * description of memory we have, so there is little point in
220 * proceeding if we cannot access it.
221 */
222 panic("Unable to map EFI memory map.\n");
223 }
212 memmap.map_end = memmap.map + params.mmap_size; 224 memmap.map_end = memmap.map + params.mmap_size;
213 memmap.desc_size = params.desc_size; 225 memmap.desc_size = params.desc_size;
214 memmap.desc_version = params.desc_ver; 226 memmap.desc_version = params.desc_ver;
@@ -227,7 +239,6 @@ static bool __init efi_virtmap_init(void)
227 init_new_context(NULL, &efi_mm); 239 init_new_context(NULL, &efi_mm);
228 240
229 for_each_efi_memory_desc(&memmap, md) { 241 for_each_efi_memory_desc(&memmap, md) {
230 u64 paddr, npages, size;
231 pgprot_t prot; 242 pgprot_t prot;
232 243
233 if (!(md->attribute & EFI_MEMORY_RUNTIME)) 244 if (!(md->attribute & EFI_MEMORY_RUNTIME))
@@ -235,11 +246,6 @@ static bool __init efi_virtmap_init(void)
235 if (md->virt_addr == 0) 246 if (md->virt_addr == 0)
236 return false; 247 return false;
237 248
238 paddr = md->phys_addr;
239 npages = md->num_pages;
240 memrange_efi_to_native(&paddr, &npages);
241 size = npages << PAGE_SHIFT;
242
243 pr_info(" EFI remap 0x%016llx => %p\n", 249 pr_info(" EFI remap 0x%016llx => %p\n",
244 md->phys_addr, (void *)md->virt_addr); 250 md->phys_addr, (void *)md->virt_addr);
245 251
@@ -256,7 +262,8 @@ static bool __init efi_virtmap_init(void)
256 else 262 else
257 prot = PAGE_KERNEL; 263 prot = PAGE_KERNEL;
258 264
259 create_pgd_mapping(&efi_mm, paddr, md->virt_addr, size, 265 create_pgd_mapping(&efi_mm, md->phys_addr, md->virt_addr,
266 md->num_pages << EFI_PAGE_SHIFT,
260 __pgprot(pgprot_val(prot) | PTE_NG)); 267 __pgprot(pgprot_val(prot) | PTE_NG));
261 } 268 }
262 return true; 269 return true;
@@ -273,12 +280,12 @@ static int __init arm64_enable_runtime_services(void)
273 280
274 if (!efi_enabled(EFI_BOOT)) { 281 if (!efi_enabled(EFI_BOOT)) {
275 pr_info("EFI services will not be available.\n"); 282 pr_info("EFI services will not be available.\n");
276 return -1; 283 return 0;
277 } 284 }
278 285
279 if (efi_runtime_disabled()) { 286 if (efi_runtime_disabled()) {
280 pr_info("EFI runtime services will be disabled.\n"); 287 pr_info("EFI runtime services will be disabled.\n");
281 return -1; 288 return 0;
282 } 289 }
283 290
284 pr_info("Remapping and enabling EFI services.\n"); 291 pr_info("Remapping and enabling EFI services.\n");
@@ -288,7 +295,7 @@ static int __init arm64_enable_runtime_services(void)
288 mapsize); 295 mapsize);
289 if (!memmap.map) { 296 if (!memmap.map) {
290 pr_err("Failed to remap EFI memory map\n"); 297 pr_err("Failed to remap EFI memory map\n");
291 return -1; 298 return -ENOMEM;
292 } 299 }
293 memmap.map_end = memmap.map + mapsize; 300 memmap.map_end = memmap.map + mapsize;
294 efi.memmap = &memmap; 301 efi.memmap = &memmap;
@@ -297,13 +304,13 @@ static int __init arm64_enable_runtime_services(void)
297 sizeof(efi_system_table_t)); 304 sizeof(efi_system_table_t));
298 if (!efi.systab) { 305 if (!efi.systab) {
299 pr_err("Failed to remap EFI System Table\n"); 306 pr_err("Failed to remap EFI System Table\n");
300 return -1; 307 return -ENOMEM;
301 } 308 }
302 set_bit(EFI_SYSTEM_TABLES, &efi.flags); 309 set_bit(EFI_SYSTEM_TABLES, &efi.flags);
303 310
304 if (!efi_virtmap_init()) { 311 if (!efi_virtmap_init()) {
305 pr_err("No UEFI virtual mapping was installed -- runtime services will not be available\n"); 312 pr_err("No UEFI virtual mapping was installed -- runtime services will not be available\n");
306 return -1; 313 return -ENOMEM;
307 } 314 }
308 315
309 /* Set up runtime services function pointers */ 316 /* Set up runtime services function pointers */
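
Two things change in the efi.c hunks above: efi_init() now panics if the UEFI memory map cannot be early_memremap()'d (on a UEFI boot it is the only description of memory available), and efi_virtmap_init() maps each RUNTIME region using the descriptor's own page count, md->num_pages << EFI_PAGE_SHIFT, instead of the old memrange_efi_to_native() rounding through kernel pages. A minimal standalone sketch of that size computation, with EFI_PAGE_SHIFT hard-coded to its UEFI-defined value of 12 and a stand-in struct used purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define EFI_PAGE_SHIFT 12              /* UEFI pages are always 4 KiB */

/* Illustrative stand-in for the fields used from efi_memory_desc_t. */
struct efi_md {
        uint64_t phys_addr;
        uint64_t virt_addr;
        uint64_t num_pages;            /* counted in 4 KiB EFI pages */
};

/* Byte size of a descriptor: independent of the kernel's PAGE_SIZE,
 * which is why the new code no longer rounds through PAGE_SHIFT. */
static uint64_t efi_md_size(const struct efi_md *md)
{
        return md->num_pages << EFI_PAGE_SHIFT;
}

int main(void)
{
        struct efi_md md = { 0x80000000ULL, 0xffff000008000000ULL, 16 };

        printf("map 0x%llx -> 0x%llx, %llu bytes\n",
               (unsigned long long)md.phys_addr,
               (unsigned long long)md.virt_addr,
               (unsigned long long)efi_md_size(&md));
        return 0;
}
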
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 1ee2c3937d4e..71426a78db12 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -5,6 +5,7 @@
5 */ 5 */
6 6
7#include <asm-generic/vmlinux.lds.h> 7#include <asm-generic/vmlinux.lds.h>
8#include <asm/cache.h>
8#include <asm/kernel-pgtable.h> 9#include <asm/kernel-pgtable.h>
9#include <asm/thread_info.h> 10#include <asm/thread_info.h>
10#include <asm/memory.h> 11#include <asm/memory.h>
@@ -140,7 +141,7 @@ SECTIONS
140 ARM_EXIT_KEEP(EXIT_DATA) 141 ARM_EXIT_KEEP(EXIT_DATA)
141 } 142 }
142 143
143 PERCPU_SECTION(64) 144 PERCPU_SECTION(L1_CACHE_BYTES)
144 145
145 . = ALIGN(PAGE_SIZE); 146 . = ALIGN(PAGE_SIZE);
146 __init_end = .; 147 __init_end = .;
@@ -158,7 +159,7 @@ SECTIONS
158 . = ALIGN(PAGE_SIZE); 159 . = ALIGN(PAGE_SIZE);
159 _data = .; 160 _data = .;
160 _sdata = .; 161 _sdata = .;
161 RW_DATA_SECTION(64, PAGE_SIZE, THREAD_SIZE) 162 RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
162 PECOFF_EDATA_PADDING 163 PECOFF_EDATA_PADDING
163 _edata = .; 164 _edata = .;
164 165
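
The linker-script hunk replaces the hard-coded 64 in PERCPU_SECTION and RW_DATA_SECTION with L1_CACHE_BYTES from the newly included <asm/cache.h>, so the section alignment tracks the configured cache-line size (which may be larger than 64 bytes on some arm64 parts). A hedged userspace illustration of the same idea, with the line size picked arbitrarily for the example:

#include <stdio.h>

#define L1_CACHE_SHIFT 7                       /* assumed 128-byte lines */
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)

/* Padding per-"CPU" data out to the cache-line size keeps two hot
 * counters from sharing a line (false sharing). */
struct percpu_counter {
        unsigned long count;
} __attribute__((aligned(L1_CACHE_BYTES)));

static struct percpu_counter counters[4];

int main(void)
{
        printf("stride between entries: %zu bytes\n", sizeof(counters[0]));
        return 0;
}
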
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 68a0759b1375..15f0477b0d2a 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -37,7 +37,7 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
37{ 37{
38 int ret; 38 int ret;
39 39
40 trace_kvm_hvc_arm64(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0), 40 trace_kvm_hvc_arm64(*vcpu_pc(vcpu), vcpu_get_reg(vcpu, 0),
41 kvm_vcpu_hvc_get_imm(vcpu)); 41 kvm_vcpu_hvc_get_imm(vcpu));
42 42
43 ret = kvm_psci_call(vcpu); 43 ret = kvm_psci_call(vcpu);
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 1599701ef044..86c289832272 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -864,6 +864,10 @@ ENTRY(__kvm_flush_vm_context)
864ENDPROC(__kvm_flush_vm_context) 864ENDPROC(__kvm_flush_vm_context)
865 865
866__kvm_hyp_panic: 866__kvm_hyp_panic:
867 // Stash PAR_EL1 before corrupting it in __restore_sysregs
868 mrs x0, par_el1
869 push x0, xzr
870
867 // Guess the context by looking at VTTBR: 871 // Guess the context by looking at VTTBR:
868 // If zero, then we're already a host. 872 // If zero, then we're already a host.
869 // Otherwise restore a minimal host context before panicing. 873 // Otherwise restore a minimal host context before panicing.
@@ -898,7 +902,7 @@ __kvm_hyp_panic:
898 mrs x3, esr_el2 902 mrs x3, esr_el2
899 mrs x4, far_el2 903 mrs x4, far_el2
900 mrs x5, hpfar_el2 904 mrs x5, hpfar_el2
901 mrs x6, par_el1 905 pop x6, xzr // active context PAR_EL1
902 mrs x7, tpidr_el2 906 mrs x7, tpidr_el2
903 907
904 mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\ 908 mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
@@ -914,7 +918,7 @@ __kvm_hyp_panic:
914ENDPROC(__kvm_hyp_panic) 918ENDPROC(__kvm_hyp_panic)
915 919
916__hyp_panic_str: 920__hyp_panic_str:
917 .ascii "HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n\0" 921 .ascii "HYP panic:\nPS:%08x PC:%016x ESR:%08x\nFAR:%016x HPFAR:%016x PAR:%016x\nVCPU:%p\n\0"
918 922
919 .align 2 923 .align 2
920 924
@@ -1015,9 +1019,15 @@ el1_trap:
1015 b.ne 1f // Not an abort we care about 1019 b.ne 1f // Not an abort we care about
1016 1020
1017 /* This is an abort. Check for permission fault */ 1021 /* This is an abort. Check for permission fault */
1022alternative_if_not ARM64_WORKAROUND_834220
1018 and x2, x1, #ESR_ELx_FSC_TYPE 1023 and x2, x1, #ESR_ELx_FSC_TYPE
1019 cmp x2, #FSC_PERM 1024 cmp x2, #FSC_PERM
1020 b.ne 1f // Not a permission fault 1025 b.ne 1f // Not a permission fault
1026alternative_else
1027 nop // Use the permission fault path to
1028 nop // check for a valid S1 translation,
1029 nop // regardless of the ESR value.
1030alternative_endif
1021 1031
1022 /* 1032 /*
1023 * Check for Stage-1 page table walk, which is guaranteed 1033 * Check for Stage-1 page table walk, which is guaranteed
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index 85c57158dcd9..648112e90ed5 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -48,7 +48,7 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
48 48
49 /* Note: These now point to the banked copies */ 49 /* Note: These now point to the banked copies */
50 *vcpu_spsr(vcpu) = new_spsr_value; 50 *vcpu_spsr(vcpu) = new_spsr_value;
51 *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset; 51 *vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
52 52
53 /* Branch to exception vector */ 53 /* Branch to exception vector */
54 if (sctlr & (1 << 13)) 54 if (sctlr & (1 << 13))
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 87a64e8db04c..d2650e84faf2 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -78,7 +78,7 @@ static u32 get_ccsidr(u32 csselr)
78 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized). 78 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
79 */ 79 */
80static bool access_dcsw(struct kvm_vcpu *vcpu, 80static bool access_dcsw(struct kvm_vcpu *vcpu,
81 const struct sys_reg_params *p, 81 struct sys_reg_params *p,
82 const struct sys_reg_desc *r) 82 const struct sys_reg_desc *r)
83{ 83{
84 if (!p->is_write) 84 if (!p->is_write)
@@ -94,21 +94,19 @@ static bool access_dcsw(struct kvm_vcpu *vcpu,
94 * sys_regs and leave it in complete control of the caches. 94 * sys_regs and leave it in complete control of the caches.
95 */ 95 */
96static bool access_vm_reg(struct kvm_vcpu *vcpu, 96static bool access_vm_reg(struct kvm_vcpu *vcpu,
97 const struct sys_reg_params *p, 97 struct sys_reg_params *p,
98 const struct sys_reg_desc *r) 98 const struct sys_reg_desc *r)
99{ 99{
100 unsigned long val;
101 bool was_enabled = vcpu_has_cache_enabled(vcpu); 100 bool was_enabled = vcpu_has_cache_enabled(vcpu);
102 101
103 BUG_ON(!p->is_write); 102 BUG_ON(!p->is_write);
104 103
105 val = *vcpu_reg(vcpu, p->Rt);
106 if (!p->is_aarch32) { 104 if (!p->is_aarch32) {
107 vcpu_sys_reg(vcpu, r->reg) = val; 105 vcpu_sys_reg(vcpu, r->reg) = p->regval;
108 } else { 106 } else {
109 if (!p->is_32bit) 107 if (!p->is_32bit)
110 vcpu_cp15_64_high(vcpu, r->reg) = val >> 32; 108 vcpu_cp15_64_high(vcpu, r->reg) = upper_32_bits(p->regval);
111 vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL; 109 vcpu_cp15_64_low(vcpu, r->reg) = lower_32_bits(p->regval);
112 } 110 }
113 111
114 kvm_toggle_cache(vcpu, was_enabled); 112 kvm_toggle_cache(vcpu, was_enabled);
@@ -122,22 +120,19 @@ static bool access_vm_reg(struct kvm_vcpu *vcpu,
122 * for both AArch64 and AArch32 accesses. 120 * for both AArch64 and AArch32 accesses.
123 */ 121 */
124static bool access_gic_sgi(struct kvm_vcpu *vcpu, 122static bool access_gic_sgi(struct kvm_vcpu *vcpu,
125 const struct sys_reg_params *p, 123 struct sys_reg_params *p,
126 const struct sys_reg_desc *r) 124 const struct sys_reg_desc *r)
127{ 125{
128 u64 val;
129
130 if (!p->is_write) 126 if (!p->is_write)
131 return read_from_write_only(vcpu, p); 127 return read_from_write_only(vcpu, p);
132 128
133 val = *vcpu_reg(vcpu, p->Rt); 129 vgic_v3_dispatch_sgi(vcpu, p->regval);
134 vgic_v3_dispatch_sgi(vcpu, val);
135 130
136 return true; 131 return true;
137} 132}
138 133
139static bool trap_raz_wi(struct kvm_vcpu *vcpu, 134static bool trap_raz_wi(struct kvm_vcpu *vcpu,
140 const struct sys_reg_params *p, 135 struct sys_reg_params *p,
141 const struct sys_reg_desc *r) 136 const struct sys_reg_desc *r)
142{ 137{
143 if (p->is_write) 138 if (p->is_write)
@@ -147,19 +142,19 @@ static bool trap_raz_wi(struct kvm_vcpu *vcpu,
147} 142}
148 143
149static bool trap_oslsr_el1(struct kvm_vcpu *vcpu, 144static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
150 const struct sys_reg_params *p, 145 struct sys_reg_params *p,
151 const struct sys_reg_desc *r) 146 const struct sys_reg_desc *r)
152{ 147{
153 if (p->is_write) { 148 if (p->is_write) {
154 return ignore_write(vcpu, p); 149 return ignore_write(vcpu, p);
155 } else { 150 } else {
156 *vcpu_reg(vcpu, p->Rt) = (1 << 3); 151 p->regval = (1 << 3);
157 return true; 152 return true;
158 } 153 }
159} 154}
160 155
161static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu, 156static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
162 const struct sys_reg_params *p, 157 struct sys_reg_params *p,
163 const struct sys_reg_desc *r) 158 const struct sys_reg_desc *r)
164{ 159{
165 if (p->is_write) { 160 if (p->is_write) {
@@ -167,7 +162,7 @@ static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
167 } else { 162 } else {
168 u32 val; 163 u32 val;
169 asm volatile("mrs %0, dbgauthstatus_el1" : "=r" (val)); 164 asm volatile("mrs %0, dbgauthstatus_el1" : "=r" (val));
170 *vcpu_reg(vcpu, p->Rt) = val; 165 p->regval = val;
171 return true; 166 return true;
172 } 167 }
173} 168}
@@ -200,17 +195,17 @@ static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
200 * now use the debug registers. 195 * now use the debug registers.
201 */ 196 */
202static bool trap_debug_regs(struct kvm_vcpu *vcpu, 197static bool trap_debug_regs(struct kvm_vcpu *vcpu,
203 const struct sys_reg_params *p, 198 struct sys_reg_params *p,
204 const struct sys_reg_desc *r) 199 const struct sys_reg_desc *r)
205{ 200{
206 if (p->is_write) { 201 if (p->is_write) {
207 vcpu_sys_reg(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt); 202 vcpu_sys_reg(vcpu, r->reg) = p->regval;
208 vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY; 203 vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
209 } else { 204 } else {
210 *vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, r->reg); 205 p->regval = vcpu_sys_reg(vcpu, r->reg);
211 } 206 }
212 207
213 trace_trap_reg(__func__, r->reg, p->is_write, *vcpu_reg(vcpu, p->Rt)); 208 trace_trap_reg(__func__, r->reg, p->is_write, p->regval);
214 209
215 return true; 210 return true;
216} 211}
@@ -225,10 +220,10 @@ static bool trap_debug_regs(struct kvm_vcpu *vcpu,
225 * hyp.S code switches between host and guest values in future. 220 * hyp.S code switches between host and guest values in future.
226 */ 221 */
227static inline void reg_to_dbg(struct kvm_vcpu *vcpu, 222static inline void reg_to_dbg(struct kvm_vcpu *vcpu,
228 const struct sys_reg_params *p, 223 struct sys_reg_params *p,
229 u64 *dbg_reg) 224 u64 *dbg_reg)
230{ 225{
231 u64 val = *vcpu_reg(vcpu, p->Rt); 226 u64 val = p->regval;
232 227
233 if (p->is_32bit) { 228 if (p->is_32bit) {
234 val &= 0xffffffffUL; 229 val &= 0xffffffffUL;
@@ -240,19 +235,16 @@ static inline void reg_to_dbg(struct kvm_vcpu *vcpu,
240} 235}
241 236
242static inline void dbg_to_reg(struct kvm_vcpu *vcpu, 237static inline void dbg_to_reg(struct kvm_vcpu *vcpu,
243 const struct sys_reg_params *p, 238 struct sys_reg_params *p,
244 u64 *dbg_reg) 239 u64 *dbg_reg)
245{ 240{
246 u64 val = *dbg_reg; 241 p->regval = *dbg_reg;
247
248 if (p->is_32bit) 242 if (p->is_32bit)
249 val &= 0xffffffffUL; 243 p->regval &= 0xffffffffUL;
250
251 *vcpu_reg(vcpu, p->Rt) = val;
252} 244}
253 245
254static inline bool trap_bvr(struct kvm_vcpu *vcpu, 246static inline bool trap_bvr(struct kvm_vcpu *vcpu,
255 const struct sys_reg_params *p, 247 struct sys_reg_params *p,
256 const struct sys_reg_desc *rd) 248 const struct sys_reg_desc *rd)
257{ 249{
258 u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg]; 250 u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
@@ -294,7 +286,7 @@ static inline void reset_bvr(struct kvm_vcpu *vcpu,
294} 286}
295 287
296static inline bool trap_bcr(struct kvm_vcpu *vcpu, 288static inline bool trap_bcr(struct kvm_vcpu *vcpu,
297 const struct sys_reg_params *p, 289 struct sys_reg_params *p,
298 const struct sys_reg_desc *rd) 290 const struct sys_reg_desc *rd)
299{ 291{
300 u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg]; 292 u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
@@ -337,7 +329,7 @@ static inline void reset_bcr(struct kvm_vcpu *vcpu,
337} 329}
338 330
339static inline bool trap_wvr(struct kvm_vcpu *vcpu, 331static inline bool trap_wvr(struct kvm_vcpu *vcpu,
340 const struct sys_reg_params *p, 332 struct sys_reg_params *p,
341 const struct sys_reg_desc *rd) 333 const struct sys_reg_desc *rd)
342{ 334{
343 u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]; 335 u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
@@ -380,7 +372,7 @@ static inline void reset_wvr(struct kvm_vcpu *vcpu,
380} 372}
381 373
382static inline bool trap_wcr(struct kvm_vcpu *vcpu, 374static inline bool trap_wcr(struct kvm_vcpu *vcpu,
383 const struct sys_reg_params *p, 375 struct sys_reg_params *p,
384 const struct sys_reg_desc *rd) 376 const struct sys_reg_desc *rd)
385{ 377{
386 u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg]; 378 u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
@@ -687,7 +679,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
687}; 679};
688 680
689static bool trap_dbgidr(struct kvm_vcpu *vcpu, 681static bool trap_dbgidr(struct kvm_vcpu *vcpu,
690 const struct sys_reg_params *p, 682 struct sys_reg_params *p,
691 const struct sys_reg_desc *r) 683 const struct sys_reg_desc *r)
692{ 684{
693 if (p->is_write) { 685 if (p->is_write) {
@@ -697,23 +689,23 @@ static bool trap_dbgidr(struct kvm_vcpu *vcpu,
697 u64 pfr = read_system_reg(SYS_ID_AA64PFR0_EL1); 689 u64 pfr = read_system_reg(SYS_ID_AA64PFR0_EL1);
698 u32 el3 = !!cpuid_feature_extract_field(pfr, ID_AA64PFR0_EL3_SHIFT); 690 u32 el3 = !!cpuid_feature_extract_field(pfr, ID_AA64PFR0_EL3_SHIFT);
699 691
700 *vcpu_reg(vcpu, p->Rt) = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) | 692 p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
701 (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) | 693 (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
702 (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20) | 694 (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
703 (6 << 16) | (el3 << 14) | (el3 << 12)); 695 | (6 << 16) | (el3 << 14) | (el3 << 12));
704 return true; 696 return true;
705 } 697 }
706} 698}
707 699
708static bool trap_debug32(struct kvm_vcpu *vcpu, 700static bool trap_debug32(struct kvm_vcpu *vcpu,
709 const struct sys_reg_params *p, 701 struct sys_reg_params *p,
710 const struct sys_reg_desc *r) 702 const struct sys_reg_desc *r)
711{ 703{
712 if (p->is_write) { 704 if (p->is_write) {
713 vcpu_cp14(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt); 705 vcpu_cp14(vcpu, r->reg) = p->regval;
714 vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY; 706 vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
715 } else { 707 } else {
716 *vcpu_reg(vcpu, p->Rt) = vcpu_cp14(vcpu, r->reg); 708 p->regval = vcpu_cp14(vcpu, r->reg);
717 } 709 }
718 710
719 return true; 711 return true;
@@ -731,7 +723,7 @@ static bool trap_debug32(struct kvm_vcpu *vcpu,
731 */ 723 */
732 724
733static inline bool trap_xvr(struct kvm_vcpu *vcpu, 725static inline bool trap_xvr(struct kvm_vcpu *vcpu,
734 const struct sys_reg_params *p, 726 struct sys_reg_params *p,
735 const struct sys_reg_desc *rd) 727 const struct sys_reg_desc *rd)
736{ 728{
737 u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg]; 729 u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
@@ -740,12 +732,12 @@ static inline bool trap_xvr(struct kvm_vcpu *vcpu,
740 u64 val = *dbg_reg; 732 u64 val = *dbg_reg;
741 733
742 val &= 0xffffffffUL; 734 val &= 0xffffffffUL;
743 val |= *vcpu_reg(vcpu, p->Rt) << 32; 735 val |= p->regval << 32;
744 *dbg_reg = val; 736 *dbg_reg = val;
745 737
746 vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY; 738 vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
747 } else { 739 } else {
748 *vcpu_reg(vcpu, p->Rt) = *dbg_reg >> 32; 740 p->regval = *dbg_reg >> 32;
749 } 741 }
750 742
751 trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg); 743 trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
@@ -991,7 +983,7 @@ int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
991 * Return 0 if the access has been handled, and -1 if not. 983 * Return 0 if the access has been handled, and -1 if not.
992 */ 984 */
993static int emulate_cp(struct kvm_vcpu *vcpu, 985static int emulate_cp(struct kvm_vcpu *vcpu,
994 const struct sys_reg_params *params, 986 struct sys_reg_params *params,
995 const struct sys_reg_desc *table, 987 const struct sys_reg_desc *table,
996 size_t num) 988 size_t num)
997{ 989{
@@ -1062,12 +1054,12 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
1062{ 1054{
1063 struct sys_reg_params params; 1055 struct sys_reg_params params;
1064 u32 hsr = kvm_vcpu_get_hsr(vcpu); 1056 u32 hsr = kvm_vcpu_get_hsr(vcpu);
1057 int Rt = (hsr >> 5) & 0xf;
1065 int Rt2 = (hsr >> 10) & 0xf; 1058 int Rt2 = (hsr >> 10) & 0xf;
1066 1059
1067 params.is_aarch32 = true; 1060 params.is_aarch32 = true;
1068 params.is_32bit = false; 1061 params.is_32bit = false;
1069 params.CRm = (hsr >> 1) & 0xf; 1062 params.CRm = (hsr >> 1) & 0xf;
1070 params.Rt = (hsr >> 5) & 0xf;
1071 params.is_write = ((hsr & 1) == 0); 1063 params.is_write = ((hsr & 1) == 0);
1072 1064
1073 params.Op0 = 0; 1065 params.Op0 = 0;
@@ -1076,15 +1068,12 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
1076 params.CRn = 0; 1068 params.CRn = 0;
1077 1069
1078 /* 1070 /*
1079 * Massive hack here. Store Rt2 in the top 32bits so we only 1071 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
1080 * have one register to deal with. As we use the same trap
1081 * backends between AArch32 and AArch64, we get away with it. 1072 * backends between AArch32 and AArch64, we get away with it.
1082 */ 1073 */
1083 if (params.is_write) { 1074 if (params.is_write) {
1084 u64 val = *vcpu_reg(vcpu, params.Rt); 1075 params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
1085 val &= 0xffffffff; 1076 params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
1086 val |= *vcpu_reg(vcpu, Rt2) << 32;
1087 *vcpu_reg(vcpu, params.Rt) = val;
1088 } 1077 }
1089 1078
1090 if (!emulate_cp(vcpu, &params, target_specific, nr_specific)) 1079 if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
@@ -1095,11 +1084,10 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
1095 unhandled_cp_access(vcpu, &params); 1084 unhandled_cp_access(vcpu, &params);
1096 1085
1097out: 1086out:
1098 /* Do the opposite hack for the read side */ 1087 /* Split up the value between registers for the read side */
1099 if (!params.is_write) { 1088 if (!params.is_write) {
1100 u64 val = *vcpu_reg(vcpu, params.Rt); 1089 vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
1101 val >>= 32; 1090 vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
1102 *vcpu_reg(vcpu, Rt2) = val;
1103 } 1091 }
1104 1092
1105 return 1; 1093 return 1;
@@ -1118,21 +1106,24 @@ static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
1118{ 1106{
1119 struct sys_reg_params params; 1107 struct sys_reg_params params;
1120 u32 hsr = kvm_vcpu_get_hsr(vcpu); 1108 u32 hsr = kvm_vcpu_get_hsr(vcpu);
1109 int Rt = (hsr >> 5) & 0xf;
1121 1110
1122 params.is_aarch32 = true; 1111 params.is_aarch32 = true;
1123 params.is_32bit = true; 1112 params.is_32bit = true;
1124 params.CRm = (hsr >> 1) & 0xf; 1113 params.CRm = (hsr >> 1) & 0xf;
1125 params.Rt = (hsr >> 5) & 0xf; 1114 params.regval = vcpu_get_reg(vcpu, Rt);
1126 params.is_write = ((hsr & 1) == 0); 1115 params.is_write = ((hsr & 1) == 0);
1127 params.CRn = (hsr >> 10) & 0xf; 1116 params.CRn = (hsr >> 10) & 0xf;
1128 params.Op0 = 0; 1117 params.Op0 = 0;
1129 params.Op1 = (hsr >> 14) & 0x7; 1118 params.Op1 = (hsr >> 14) & 0x7;
1130 params.Op2 = (hsr >> 17) & 0x7; 1119 params.Op2 = (hsr >> 17) & 0x7;
1131 1120
1132 if (!emulate_cp(vcpu, &params, target_specific, nr_specific)) 1121 if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
1133 return 1; 1122 !emulate_cp(vcpu, &params, global, nr_global)) {
1134 if (!emulate_cp(vcpu, &params, global, nr_global)) 1123 if (!params.is_write)
1124 vcpu_set_reg(vcpu, Rt, params.regval);
1135 return 1; 1125 return 1;
1126 }
1136 1127
1137 unhandled_cp_access(vcpu, &params); 1128 unhandled_cp_access(vcpu, &params);
1138 return 1; 1129 return 1;
@@ -1175,7 +1166,7 @@ int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
1175} 1166}
1176 1167
1177static int emulate_sys_reg(struct kvm_vcpu *vcpu, 1168static int emulate_sys_reg(struct kvm_vcpu *vcpu,
1178 const struct sys_reg_params *params) 1169 struct sys_reg_params *params)
1179{ 1170{
1180 size_t num; 1171 size_t num;
1181 const struct sys_reg_desc *table, *r; 1172 const struct sys_reg_desc *table, *r;
@@ -1230,6 +1221,8 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
1230{ 1221{
1231 struct sys_reg_params params; 1222 struct sys_reg_params params;
1232 unsigned long esr = kvm_vcpu_get_hsr(vcpu); 1223 unsigned long esr = kvm_vcpu_get_hsr(vcpu);
1224 int Rt = (esr >> 5) & 0x1f;
1225 int ret;
1233 1226
1234 trace_kvm_handle_sys_reg(esr); 1227 trace_kvm_handle_sys_reg(esr);
1235 1228
@@ -1240,10 +1233,14 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
1240 params.CRn = (esr >> 10) & 0xf; 1233 params.CRn = (esr >> 10) & 0xf;
1241 params.CRm = (esr >> 1) & 0xf; 1234 params.CRm = (esr >> 1) & 0xf;
1242 params.Op2 = (esr >> 17) & 0x7; 1235 params.Op2 = (esr >> 17) & 0x7;
1243 params.Rt = (esr >> 5) & 0x1f; 1236 params.regval = vcpu_get_reg(vcpu, Rt);
1244 params.is_write = !(esr & 1); 1237 params.is_write = !(esr & 1);
1245 1238
1246 return emulate_sys_reg(vcpu, &params); 1239 ret = emulate_sys_reg(vcpu, &params);
1240
1241 if (!params.is_write)
1242 vcpu_set_reg(vcpu, Rt, params.regval);
1243 return ret;
1247} 1244}
1248 1245
1249/****************************************************************************** 1246/******************************************************************************
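
The sys_regs.c rework replaces the Rt field in struct sys_reg_params with a 64-bit regval: trap handlers read and write regval directly, and the packing of the Rt/Rt2 pair for 64-bit AArch32 accesses is done once in kvm_handle_cp_64(), with lower_32_bits()/upper_32_bits() splitting the result back out on the read side. A self-contained sketch of that packing and splitting, using local helper names rather than the kernel's:

#include <stdint.h>
#include <stdio.h>

static inline uint32_t lo32(uint64_t v) { return (uint32_t)v; }
static inline uint32_t hi32(uint64_t v) { return (uint32_t)(v >> 32); }

/* Write path: combine guest Rt (low half) and Rt2 (high half). */
static uint64_t pack_rt_pair(uint32_t rt, uint32_t rt2)
{
        return (uint64_t)rt | ((uint64_t)rt2 << 32);
}

int main(void)
{
        uint64_t regval = pack_rt_pair(0xdeadbeef, 0x01234567);

        /* Read path: split the emulated 64-bit result back into Rt/Rt2. */
        printf("regval=%016llx Rt=%08x Rt2=%08x\n",
               (unsigned long long)regval, lo32(regval), hi32(regval));
        return 0;
}
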
diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h
index eaa324e4db4d..dbbb01cfbee9 100644
--- a/arch/arm64/kvm/sys_regs.h
+++ b/arch/arm64/kvm/sys_regs.h
@@ -28,7 +28,7 @@ struct sys_reg_params {
28 u8 CRn; 28 u8 CRn;
29 u8 CRm; 29 u8 CRm;
30 u8 Op2; 30 u8 Op2;
31 u8 Rt; 31 u64 regval;
32 bool is_write; 32 bool is_write;
33 bool is_aarch32; 33 bool is_aarch32;
34 bool is_32bit; /* Only valid if is_aarch32 is true */ 34 bool is_32bit; /* Only valid if is_aarch32 is true */
@@ -44,7 +44,7 @@ struct sys_reg_desc {
44 44
45 /* Trapped access from guest, if non-NULL. */ 45 /* Trapped access from guest, if non-NULL. */
46 bool (*access)(struct kvm_vcpu *, 46 bool (*access)(struct kvm_vcpu *,
47 const struct sys_reg_params *, 47 struct sys_reg_params *,
48 const struct sys_reg_desc *); 48 const struct sys_reg_desc *);
49 49
50 /* Initialization for vcpu. */ 50 /* Initialization for vcpu. */
@@ -77,9 +77,9 @@ static inline bool ignore_write(struct kvm_vcpu *vcpu,
77} 77}
78 78
79static inline bool read_zero(struct kvm_vcpu *vcpu, 79static inline bool read_zero(struct kvm_vcpu *vcpu,
80 const struct sys_reg_params *p) 80 struct sys_reg_params *p)
81{ 81{
82 *vcpu_reg(vcpu, p->Rt) = 0; 82 p->regval = 0;
83 return true; 83 return true;
84} 84}
85 85
diff --git a/arch/arm64/kvm/sys_regs_generic_v8.c b/arch/arm64/kvm/sys_regs_generic_v8.c
index 1e4576824165..ed90578fa120 100644
--- a/arch/arm64/kvm/sys_regs_generic_v8.c
+++ b/arch/arm64/kvm/sys_regs_generic_v8.c
@@ -31,13 +31,13 @@
31#include "sys_regs.h" 31#include "sys_regs.h"
32 32
33static bool access_actlr(struct kvm_vcpu *vcpu, 33static bool access_actlr(struct kvm_vcpu *vcpu,
34 const struct sys_reg_params *p, 34 struct sys_reg_params *p,
35 const struct sys_reg_desc *r) 35 const struct sys_reg_desc *r)
36{ 36{
37 if (p->is_write) 37 if (p->is_write)
38 return ignore_write(vcpu, p); 38 return ignore_write(vcpu, p);
39 39
40 *vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, ACTLR_EL1); 40 p->regval = vcpu_sys_reg(vcpu, ACTLR_EL1);
41 return true; 41 return true;
42} 42}
43 43
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index f636a2639f03..e87f53ff5f58 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -76,13 +76,28 @@ static void flush_context(unsigned int cpu)
76 __flush_icache_all(); 76 __flush_icache_all();
77} 77}
78 78
79static int is_reserved_asid(u64 asid) 79static bool check_update_reserved_asid(u64 asid, u64 newasid)
80{ 80{
81 int cpu; 81 int cpu;
82 for_each_possible_cpu(cpu) 82 bool hit = false;
83 if (per_cpu(reserved_asids, cpu) == asid) 83
84 return 1; 84 /*
85 return 0; 85 * Iterate over the set of reserved ASIDs looking for a match.
86 * If we find one, then we can update our mm to use newasid
87 * (i.e. the same ASID in the current generation) but we can't
88 * exit the loop early, since we need to ensure that all copies
89 * of the old ASID are updated to reflect the mm. Failure to do
90 * so could result in us missing the reserved ASID in a future
91 * generation.
92 */
93 for_each_possible_cpu(cpu) {
94 if (per_cpu(reserved_asids, cpu) == asid) {
95 hit = true;
96 per_cpu(reserved_asids, cpu) = newasid;
97 }
98 }
99
100 return hit;
86} 101}
87 102
88static u64 new_context(struct mm_struct *mm, unsigned int cpu) 103static u64 new_context(struct mm_struct *mm, unsigned int cpu)
@@ -92,12 +107,14 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
92 u64 generation = atomic64_read(&asid_generation); 107 u64 generation = atomic64_read(&asid_generation);
93 108
94 if (asid != 0) { 109 if (asid != 0) {
110 u64 newasid = generation | (asid & ~ASID_MASK);
111
95 /* 112 /*
96 * If our current ASID was active during a rollover, we 113 * If our current ASID was active during a rollover, we
97 * can continue to use it and this was just a false alarm. 114 * can continue to use it and this was just a false alarm.
98 */ 115 */
99 if (is_reserved_asid(asid)) 116 if (check_update_reserved_asid(asid, newasid))
100 return generation | (asid & ~ASID_MASK); 117 return newasid;
101 118
102 /* 119 /*
103 * We had a valid ASID in a previous life, so try to re-use 120 * We had a valid ASID in a previous life, so try to re-use
@@ -105,7 +122,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
105 */ 122 */
106 asid &= ~ASID_MASK; 123 asid &= ~ASID_MASK;
107 if (!__test_and_set_bit(asid, asid_map)) 124 if (!__test_and_set_bit(asid, asid_map))
108 goto bump_gen; 125 return newasid;
109 } 126 }
110 127
111 /* 128 /*
@@ -129,10 +146,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
129set_asid: 146set_asid:
130 __set_bit(asid, asid_map); 147 __set_bit(asid, asid_map);
131 cur_idx = asid; 148 cur_idx = asid;
132 149 return asid | generation;
133bump_gen:
134 asid |= generation;
135 return asid;
136} 150}
137 151
138void check_and_switch_context(struct mm_struct *mm, unsigned int cpu) 152void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
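
The context.c change folds the generation bump into a single newasid value and, crucially, keeps scanning all reserved per-CPU ASIDs even after a hit, so every stale copy is rewritten to the new generation. A standalone sketch of that loop, with a plain array standing in for the per-CPU variables:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4

static uint64_t reserved_asids[NR_CPUS];

/* Return true if asid is reserved anywhere; rewrite *every* copy to
 * newasid so no CPU is left holding the old-generation value. */
static bool check_update_reserved(uint64_t asid, uint64_t newasid)
{
        bool hit = false;
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                if (reserved_asids[cpu] == asid) {
                        hit = true;
                        reserved_asids[cpu] = newasid;
                }
        }
        return hit;
}

int main(void)
{
        reserved_asids[1] = reserved_asids[3] = 0x1005;  /* old generation */
        printf("hit=%d\n", check_update_reserved(0x1005, 0x2005));
        printf("cpu1=%llx cpu3=%llx\n",
               (unsigned long long)reserved_asids[1],
               (unsigned long long)reserved_asids[3]);
        return 0;
}
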
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 19211c4a8911..92ddac1e8ca2 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -393,16 +393,16 @@ static struct fault_info {
393 { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" }, 393 { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" },
394 { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" }, 394 { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" },
395 { do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" }, 395 { do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" },
396 { do_bad, SIGBUS, 0, "reserved access flag fault" }, 396 { do_bad, SIGBUS, 0, "unknown 8" },
397 { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" }, 397 { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" },
398 { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" }, 398 { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" },
399 { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 access flag fault" }, 399 { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 access flag fault" },
400 { do_bad, SIGBUS, 0, "reserved permission fault" }, 400 { do_bad, SIGBUS, 0, "unknown 12" },
401 { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" }, 401 { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" },
402 { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" }, 402 { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" },
403 { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 permission fault" }, 403 { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 permission fault" },
404 { do_bad, SIGBUS, 0, "synchronous external abort" }, 404 { do_bad, SIGBUS, 0, "synchronous external abort" },
405 { do_bad, SIGBUS, 0, "asynchronous external abort" }, 405 { do_bad, SIGBUS, 0, "unknown 17" },
406 { do_bad, SIGBUS, 0, "unknown 18" }, 406 { do_bad, SIGBUS, 0, "unknown 18" },
407 { do_bad, SIGBUS, 0, "unknown 19" }, 407 { do_bad, SIGBUS, 0, "unknown 19" },
408 { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, 408 { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
@@ -410,16 +410,16 @@ static struct fault_info {
410 { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, 410 { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
411 { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, 411 { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
412 { do_bad, SIGBUS, 0, "synchronous parity error" }, 412 { do_bad, SIGBUS, 0, "synchronous parity error" },
413 { do_bad, SIGBUS, 0, "asynchronous parity error" }, 413 { do_bad, SIGBUS, 0, "unknown 25" },
414 { do_bad, SIGBUS, 0, "unknown 26" }, 414 { do_bad, SIGBUS, 0, "unknown 26" },
415 { do_bad, SIGBUS, 0, "unknown 27" }, 415 { do_bad, SIGBUS, 0, "unknown 27" },
416 { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" }, 416 { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" },
417 { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" }, 417 { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" },
418 { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" }, 418 { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" },
419 { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" }, 419 { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" },
420 { do_bad, SIGBUS, 0, "unknown 32" }, 420 { do_bad, SIGBUS, 0, "unknown 32" },
421 { do_bad, SIGBUS, BUS_ADRALN, "alignment fault" }, 421 { do_bad, SIGBUS, BUS_ADRALN, "alignment fault" },
422 { do_bad, SIGBUS, 0, "debug event" }, 422 { do_bad, SIGBUS, 0, "unknown 34" },
423 { do_bad, SIGBUS, 0, "unknown 35" }, 423 { do_bad, SIGBUS, 0, "unknown 35" },
424 { do_bad, SIGBUS, 0, "unknown 36" }, 424 { do_bad, SIGBUS, 0, "unknown 36" },
425 { do_bad, SIGBUS, 0, "unknown 37" }, 425 { do_bad, SIGBUS, 0, "unknown 37" },
@@ -433,21 +433,21 @@ static struct fault_info {
433 { do_bad, SIGBUS, 0, "unknown 45" }, 433 { do_bad, SIGBUS, 0, "unknown 45" },
434 { do_bad, SIGBUS, 0, "unknown 46" }, 434 { do_bad, SIGBUS, 0, "unknown 46" },
435 { do_bad, SIGBUS, 0, "unknown 47" }, 435 { do_bad, SIGBUS, 0, "unknown 47" },
436 { do_bad, SIGBUS, 0, "unknown 48" }, 436 { do_bad, SIGBUS, 0, "TLB conflict abort" },
437 { do_bad, SIGBUS, 0, "unknown 49" }, 437 { do_bad, SIGBUS, 0, "unknown 49" },
438 { do_bad, SIGBUS, 0, "unknown 50" }, 438 { do_bad, SIGBUS, 0, "unknown 50" },
439 { do_bad, SIGBUS, 0, "unknown 51" }, 439 { do_bad, SIGBUS, 0, "unknown 51" },
440 { do_bad, SIGBUS, 0, "implementation fault (lockdown abort)" }, 440 { do_bad, SIGBUS, 0, "implementation fault (lockdown abort)" },
441 { do_bad, SIGBUS, 0, "unknown 53" }, 441 { do_bad, SIGBUS, 0, "implementation fault (unsupported exclusive)" },
442 { do_bad, SIGBUS, 0, "unknown 54" }, 442 { do_bad, SIGBUS, 0, "unknown 54" },
443 { do_bad, SIGBUS, 0, "unknown 55" }, 443 { do_bad, SIGBUS, 0, "unknown 55" },
444 { do_bad, SIGBUS, 0, "unknown 56" }, 444 { do_bad, SIGBUS, 0, "unknown 56" },
445 { do_bad, SIGBUS, 0, "unknown 57" }, 445 { do_bad, SIGBUS, 0, "unknown 57" },
446 { do_bad, SIGBUS, 0, "implementation fault (coprocessor abort)" }, 446 { do_bad, SIGBUS, 0, "unknown 58" },
447 { do_bad, SIGBUS, 0, "unknown 59" }, 447 { do_bad, SIGBUS, 0, "unknown 59" },
448 { do_bad, SIGBUS, 0, "unknown 60" }, 448 { do_bad, SIGBUS, 0, "unknown 60" },
449 { do_bad, SIGBUS, 0, "unknown 61" }, 449 { do_bad, SIGBUS, 0, "section domain fault" },
450 { do_bad, SIGBUS, 0, "unknown 62" }, 450 { do_bad, SIGBUS, 0, "page domain fault" },
451 { do_bad, SIGBUS, 0, "unknown 63" }, 451 { do_bad, SIGBUS, 0, "unknown 63" },
452}; 452};
453 453
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index abb66f84d4ac..873e363048c6 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -64,8 +64,12 @@ EXPORT_SYMBOL(phys_mem_access_prot);
64 64
65static void __init *early_alloc(unsigned long sz) 65static void __init *early_alloc(unsigned long sz)
66{ 66{
67 void *ptr = __va(memblock_alloc(sz, sz)); 67 phys_addr_t phys;
68 BUG_ON(!ptr); 68 void *ptr;
69
70 phys = memblock_alloc(sz, sz);
71 BUG_ON(!phys);
72 ptr = __va(phys);
69 memset(ptr, 0, sz); 73 memset(ptr, 0, sz);
70 return ptr; 74 return ptr;
71} 75}
@@ -81,55 +85,19 @@ static void split_pmd(pmd_t *pmd, pte_t *pte)
81 do { 85 do {
82 /* 86 /*
83 * Need to have the least restrictive permissions available 87 * Need to have the least restrictive permissions available
84 * permissions will be fixed up later. Default the new page 88 * permissions will be fixed up later
85 * range as contiguous ptes.
86 */ 89 */
87 set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC_CONT)); 90 set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
88 pfn++; 91 pfn++;
89 } while (pte++, i++, i < PTRS_PER_PTE); 92 } while (pte++, i++, i < PTRS_PER_PTE);
90} 93}
91 94
92/*
93 * Given a PTE with the CONT bit set, determine where the CONT range
94 * starts, and clear the entire range of PTE CONT bits.
95 */
96static void clear_cont_pte_range(pte_t *pte, unsigned long addr)
97{
98 int i;
99
100 pte -= CONT_RANGE_OFFSET(addr);
101 for (i = 0; i < CONT_PTES; i++) {
102 set_pte(pte, pte_mknoncont(*pte));
103 pte++;
104 }
105 flush_tlb_all();
106}
107
108/*
109 * Given a range of PTEs set the pfn and provided page protection flags
110 */
111static void __populate_init_pte(pte_t *pte, unsigned long addr,
112 unsigned long end, phys_addr_t phys,
113 pgprot_t prot)
114{
115 unsigned long pfn = __phys_to_pfn(phys);
116
117 do {
118 /* clear all the bits except the pfn, then apply the prot */
119 set_pte(pte, pfn_pte(pfn, prot));
120 pte++;
121 pfn++;
122 addr += PAGE_SIZE;
123 } while (addr != end);
124}
125
126static void alloc_init_pte(pmd_t *pmd, unsigned long addr, 95static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
127 unsigned long end, phys_addr_t phys, 96 unsigned long end, unsigned long pfn,
128 pgprot_t prot, 97 pgprot_t prot,
129 void *(*alloc)(unsigned long size)) 98 void *(*alloc)(unsigned long size))
130{ 99{
131 pte_t *pte; 100 pte_t *pte;
132 unsigned long next;
133 101
134 if (pmd_none(*pmd) || pmd_sect(*pmd)) { 102 if (pmd_none(*pmd) || pmd_sect(*pmd)) {
135 pte = alloc(PTRS_PER_PTE * sizeof(pte_t)); 103 pte = alloc(PTRS_PER_PTE * sizeof(pte_t));
@@ -142,27 +110,9 @@ static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
142 110
143 pte = pte_offset_kernel(pmd, addr); 111 pte = pte_offset_kernel(pmd, addr);
144 do { 112 do {
145 next = min(end, (addr + CONT_SIZE) & CONT_MASK); 113 set_pte(pte, pfn_pte(pfn, prot));
146 if (((addr | next | phys) & ~CONT_MASK) == 0) { 114 pfn++;
147 /* a block of CONT_PTES */ 115 } while (pte++, addr += PAGE_SIZE, addr != end);
148 __populate_init_pte(pte, addr, next, phys,
149 __pgprot(pgprot_val(prot) | PTE_CONT));
150 } else {
151 /*
152 * If the range being split is already inside of a
153 * contiguous range but this PTE isn't going to be
154 * contiguous, then we want to unmark the adjacent
155 * ranges, then update the portion of the range we
156 * are interrested in.
157 */
158 clear_cont_pte_range(pte, addr);
159 __populate_init_pte(pte, addr, next, phys, prot);
160 }
161
162 pte += (next - addr) >> PAGE_SHIFT;
163 phys += next - addr;
164 addr = next;
165 } while (addr != end);
166} 116}
167 117
168static void split_pud(pud_t *old_pud, pmd_t *pmd) 118static void split_pud(pud_t *old_pud, pmd_t *pmd)
@@ -223,7 +173,8 @@ static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
223 } 173 }
224 } 174 }
225 } else { 175 } else {
226 alloc_init_pte(pmd, addr, next, phys, prot, alloc); 176 alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
177 prot, alloc);
227 } 178 }
228 phys += next - addr; 179 phys += next - addr;
229 } while (pmd++, addr = next, addr != end); 180 } while (pmd++, addr = next, addr != end);
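
With the contiguous-PTE handling removed, alloc_init_pte() is back to a plain per-page loop: it now takes a starting pfn and writes one entry per page with the caller's protection bits. A toy model of that loop, using an array of integers in place of a real page table (field layout invented for the example):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PTRS_PER_PTE 8                 /* tiny table for the example */

/* A "PTE" here is just pfn in the high bits and prot in the low bits. */
static uint64_t pte_table[PTRS_PER_PTE];

static void fill_pte_range(unsigned int idx, unsigned int nr,
                           uint64_t pfn, uint64_t prot)
{
        while (nr--) {
                pte_table[idx++] = (pfn << PAGE_SHIFT) | prot;
                pfn++;                 /* next entry maps the next page */
        }
}

int main(void)
{
        int i;

        fill_pte_range(2, 4, 0x80000, 0x3);
        for (i = 0; i < PTRS_PER_PTE; i++)
                printf("pte[%d] = %016llx\n", i,
                       (unsigned long long)pte_table[i]);
        return 0;
}
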
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index d6a53ef2350b..b162ad70effc 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -139,6 +139,12 @@ static inline int epilogue_offset(const struct jit_ctx *ctx)
139/* Stack must be multiples of 16B */ 139/* Stack must be multiples of 16B */
140#define STACK_ALIGN(sz) (((sz) + 15) & ~15) 140#define STACK_ALIGN(sz) (((sz) + 15) & ~15)
141 141
142#define _STACK_SIZE \
143 (MAX_BPF_STACK \
144 + 4 /* extra for skb_copy_bits buffer */)
145
146#define STACK_SIZE STACK_ALIGN(_STACK_SIZE)
147
142static void build_prologue(struct jit_ctx *ctx) 148static void build_prologue(struct jit_ctx *ctx)
143{ 149{
144 const u8 r6 = bpf2a64[BPF_REG_6]; 150 const u8 r6 = bpf2a64[BPF_REG_6];
@@ -150,10 +156,6 @@ static void build_prologue(struct jit_ctx *ctx)
150 const u8 rx = bpf2a64[BPF_REG_X]; 156 const u8 rx = bpf2a64[BPF_REG_X];
151 const u8 tmp1 = bpf2a64[TMP_REG_1]; 157 const u8 tmp1 = bpf2a64[TMP_REG_1];
152 const u8 tmp2 = bpf2a64[TMP_REG_2]; 158 const u8 tmp2 = bpf2a64[TMP_REG_2];
153 int stack_size = MAX_BPF_STACK;
154
155 stack_size += 4; /* extra for skb_copy_bits buffer */
156 stack_size = STACK_ALIGN(stack_size);
157 159
158 /* 160 /*
159 * BPF prog stack layout 161 * BPF prog stack layout
@@ -165,12 +167,13 @@ static void build_prologue(struct jit_ctx *ctx)
165 * | ... | callee saved registers 167 * | ... | callee saved registers
166 * +-----+ 168 * +-----+
167 * | | x25/x26 169 * | | x25/x26
168 * BPF fp register => -80:+-----+ 170 * BPF fp register => -80:+-----+ <= (BPF_FP)
169 * | | 171 * | |
170 * | ... | BPF prog stack 172 * | ... | BPF prog stack
171 * | | 173 * | |
172 * | | 174 * +-----+ <= (BPF_FP - MAX_BPF_STACK)
173 * current A64_SP => +-----+ 175 * |RSVD | JIT scratchpad
176 * current A64_SP => +-----+ <= (BPF_FP - STACK_SIZE)
174 * | | 177 * | |
175 * | ... | Function call stack 178 * | ... | Function call stack
176 * | | 179 * | |
@@ -196,7 +199,7 @@ static void build_prologue(struct jit_ctx *ctx)
196 emit(A64_MOV(1, fp, A64_SP), ctx); 199 emit(A64_MOV(1, fp, A64_SP), ctx);
197 200
198 /* Set up function call stack */ 201 /* Set up function call stack */
199 emit(A64_SUB_I(1, A64_SP, A64_SP, stack_size), ctx); 202 emit(A64_SUB_I(1, A64_SP, A64_SP, STACK_SIZE), ctx);
200 203
201 /* Clear registers A and X */ 204 /* Clear registers A and X */
202 emit_a64_mov_i64(ra, 0, ctx); 205 emit_a64_mov_i64(ra, 0, ctx);
@@ -213,13 +216,9 @@ static void build_epilogue(struct jit_ctx *ctx)
213 const u8 fp = bpf2a64[BPF_REG_FP]; 216 const u8 fp = bpf2a64[BPF_REG_FP];
214 const u8 tmp1 = bpf2a64[TMP_REG_1]; 217 const u8 tmp1 = bpf2a64[TMP_REG_1];
215 const u8 tmp2 = bpf2a64[TMP_REG_2]; 218 const u8 tmp2 = bpf2a64[TMP_REG_2];
216 int stack_size = MAX_BPF_STACK;
217
218 stack_size += 4; /* extra for skb_copy_bits buffer */
219 stack_size = STACK_ALIGN(stack_size);
220 219
221 /* We're done with BPF stack */ 220 /* We're done with BPF stack */
222 emit(A64_ADD_I(1, A64_SP, A64_SP, stack_size), ctx); 221 emit(A64_ADD_I(1, A64_SP, A64_SP, STACK_SIZE), ctx);
223 222
224 /* Restore fs (x25) and x26 */ 223 /* Restore fs (x25) and x26 */
225 emit(A64_POP(fp, A64_R(26), A64_SP), ctx); 224 emit(A64_POP(fp, A64_R(26), A64_SP), ctx);
@@ -591,7 +590,25 @@ emit_cond_jmp:
591 case BPF_ST | BPF_MEM | BPF_H: 590 case BPF_ST | BPF_MEM | BPF_H:
592 case BPF_ST | BPF_MEM | BPF_B: 591 case BPF_ST | BPF_MEM | BPF_B:
593 case BPF_ST | BPF_MEM | BPF_DW: 592 case BPF_ST | BPF_MEM | BPF_DW:
594 goto notyet; 593 /* Load imm to a register then store it */
594 ctx->tmp_used = 1;
595 emit_a64_mov_i(1, tmp2, off, ctx);
596 emit_a64_mov_i(1, tmp, imm, ctx);
597 switch (BPF_SIZE(code)) {
598 case BPF_W:
599 emit(A64_STR32(tmp, dst, tmp2), ctx);
600 break;
601 case BPF_H:
602 emit(A64_STRH(tmp, dst, tmp2), ctx);
603 break;
604 case BPF_B:
605 emit(A64_STRB(tmp, dst, tmp2), ctx);
606 break;
607 case BPF_DW:
608 emit(A64_STR64(tmp, dst, tmp2), ctx);
609 break;
610 }
611 break;
595 612
596 /* STX: *(size *)(dst + off) = src */ 613 /* STX: *(size *)(dst + off) = src */
597 case BPF_STX | BPF_MEM | BPF_W: 614 case BPF_STX | BPF_MEM | BPF_W:
@@ -658,7 +675,7 @@ emit_cond_jmp:
658 return -EINVAL; 675 return -EINVAL;
659 } 676 }
660 emit_a64_mov_i64(r3, size, ctx); 677 emit_a64_mov_i64(r3, size, ctx);
661 emit(A64_ADD_I(1, r4, fp, MAX_BPF_STACK), ctx); 678 emit(A64_SUB_I(1, r4, fp, STACK_SIZE), ctx);
662 emit_a64_mov_i64(r5, (unsigned long)bpf_load_pointer, ctx); 679 emit_a64_mov_i64(r5, (unsigned long)bpf_load_pointer, ctx);
663 emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx); 680 emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
664 emit(A64_MOV(1, A64_FP, A64_SP), ctx); 681 emit(A64_MOV(1, A64_FP, A64_SP), ctx);
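
The JIT change computes the stack reservation once at preprocessing time: _STACK_SIZE is MAX_BPF_STACK plus 4 bytes of scratch for skb_copy_bits(), and STACK_SIZE rounds that up to the 16-byte alignment AArch64 requires for SP. Per the updated layout comment, the scratchpad sits between BPF_FP - MAX_BPF_STACK and BPF_FP - STACK_SIZE, which is why the load helper's buffer pointer is now FP - STACK_SIZE. The macros in isolation, with MAX_BPF_STACK set to its usual 512 for the example:

#include <stdio.h>

#define MAX_BPF_STACK 512                       /* assumed, as in the kernel */
#define STACK_ALIGN(sz) (((sz) + 15) & ~15)     /* SP must stay 16-byte aligned */

#define _STACK_SIZE (MAX_BPF_STACK + 4)         /* + skb_copy_bits scratch */
#define STACK_SIZE  STACK_ALIGN(_STACK_SIZE)

int main(void)
{
        printf("raw=%d aligned=%d\n", _STACK_SIZE, STACK_SIZE);
        return 0;
}
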
diff --git a/arch/blackfin/kernel/perf_event.c b/arch/blackfin/kernel/perf_event.c
index 1e9c8b0bf486..170d786807c4 100644
--- a/arch/blackfin/kernel/perf_event.c
+++ b/arch/blackfin/kernel/perf_event.c
@@ -14,7 +14,7 @@
14 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar 14 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
15 * Copyright (C) 2009 Jaswinder Singh Rajput 15 * Copyright (C) 2009 Jaswinder Singh Rajput
16 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter 16 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
17 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 17 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
18 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com> 18 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
19 * 19 *
20 * ppc: 20 * ppc:
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
index db73390568c8..74c132d901bd 100644
--- a/arch/ia64/include/asm/unistd.h
+++ b/arch/ia64/include/asm/unistd.h
@@ -11,7 +11,7 @@
11 11
12 12
13 13
14#define NR_syscalls 322 /* length of syscall table */ 14#define NR_syscalls 323 /* length of syscall table */
15 15
16/* 16/*
17 * The following defines stop scripts/checksyscalls.sh from complaining about 17 * The following defines stop scripts/checksyscalls.sh from complaining about
diff --git a/arch/ia64/include/uapi/asm/unistd.h b/arch/ia64/include/uapi/asm/unistd.h
index 9038726e7d26..762edce7572e 100644
--- a/arch/ia64/include/uapi/asm/unistd.h
+++ b/arch/ia64/include/uapi/asm/unistd.h
@@ -335,5 +335,6 @@
335#define __NR_userfaultfd 1343 335#define __NR_userfaultfd 1343
336#define __NR_membarrier 1344 336#define __NR_membarrier 1344
337#define __NR_kcmp 1345 337#define __NR_kcmp 1345
338#define __NR_mlock2 1346
338 339
339#endif /* _UAPI_ASM_IA64_UNISTD_H */ 340#endif /* _UAPI_ASM_IA64_UNISTD_H */
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index dcd97f84d065..534a74acb849 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1771,5 +1771,6 @@ sys_call_table:
1771 data8 sys_userfaultfd 1771 data8 sys_userfaultfd
1772 data8 sys_membarrier 1772 data8 sys_membarrier
1773 data8 sys_kcmp // 1345 1773 data8 sys_kcmp // 1345
1774 data8 sys_mlock2
1774 1775
1775 .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls 1776 .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
diff --git a/arch/m68k/coldfire/m54xx.c b/arch/m68k/coldfire/m54xx.c
index f7836c6a6b60..c32f76791f48 100644
--- a/arch/m68k/coldfire/m54xx.c
+++ b/arch/m68k/coldfire/m54xx.c
@@ -98,7 +98,7 @@ static void __init mcf54xx_bootmem_alloc(void)
98 memstart = PAGE_ALIGN(_ramstart); 98 memstart = PAGE_ALIGN(_ramstart);
99 min_low_pfn = PFN_DOWN(_rambase); 99 min_low_pfn = PFN_DOWN(_rambase);
100 start_pfn = PFN_DOWN(memstart); 100 start_pfn = PFN_DOWN(memstart);
101 max_low_pfn = PFN_DOWN(_ramend); 101 max_pfn = max_low_pfn = PFN_DOWN(_ramend);
102 high_memory = (void *)_ramend; 102 high_memory = (void *)_ramend;
103 103
104 m68k_virt_to_node_shift = fls(_ramend - _rambase - 1) - 6; 104 m68k_virt_to_node_shift = fls(_ramend - _rambase - 1) - 6;
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index 0793a7f17417..f9d96bf86910 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -4,7 +4,7 @@
4#include <uapi/asm/unistd.h> 4#include <uapi/asm/unistd.h>
5 5
6 6
7#define NR_syscalls 375 7#define NR_syscalls 376
8 8
9#define __ARCH_WANT_OLD_READDIR 9#define __ARCH_WANT_OLD_READDIR
10#define __ARCH_WANT_OLD_STAT 10#define __ARCH_WANT_OLD_STAT
diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h
index 5e6fae6c275f..36cf129de663 100644
--- a/arch/m68k/include/uapi/asm/unistd.h
+++ b/arch/m68k/include/uapi/asm/unistd.h
@@ -380,5 +380,6 @@
380#define __NR_sendmmsg 372 380#define __NR_sendmmsg 372
381#define __NR_userfaultfd 373 381#define __NR_userfaultfd 373
382#define __NR_membarrier 374 382#define __NR_membarrier 374
383#define __NR_mlock2 375
383 384
384#endif /* _UAPI_ASM_M68K_UNISTD_H_ */ 385#endif /* _UAPI_ASM_M68K_UNISTD_H_ */
diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c
index 88c27d94a721..76b9113f3092 100644
--- a/arch/m68k/kernel/setup_no.c
+++ b/arch/m68k/kernel/setup_no.c
@@ -238,11 +238,14 @@ void __init setup_arch(char **cmdline_p)
238 * Give all the memory to the bootmap allocator, tell it to put the 238 * Give all the memory to the bootmap allocator, tell it to put the
239 * boot mem_map at the start of memory. 239 * boot mem_map at the start of memory.
240 */ 240 */
241 min_low_pfn = PFN_DOWN(memory_start);
242 max_pfn = max_low_pfn = PFN_DOWN(memory_end);
243
241 bootmap_size = init_bootmem_node( 244 bootmap_size = init_bootmem_node(
242 NODE_DATA(0), 245 NODE_DATA(0),
243 memory_start >> PAGE_SHIFT, /* map goes here */ 246 min_low_pfn, /* map goes here */
244 PAGE_OFFSET >> PAGE_SHIFT, /* 0 on coldfire */ 247 PFN_DOWN(PAGE_OFFSET),
245 memory_end >> PAGE_SHIFT); 248 max_pfn);
246 /* 249 /*
247 * Free the usable memory, we have to make sure we do not free 250 * Free the usable memory, we have to make sure we do not free
248 * the bootmem bitmap so we then reserve it after freeing it :-) 251 * the bootmem bitmap so we then reserve it after freeing it :-)
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index 5dd0e80042f5..282cd903f4c4 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -395,3 +395,4 @@ ENTRY(sys_call_table)
395 .long sys_sendmmsg 395 .long sys_sendmmsg
396 .long sys_userfaultfd 396 .long sys_userfaultfd
397 .long sys_membarrier 397 .long sys_membarrier
398 .long sys_mlock2 /* 375 */
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index b958916e5eac..8f37fdd80be9 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -250,7 +250,7 @@ void __init paging_init(void)
250 high_memory = phys_to_virt(max_addr); 250 high_memory = phys_to_virt(max_addr);
251 251
252 min_low_pfn = availmem >> PAGE_SHIFT; 252 min_low_pfn = availmem >> PAGE_SHIFT;
253 max_low_pfn = max_addr >> PAGE_SHIFT; 253 max_pfn = max_low_pfn = max_addr >> PAGE_SHIFT;
254 254
255 for (i = 0; i < m68k_num_memory; i++) { 255 for (i = 0; i < m68k_num_memory; i++) {
256 addr = m68k_memory[i].addr; 256 addr = m68k_memory[i].addr;
diff --git a/arch/m68k/sun3/config.c b/arch/m68k/sun3/config.c
index a8b942bf7163..2a5f43a68ae3 100644
--- a/arch/m68k/sun3/config.c
+++ b/arch/m68k/sun3/config.c
@@ -118,13 +118,13 @@ static void __init sun3_bootmem_alloc(unsigned long memory_start,
118 memory_end = memory_end & PAGE_MASK; 118 memory_end = memory_end & PAGE_MASK;
119 119
120 start_page = __pa(memory_start) >> PAGE_SHIFT; 120 start_page = __pa(memory_start) >> PAGE_SHIFT;
121 num_pages = __pa(memory_end) >> PAGE_SHIFT; 121 max_pfn = num_pages = __pa(memory_end) >> PAGE_SHIFT;
122 122
123 high_memory = (void *)memory_end; 123 high_memory = (void *)memory_end;
124 availmem = memory_start; 124 availmem = memory_start;
125 125
126 m68k_setup_node(0); 126 m68k_setup_node(0);
127 availmem += init_bootmem_node(NODE_DATA(0), start_page, 0, num_pages); 127 availmem += init_bootmem(start_page, num_pages);
128 availmem = (availmem + (PAGE_SIZE-1)) & PAGE_MASK; 128 availmem = (availmem + (PAGE_SIZE-1)) & PAGE_MASK;
129 129
130 free_bootmem(__pa(availmem), memory_end - (availmem)); 130 free_bootmem(__pa(availmem), memory_end - (availmem));
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
index c89da6312954..bf4dec229437 100644
--- a/arch/microblaze/kernel/dma.c
+++ b/arch/microblaze/kernel/dma.c
@@ -61,7 +61,8 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
61 /* FIXME this part of code is untested */ 61 /* FIXME this part of code is untested */
62 for_each_sg(sgl, sg, nents, i) { 62 for_each_sg(sgl, sg, nents, i) {
63 sg->dma_address = sg_phys(sg); 63 sg->dma_address = sg_phys(sg);
64 __dma_sync(sg_phys(sg), sg->length, direction); 64 __dma_sync(page_to_phys(sg_page(sg)) + sg->offset,
65 sg->length, direction);
65 } 66 }
66 67
67 return nents; 68 return nents;
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index 5305d694ffe5..095ecafe6bd3 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -599,7 +599,7 @@ extern void __put_user_unknown(void);
599 * On error, the variable @x is set to zero. 599 * On error, the variable @x is set to zero.
600 */ 600 */
601#define __get_user_unaligned(x,ptr) \ 601#define __get_user_unaligned(x,ptr) \
602 __get_user__unalignednocheck((x),(ptr),sizeof(*(ptr))) 602 __get_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))
603 603
604/* 604/*
605 * Yuck. We need two variants, one for 64bit operation and one 605 * Yuck. We need two variants, one for 64bit operation and one
@@ -620,8 +620,8 @@ extern void __get_user_unaligned_unknown(void);
620do { \ 620do { \
621 switch (size) { \ 621 switch (size) { \
622 case 1: __get_data_asm(val, "lb", ptr); break; \ 622 case 1: __get_data_asm(val, "lb", ptr); break; \
623 case 2: __get_user_unaligned_asm(val, "ulh", ptr); break; \ 623 case 2: __get_data_unaligned_asm(val, "ulh", ptr); break; \
624 case 4: __get_user_unaligned_asm(val, "ulw", ptr); break; \ 624 case 4: __get_data_unaligned_asm(val, "ulw", ptr); break; \
625 case 8: __GET_USER_UNALIGNED_DW(val, ptr); break; \ 625 case 8: __GET_USER_UNALIGNED_DW(val, ptr); break; \
626 default: __get_user_unaligned_unknown(); break; \ 626 default: __get_user_unaligned_unknown(); break; \
627 } \ 627 } \
@@ -1122,9 +1122,15 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
1122 __cu_to = (to); \ 1122 __cu_to = (to); \
1123 __cu_from = (from); \ 1123 __cu_from = (from); \
1124 __cu_len = (n); \ 1124 __cu_len = (n); \
1125 might_fault(); \ 1125 if (eva_kernel_access()) { \
1126 __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \ 1126 __cu_len = __invoke_copy_from_kernel(__cu_to, \
1127 __cu_len); \ 1127 __cu_from, \
1128 __cu_len); \
1129 } else { \
1130 might_fault(); \
1131 __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
1132 __cu_len); \
1133 } \
1128 __cu_len; \ 1134 __cu_len; \
1129}) 1135})
1130 1136
@@ -1229,16 +1235,28 @@ __clear_user(void __user *addr, __kernel_size_t size)
1229{ 1235{
1230 __kernel_size_t res; 1236 __kernel_size_t res;
1231 1237
1232 might_fault(); 1238 if (eva_kernel_access()) {
1233 __asm__ __volatile__( 1239 __asm__ __volatile__(
1234 "move\t$4, %1\n\t" 1240 "move\t$4, %1\n\t"
1235 "move\t$5, $0\n\t" 1241 "move\t$5, $0\n\t"
1236 "move\t$6, %2\n\t" 1242 "move\t$6, %2\n\t"
1237 __MODULE_JAL(__bzero) 1243 __MODULE_JAL(__bzero_kernel)
1238 "move\t%0, $6" 1244 "move\t%0, $6"
1239 : "=r" (res) 1245 : "=r" (res)
1240 : "r" (addr), "r" (size) 1246 : "r" (addr), "r" (size)
1241 : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"); 1247 : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
1248 } else {
1249 might_fault();
1250 __asm__ __volatile__(
1251 "move\t$4, %1\n\t"
1252 "move\t$5, $0\n\t"
1253 "move\t$6, %2\n\t"
1254 __MODULE_JAL(__bzero)
1255 "move\t%0, $6"
1256 : "=r" (res)
1257 : "r" (addr), "r" (size)
1258 : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
1259 }
1242 1260
1243 return res; 1261 return res;
1244} 1262}
@@ -1384,7 +1402,7 @@ static inline long strlen_user(const char __user *s)
1384 might_fault(); 1402 might_fault();
1385 __asm__ __volatile__( 1403 __asm__ __volatile__(
1386 "move\t$4, %1\n\t" 1404 "move\t$4, %1\n\t"
1387 __MODULE_JAL(__strlen_kernel_asm) 1405 __MODULE_JAL(__strlen_user_asm)
1388 "move\t%0, $2" 1406 "move\t%0, $2"
1389 : "=r" (res) 1407 : "=r" (res)
1390 : "r" (s) 1408 : "r" (s)
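
The uaccess.h hunks make the copy-from-user and __clear_user() paths pick the kernel-window primitive when eva_kernel_access() reports a kernel address, instead of always taking the user path (and strlen_user() now correctly jumps to __strlen_user_asm). A hedged sketch of that run-time dispatch shape, with stand-in functions rather than the real MIPS assembly helpers:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-ins: the real code invokes either the kernel-window
 * or the user-window assembly routine depending on the access mode. */
static size_t copy_from_kernel_window(void *to, const void *from, size_t n)
{
        memcpy(to, from, n);
        return 0;                      /* 0 bytes left uncopied */
}

static size_t copy_from_user_window(void *to, const void *from, size_t n)
{
        memcpy(to, from, n);           /* pretend this faults gracefully */
        return 0;
}

static bool eva_kernel_access(void) { return true; }  /* assumption for demo */

static size_t do_copy_from(void *to, const void *from, size_t n)
{
        if (eva_kernel_access())
                return copy_from_kernel_window(to, from, n);
        return copy_from_user_window(to, from, n);
}

int main(void)
{
        char src[] = "eva", dst[4];

        printf("left=%zu dst=%s\n", do_copy_from(dst, src, sizeof(src)), dst);
        return 0;
}
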
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
index 8fd5a276cad2..ac81edd44563 100644
--- a/arch/mips/kernel/cps-vec.S
+++ b/arch/mips/kernel/cps-vec.S
@@ -257,7 +257,6 @@ LEAF(mips_cps_core_init)
257 has_mt t0, 3f 257 has_mt t0, 3f
258 258
259 .set push 259 .set push
260 .set mips64r2
261 .set mt 260 .set mt
262 261
263 /* Only allow 1 TC per VPE to execute... */ 262 /* Only allow 1 TC per VPE to execute... */
@@ -376,7 +375,6 @@ LEAF(mips_cps_boot_vpes)
376 nop 375 nop
377 376
378 .set push 377 .set push
379 .set mips64r2
380 .set mt 378 .set mt
381 379
3821: /* Enter VPE configuration state */ 3801: /* Enter VPE configuration state */
diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c
index 291af0b5c482..e2b6ab74643d 100644
--- a/arch/mips/kernel/mips_ksyms.c
+++ b/arch/mips/kernel/mips_ksyms.c
@@ -17,6 +17,7 @@
17#include <asm/fpu.h> 17#include <asm/fpu.h>
18#include <asm/msa.h> 18#include <asm/msa.h>
19 19
20extern void *__bzero_kernel(void *__s, size_t __count);
20extern void *__bzero(void *__s, size_t __count); 21extern void *__bzero(void *__s, size_t __count);
21extern long __strncpy_from_kernel_nocheck_asm(char *__to, 22extern long __strncpy_from_kernel_nocheck_asm(char *__to,
22 const char *__from, long __len); 23 const char *__from, long __len);
@@ -64,6 +65,7 @@ EXPORT_SYMBOL(__copy_from_user_eva);
64EXPORT_SYMBOL(__copy_in_user_eva); 65EXPORT_SYMBOL(__copy_in_user_eva);
65EXPORT_SYMBOL(__copy_to_user_eva); 66EXPORT_SYMBOL(__copy_to_user_eva);
66EXPORT_SYMBOL(__copy_user_inatomic_eva); 67EXPORT_SYMBOL(__copy_user_inatomic_eva);
68EXPORT_SYMBOL(__bzero_kernel);
67#endif 69#endif
68EXPORT_SYMBOL(__bzero); 70EXPORT_SYMBOL(__bzero);
69EXPORT_SYMBOL(__strncpy_from_kernel_nocheck_asm); 71EXPORT_SYMBOL(__strncpy_from_kernel_nocheck_asm);
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index d5fa3eaf39a1..41b1b090f56f 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -1581,7 +1581,7 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
1581 1581
1582 base = (inst >> 21) & 0x1f; 1582 base = (inst >> 21) & 0x1f;
1583 op_inst = (inst >> 16) & 0x1f; 1583 op_inst = (inst >> 16) & 0x1f;
1584 offset = inst & 0xffff; 1584 offset = (int16_t)inst;
1585 cache = (inst >> 16) & 0x3; 1585 cache = (inst >> 16) & 0x3;
1586 op = (inst >> 18) & 0x7; 1586 op = (inst >> 18) & 0x7;
1587 1587
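
The emulate.c fix matters because the CACHE instruction's 16-bit offset is signed: masking with 0xffff turns a negative offset like -4 into 65532, while casting through int16_t sign-extends it correctly. A short demonstration (the instruction word is invented for the example):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t inst = 0xbc90fffc;            /* imaginary encoding, offset -4 */
        int32_t masked = inst & 0xffff;        /* old code: 65532, wrong */
        int32_t extended = (int16_t)inst;      /* new code: -4, correct */

        printf("masked=%d extended=%d\n", masked, extended);
        return 0;
}
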
diff --git a/arch/mips/kvm/locore.S b/arch/mips/kvm/locore.S
index 7bab3a4e8f7d..7e2210846b8b 100644
--- a/arch/mips/kvm/locore.S
+++ b/arch/mips/kvm/locore.S
@@ -157,9 +157,11 @@ FEXPORT(__kvm_mips_vcpu_run)
157 157
158FEXPORT(__kvm_mips_load_asid) 158FEXPORT(__kvm_mips_load_asid)
159 /* Set the ASID for the Guest Kernel */ 159 /* Set the ASID for the Guest Kernel */
160 INT_SLL t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */ 160 PTR_L t0, VCPU_COP0(k1)
161 /* addresses shift to 0x80000000 */ 161 LONG_L t0, COP0_STATUS(t0)
162 bltz t0, 1f /* If kernel */ 162 andi t0, KSU_USER | ST0_ERL | ST0_EXL
163 xori t0, KSU_USER
164 bnez t0, 1f /* If kernel */
163 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */ 165 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
164 INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */ 166 INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */
1651: 1671:
@@ -474,9 +476,11 @@ __kvm_mips_return_to_guest:
474 mtc0 t0, CP0_EPC 476 mtc0 t0, CP0_EPC
475 477
476 /* Set the ASID for the Guest Kernel */ 478 /* Set the ASID for the Guest Kernel */
477 INT_SLL t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */ 479 PTR_L t0, VCPU_COP0(k1)
478 /* addresses shift to 0x80000000 */ 480 LONG_L t0, COP0_STATUS(t0)
479 bltz t0, 1f /* If kernel */ 481 andi t0, KSU_USER | ST0_ERL | ST0_EXL
482 xori t0, KSU_USER
483 bnez t0, 1f /* If kernel */
480 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */ 484 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
481 INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */ 485 INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */
4821: 4861:
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 49ff3bfc007e..b9b803facdbf 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -279,7 +279,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
279 279
280 if (!gebase) { 280 if (!gebase) {
281 err = -ENOMEM; 281 err = -ENOMEM;
282 goto out_free_cpu; 282 goto out_uninit_cpu;
283 } 283 }
284 kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n", 284 kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
285 ALIGN(size, PAGE_SIZE), gebase); 285 ALIGN(size, PAGE_SIZE), gebase);
@@ -343,6 +343,9 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
343out_free_gebase: 343out_free_gebase:
344 kfree(gebase); 344 kfree(gebase);
345 345
346out_uninit_cpu:
347 kvm_vcpu_uninit(vcpu);
348
346out_free_cpu: 349out_free_cpu:
347 kfree(vcpu); 350 kfree(vcpu);
348 351
diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
index b8e63fd00375..8f0019a2e5c8 100644
--- a/arch/mips/lib/memset.S
+++ b/arch/mips/lib/memset.S
@@ -283,6 +283,8 @@ LEAF(memset)
2831: 2831:
284#ifndef CONFIG_EVA 284#ifndef CONFIG_EVA
285FEXPORT(__bzero) 285FEXPORT(__bzero)
286#else
287FEXPORT(__bzero_kernel)
286#endif 288#endif
287 __BUILD_BZERO LEGACY_MODE 289 __BUILD_BZERO LEGACY_MODE
288 290
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index d8117be729a2..730d394ce5f0 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -145,7 +145,7 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
145 145
146 gfp = massage_gfp_flags(dev, gfp); 146 gfp = massage_gfp_flags(dev, gfp);
147 147
148 if (IS_ENABLED(CONFIG_DMA_CMA) && !(gfp & GFP_ATOMIC)) 148 if (IS_ENABLED(CONFIG_DMA_CMA) && gfpflags_allow_blocking(gfp))
149 page = dma_alloc_from_contiguous(dev, 149 page = dma_alloc_from_contiguous(dev,
150 count, get_order(size)); 150 count, get_order(size));
151 if (!page) 151 if (!page)
diff --git a/arch/mips/pci/pci-rt2880.c b/arch/mips/pci/pci-rt2880.c
index 8a978022630b..a245cad4372a 100644
--- a/arch/mips/pci/pci-rt2880.c
+++ b/arch/mips/pci/pci-rt2880.c
@@ -11,6 +11,7 @@
11 * by the Free Software Foundation. 11 * by the Free Software Foundation.
12 */ 12 */
13 13
14#include <linux/delay.h>
14#include <linux/types.h> 15#include <linux/types.h>
15#include <linux/pci.h> 16#include <linux/pci.h>
16#include <linux/io.h> 17#include <linux/io.h>
@@ -220,7 +221,6 @@ int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
220static int rt288x_pci_probe(struct platform_device *pdev) 221static int rt288x_pci_probe(struct platform_device *pdev)
221{ 222{
222 void __iomem *io_map_base; 223 void __iomem *io_map_base;
223 int i;
224 224
225 rt2880_pci_base = ioremap_nocache(RT2880_PCI_BASE, PAGE_SIZE); 225 rt2880_pci_base = ioremap_nocache(RT2880_PCI_BASE, PAGE_SIZE);
226 226
@@ -232,8 +232,7 @@ static int rt288x_pci_probe(struct platform_device *pdev)
232 ioport_resource.end = RT2880_PCI_IO_BASE + RT2880_PCI_IO_SIZE - 1; 232 ioport_resource.end = RT2880_PCI_IO_BASE + RT2880_PCI_IO_SIZE - 1;
233 233
234 rt2880_pci_reg_write(0, RT2880_PCI_REG_PCICFG_ADDR); 234 rt2880_pci_reg_write(0, RT2880_PCI_REG_PCICFG_ADDR);
235 for (i = 0; i < 0xfffff; i++) 235 udelay(1);
236 ;
237 236
238 rt2880_pci_reg_write(0x79, RT2880_PCI_REG_ARBCTL); 237 rt2880_pci_reg_write(0x79, RT2880_PCI_REG_ARBCTL);
239 rt2880_pci_reg_write(0x07FF0001, RT2880_PCI_REG_BAR0SETUP_ADDR); 238 rt2880_pci_reg_write(0x07FF0001, RT2880_PCI_REG_BAR0SETUP_ADDR);
diff --git a/arch/mips/pmcs-msp71xx/msp_setup.c b/arch/mips/pmcs-msp71xx/msp_setup.c
index 4f925e06c414..9d293b3e9130 100644
--- a/arch/mips/pmcs-msp71xx/msp_setup.c
+++ b/arch/mips/pmcs-msp71xx/msp_setup.c
@@ -10,6 +10,8 @@
10 * option) any later version. 10 * option) any later version.
11 */ 11 */
12 12
13#include <linux/delay.h>
14
13#include <asm/bootinfo.h> 15#include <asm/bootinfo.h>
14#include <asm/cacheflush.h> 16#include <asm/cacheflush.h>
15#include <asm/idle.h> 17#include <asm/idle.h>
@@ -37,7 +39,6 @@ extern void msp_serial_setup(void);
37void msp7120_reset(void) 39void msp7120_reset(void)
38{ 40{
39 void *start, *end, *iptr; 41 void *start, *end, *iptr;
40 register int i;
41 42
 42 /* Disable all interrupts */ 43 /* Disable all interrupts */
43 local_irq_disable(); 44 local_irq_disable();
@@ -77,7 +78,7 @@ void msp7120_reset(void)
77 */ 78 */
78 79
79 /* Wait a bit for the DDRC to settle */ 80 /* Wait a bit for the DDRC to settle */
80 for (i = 0; i < 100000000; i++); 81 mdelay(125);
81 82
82#if defined(CONFIG_PMC_MSP7120_GW) 83#if defined(CONFIG_PMC_MSP7120_GW)
83 /* 84 /*
diff --git a/arch/mips/sni/reset.c b/arch/mips/sni/reset.c
index 244f9427625b..6afa34346b81 100644
--- a/arch/mips/sni/reset.c
+++ b/arch/mips/sni/reset.c
@@ -3,6 +3,8 @@
3 * 3 *
4 * Reset a SNI machine. 4 * Reset a SNI machine.
5 */ 5 */
6#include <linux/delay.h>
7
6#include <asm/io.h> 8#include <asm/io.h>
7#include <asm/reboot.h> 9#include <asm/reboot.h>
8#include <asm/sni.h> 10#include <asm/sni.h>
@@ -24,7 +26,7 @@ static inline void kb_wait(void)
24/* XXX This ends up at the ARC firmware prompt ... */ 26/* XXX This ends up at the ARC firmware prompt ... */
25void sni_machine_restart(char *command) 27void sni_machine_restart(char *command)
26{ 28{
27 int i, j; 29 int i;
28 30
 29 /* This does a normal reset via the keyboard controller like a PC. 31 /* This does a normal reset via the keyboard controller like a PC.
30 We can do that easier ... */ 32 We can do that easier ... */
@@ -32,9 +34,9 @@ void sni_machine_restart(char *command)
32 for (;;) { 34 for (;;) {
33 for (i = 0; i < 100; i++) { 35 for (i = 0; i < 100; i++) {
34 kb_wait(); 36 kb_wait();
35 for (j = 0; j < 100000 ; j++) 37 udelay(50);
36 /* nothing */;
37 outb_p(0xfe, 0x64); /* pulse reset low */ 38 outb_p(0xfe, 0x64); /* pulse reset low */
39 udelay(50);
38 } 40 }
39 } 41 }
40} 42}
diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
index ef5f348f386a..018f8c7b94f2 100644
--- a/arch/mips/vdso/Makefile
+++ b/arch/mips/vdso/Makefile
@@ -26,8 +26,8 @@ aflags-vdso := $(ccflags-vdso) \
26# the comments on that file. 26# the comments on that file.
27# 27#
28ifndef CONFIG_CPU_MIPSR6 28ifndef CONFIG_CPU_MIPSR6
29 ifeq ($(call ld-ifversion, -gt, 22400000, y),) 29 ifeq ($(call ld-ifversion, -lt, 22500000, y),)
30 $(warning MIPS VDSO requires binutils > 2.24) 30 $(warning MIPS VDSO requires binutils >= 2.25)
31 obj-vdso-y := $(filter-out gettimeofday.o, $(obj-vdso-y)) 31 obj-vdso-y := $(filter-out gettimeofday.o, $(obj-vdso-y))
32 ccflags-vdso += -DDISABLE_MIPS_VDSO 32 ccflags-vdso += -DDISABLE_MIPS_VDSO
33 endif 33 endif
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
index 4434b54e1d87..78ae5552fdb8 100644
--- a/arch/mn10300/Kconfig
+++ b/arch/mn10300/Kconfig
@@ -1,6 +1,7 @@
1config MN10300 1config MN10300
2 def_bool y 2 def_bool y
3 select HAVE_OPROFILE 3 select HAVE_OPROFILE
4 select HAVE_UID16
4 select GENERIC_IRQ_SHOW 5 select GENERIC_IRQ_SHOW
5 select ARCH_WANT_IPC_PARSE_VERSION 6 select ARCH_WANT_IPC_PARSE_VERSION
6 select HAVE_ARCH_TRACEHOOK 7 select HAVE_ARCH_TRACEHOOK
@@ -37,9 +38,6 @@ config HIGHMEM
37config NUMA 38config NUMA
38 def_bool n 39 def_bool n
39 40
40config UID16
41 def_bool y
42
43config RWSEM_GENERIC_SPINLOCK 41config RWSEM_GENERIC_SPINLOCK
44 def_bool y 42 def_bool y
45 43
diff --git a/arch/nios2/mm/cacheflush.c b/arch/nios2/mm/cacheflush.c
index 223cdcc8203f..87bf88ed04c6 100644
--- a/arch/nios2/mm/cacheflush.c
+++ b/arch/nios2/mm/cacheflush.c
@@ -23,22 +23,6 @@ static void __flush_dcache(unsigned long start, unsigned long end)
23 end += (cpuinfo.dcache_line_size - 1); 23 end += (cpuinfo.dcache_line_size - 1);
24 end &= ~(cpuinfo.dcache_line_size - 1); 24 end &= ~(cpuinfo.dcache_line_size - 1);
25 25
26 for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {
27 __asm__ __volatile__ (" flushda 0(%0)\n"
28 : /* Outputs */
29 : /* Inputs */ "r"(addr)
30 /* : No clobber */);
31 }
32}
33
34static void __flush_dcache_all(unsigned long start, unsigned long end)
35{
36 unsigned long addr;
37
38 start &= ~(cpuinfo.dcache_line_size - 1);
39 end += (cpuinfo.dcache_line_size - 1);
40 end &= ~(cpuinfo.dcache_line_size - 1);
41
42 if (end > start + cpuinfo.dcache_size) 26 if (end > start + cpuinfo.dcache_size)
43 end = start + cpuinfo.dcache_size; 27 end = start + cpuinfo.dcache_size;
44 28
@@ -112,7 +96,7 @@ static void flush_aliases(struct address_space *mapping, struct page *page)
112 96
113void flush_cache_all(void) 97void flush_cache_all(void)
114{ 98{
115 __flush_dcache_all(0, cpuinfo.dcache_size); 99 __flush_dcache(0, cpuinfo.dcache_size);
116 __flush_icache(0, cpuinfo.icache_size); 100 __flush_icache(0, cpuinfo.icache_size);
117} 101}
118 102
@@ -182,7 +166,7 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
182 */ 166 */
183 unsigned long start = (unsigned long)page_address(page); 167 unsigned long start = (unsigned long)page_address(page);
184 168
185 __flush_dcache_all(start, start + PAGE_SIZE); 169 __flush_dcache(start, start + PAGE_SIZE);
186} 170}
187 171
188void flush_dcache_page(struct page *page) 172void flush_dcache_page(struct page *page)
@@ -268,7 +252,7 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
268{ 252{
269 flush_cache_page(vma, user_vaddr, page_to_pfn(page)); 253 flush_cache_page(vma, user_vaddr, page_to_pfn(page));
270 memcpy(dst, src, len); 254 memcpy(dst, src, len);
271 __flush_dcache_all((unsigned long)src, (unsigned long)src + len); 255 __flush_dcache((unsigned long)src, (unsigned long)src + len);
272 if (vma->vm_flags & VM_EXEC) 256 if (vma->vm_flags & VM_EXEC)
273 __flush_icache((unsigned long)src, (unsigned long)src + len); 257 __flush_icache((unsigned long)src, (unsigned long)src + len);
274} 258}
@@ -279,7 +263,7 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
279{ 263{
280 flush_cache_page(vma, user_vaddr, page_to_pfn(page)); 264 flush_cache_page(vma, user_vaddr, page_to_pfn(page));
281 memcpy(dst, src, len); 265 memcpy(dst, src, len);
282 __flush_dcache_all((unsigned long)dst, (unsigned long)dst + len); 266 __flush_dcache((unsigned long)dst, (unsigned long)dst + len);
283 if (vma->vm_flags & VM_EXEC) 267 if (vma->vm_flags & VM_EXEC)
284 __flush_icache((unsigned long)dst, (unsigned long)dst + len); 268 __flush_icache((unsigned long)dst, (unsigned long)dst + len);
285} 269}
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index d8534f95915a..291cee28ccb6 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -372,7 +372,8 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
372 */ 372 */
373#ifdef CONFIG_HUGETLB_PAGE 373#ifdef CONFIG_HUGETLB_PAGE
374#define pte_huge(pte) (pte_val(pte) & _PAGE_HUGE) 374#define pte_huge(pte) (pte_val(pte) & _PAGE_HUGE)
375#define pte_mkhuge(pte) (__pte(pte_val(pte) | _PAGE_HUGE)) 375#define pte_mkhuge(pte) (__pte(pte_val(pte) | \
376 (parisc_requires_coherency() ? 0 : _PAGE_HUGE)))
376#else 377#else
377#define pte_huge(pte) (0) 378#define pte_huge(pte) (0)
378#define pte_mkhuge(pte) (pte) 379#define pte_mkhuge(pte) (pte)
diff --git a/arch/parisc/include/uapi/asm/unistd.h b/arch/parisc/include/uapi/asm/unistd.h
index 33170384d3ac..35bdccbb2036 100644
--- a/arch/parisc/include/uapi/asm/unistd.h
+++ b/arch/parisc/include/uapi/asm/unistd.h
@@ -360,8 +360,9 @@
360#define __NR_execveat (__NR_Linux + 342) 360#define __NR_execveat (__NR_Linux + 342)
361#define __NR_membarrier (__NR_Linux + 343) 361#define __NR_membarrier (__NR_Linux + 343)
362#define __NR_userfaultfd (__NR_Linux + 344) 362#define __NR_userfaultfd (__NR_Linux + 344)
363#define __NR_mlock2 (__NR_Linux + 345)
363 364
364#define __NR_Linux_syscalls (__NR_userfaultfd + 1) 365#define __NR_Linux_syscalls (__NR_mlock2 + 1)
365 366
366 367
367#define __IGNORE_select /* newselect */ 368#define __IGNORE_select /* newselect */
diff --git a/arch/parisc/kernel/pci.c b/arch/parisc/kernel/pci.c
index 64f2764a8cef..c99f3dde455c 100644
--- a/arch/parisc/kernel/pci.c
+++ b/arch/parisc/kernel/pci.c
@@ -171,24 +171,6 @@ void pcibios_set_master(struct pci_dev *dev)
171} 171}
172 172
173 173
174void __init pcibios_init_bus(struct pci_bus *bus)
175{
176 struct pci_dev *dev = bus->self;
177 unsigned short bridge_ctl;
178
179 /* We deal only with pci controllers and pci-pci bridges. */
180 if (!dev || (dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
181 return;
182
183 /* PCI-PCI bridge - set the cache line and default latency
184 (32) for primary and secondary buses. */
185 pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, 32);
186
187 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bridge_ctl);
188 bridge_ctl |= PCI_BRIDGE_CTL_PARITY | PCI_BRIDGE_CTL_SERR;
189 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bridge_ctl);
190}
191
192/* 174/*
193 * pcibios align resources() is called every time generic PCI code 175 * pcibios align resources() is called every time generic PCI code
194 * wants to generate a new address. The process of looking for 176 * wants to generate a new address. The process of looking for
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
index dc1ea796fd60..2264f68f3c2f 100644
--- a/arch/parisc/kernel/signal.c
+++ b/arch/parisc/kernel/signal.c
@@ -435,6 +435,55 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs, int in_syscall)
435 regs->gr[28]); 435 regs->gr[28]);
436} 436}
437 437
438/*
439 * Check how the syscall number gets loaded into %r20 within
440 * the delay branch in userspace and adjust as needed.
441 */
442
443static void check_syscallno_in_delay_branch(struct pt_regs *regs)
444{
445 u32 opcode, source_reg;
446 u32 __user *uaddr;
447 int err;
448
449 /* Usually we don't have to restore %r20 (the system call number)
450 * because it gets loaded in the delay slot of the branch external
451 * instruction via the ldi instruction.
452 * In some cases a register-to-register copy instruction might have
453 * been used instead, in which case we need to copy the syscall
454 * number into the source register before returning to userspace.
455 */
456
457 /* A syscall is just a branch, so all we have to do is fiddle the
458 * return pointer so that the ble instruction gets executed again.
459 */
460 regs->gr[31] -= 8; /* delayed branching */
461
462 /* Get assembler opcode of code in delay branch */
463 uaddr = (unsigned int *) ((regs->gr[31] & ~3) + 4);
464 err = get_user(opcode, uaddr);
465 if (err)
466 return;
467
468 /* Check if delay branch uses "ldi int,%r20" */
469 if ((opcode & 0xffff0000) == 0x34140000)
470 return; /* everything ok, just return */
471
472 /* Check if delay branch uses "nop" */
473 if (opcode == INSN_NOP)
474 return;
475
476 /* Check if delay branch uses "copy %rX,%r20" */
477 if ((opcode & 0xffe0ffff) == 0x08000254) {
478 source_reg = (opcode >> 16) & 31;
479 regs->gr[source_reg] = regs->gr[20];
480 return;
481 }
482
483 pr_warn("syscall restart: %s (pid %d): unexpected opcode 0x%08x\n",
484 current->comm, task_pid_nr(current), opcode);
485}
486
438static inline void 487static inline void
439syscall_restart(struct pt_regs *regs, struct k_sigaction *ka) 488syscall_restart(struct pt_regs *regs, struct k_sigaction *ka)
440{ 489{
@@ -457,10 +506,7 @@ syscall_restart(struct pt_regs *regs, struct k_sigaction *ka)
457 } 506 }
458 /* fallthrough */ 507 /* fallthrough */
459 case -ERESTARTNOINTR: 508 case -ERESTARTNOINTR:
460 /* A syscall is just a branch, so all 509 check_syscallno_in_delay_branch(regs);
461 * we have to do is fiddle the return pointer.
462 */
463 regs->gr[31] -= 8; /* delayed branching */
464 break; 510 break;
465 } 511 }
466} 512}
@@ -510,15 +556,9 @@ insert_restart_trampoline(struct pt_regs *regs)
510 } 556 }
511 case -ERESTARTNOHAND: 557 case -ERESTARTNOHAND:
512 case -ERESTARTSYS: 558 case -ERESTARTSYS:
513 case -ERESTARTNOINTR: { 559 case -ERESTARTNOINTR:
514 /* Hooray for delayed branching. We don't 560 check_syscallno_in_delay_branch(regs);
515 * have to restore %r20 (the system call
516 * number) because it gets loaded in the delay
517 * slot of the branch external instruction.
518 */
519 regs->gr[31] -= 8;
520 return; 561 return;
521 }
522 default: 562 default:
523 break; 563 break;
524 } 564 }
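
A compact way to read the new helper above is as a three-way opcode classifier for whatever sits in the syscall branch's delay slot. A stand-alone sketch of the same mask checks (the INSN_NOP value and the sample instruction words are assumptions, chosen only to exercise the masks):

#include <stdio.h>
#include <stdint.h>

#define INSN_NOP 0x08000240u	/* assumed value, for illustration only */

/* Mirror the three checks performed on the delay-slot instruction. */
static const char *classify(uint32_t opcode)
{
	if ((opcode & 0xffff0000u) == 0x34140000u)
		return "ldi imm,%r20 -> syscall number reloaded, nothing to do";
	if (opcode == INSN_NOP)
		return "nop -> nothing to do";
	if ((opcode & 0xffe0ffffu) == 0x08000254u)
		return "copy %rX,%r20 -> restore the source register";
	return "unexpected opcode -> warn";
}

int main(void)
{
	printf("%s\n", classify(0x341402c1u));	/* hypothetical ldi encoding */
	printf("%s\n", classify(0x080a0254u));	/* hypothetical copy encoding */
	printf("%s\n", classify(0x12345678u));	/* anything else */
	return 0;
}
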
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 78c3ef8c348d..d4ffcfbc9885 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -440,6 +440,7 @@
440 ENTRY_COMP(execveat) 440 ENTRY_COMP(execveat)
441 ENTRY_SAME(membarrier) 441 ENTRY_SAME(membarrier)
442 ENTRY_SAME(userfaultfd) 442 ENTRY_SAME(userfaultfd)
443 ENTRY_SAME(mlock2) /* 345 */
443 444
444 445
445.ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b)) 446.ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))
diff --git a/arch/powerpc/boot/dts/sbc8641d.dts b/arch/powerpc/boot/dts/sbc8641d.dts
index 631ede72e226..68f0ed7626bd 100644
--- a/arch/powerpc/boot/dts/sbc8641d.dts
+++ b/arch/powerpc/boot/dts/sbc8641d.dts
@@ -227,23 +227,15 @@
227 reg = <0x520 0x20>; 227 reg = <0x520 0x20>;
228 228
229 phy0: ethernet-phy@1f { 229 phy0: ethernet-phy@1f {
230 interrupt-parent = <&mpic>;
231 interrupts = <10 1>;
232 reg = <0x1f>; 230 reg = <0x1f>;
233 }; 231 };
234 phy1: ethernet-phy@0 { 232 phy1: ethernet-phy@0 {
235 interrupt-parent = <&mpic>;
236 interrupts = <10 1>;
237 reg = <0>; 233 reg = <0>;
238 }; 234 };
239 phy2: ethernet-phy@1 { 235 phy2: ethernet-phy@1 {
240 interrupt-parent = <&mpic>;
241 interrupts = <10 1>;
242 reg = <1>; 236 reg = <1>;
243 }; 237 };
244 phy3: ethernet-phy@2 { 238 phy3: ethernet-phy@2 {
245 interrupt-parent = <&mpic>;
246 interrupts = <10 1>;
247 reg = <2>; 239 reg = <2>;
248 }; 240 };
249 tbi0: tbi-phy@11 { 241 tbi0: tbi-phy@11 {
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index a908ada8e0a5..2220f7a60def 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -108,6 +108,7 @@
108#define MSR_TS_T __MASK(MSR_TS_T_LG) /* Transaction Transactional */ 108#define MSR_TS_T __MASK(MSR_TS_T_LG) /* Transaction Transactional */
109#define MSR_TS_MASK (MSR_TS_T | MSR_TS_S) /* Transaction State bits */ 109#define MSR_TS_MASK (MSR_TS_T | MSR_TS_S) /* Transaction State bits */
110#define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0) /* Transaction active? */ 110#define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0) /* Transaction active? */
111#define MSR_TM_RESV(x) (((x) & MSR_TS_MASK) == MSR_TS_MASK) /* Reserved */
111#define MSR_TM_TRANSACTIONAL(x) (((x) & MSR_TS_MASK) == MSR_TS_T) 112#define MSR_TM_TRANSACTIONAL(x) (((x) & MSR_TS_MASK) == MSR_TS_T)
112#define MSR_TM_SUSPENDED(x) (((x) & MSR_TS_MASK) == MSR_TS_S) 113#define MSR_TM_SUSPENDED(x) (((x) & MSR_TS_MASK) == MSR_TS_S)
113 114
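
MSR_TM_RESV() simply tests whether both transaction-state bits are set at once, the combination the architecture reserves; the later signal and KVM hunks use it to reject such contexts. A small sketch with stand-in bit positions:

#include <stdio.h>
#include <stdint.h>

/* Stand-in bit positions for the two transaction-state bits. */
#define TS_S		(1ULL << 33)
#define TS_T		(1ULL << 34)
#define TS_MASK		(TS_S | TS_T)

#define TM_ACTIVE(x)		(((x) & TS_MASK) != 0)
#define TM_RESV(x)		(((x) & TS_MASK) == TS_MASK)	/* both bits: reserved */
#define TM_TRANSACTIONAL(x)	(((x) & TS_MASK) == TS_T)
#define TM_SUSPENDED(x)		(((x) & TS_MASK) == TS_S)

int main(void)
{
	uint64_t msr = TS_S | TS_T;	/* the illegal combination a signal frame could carry */

	printf("active=%d resv=%d trans=%d susp=%d\n",
	       TM_ACTIVE(msr), TM_RESV(msr),
	       TM_TRANSACTIONAL(msr), TM_SUSPENDED(msr));
	return 0;
}
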
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index f2b0b1b0c72a..5654ece02c0d 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -370,16 +370,16 @@ COMPAT_SYS(execveat)
370PPC64ONLY(switch_endian) 370PPC64ONLY(switch_endian)
371SYSCALL_SPU(userfaultfd) 371SYSCALL_SPU(userfaultfd)
372SYSCALL_SPU(membarrier) 372SYSCALL_SPU(membarrier)
373SYSCALL(semop) 373SYSCALL(ni_syscall)
374SYSCALL(semget) 374SYSCALL(ni_syscall)
375COMPAT_SYS(semctl) 375SYSCALL(ni_syscall)
376COMPAT_SYS(semtimedop) 376SYSCALL(ni_syscall)
377COMPAT_SYS(msgsnd) 377SYSCALL(ni_syscall)
378COMPAT_SYS(msgrcv) 378SYSCALL(ni_syscall)
379SYSCALL(msgget) 379SYSCALL(ni_syscall)
380COMPAT_SYS(msgctl) 380SYSCALL(ni_syscall)
381COMPAT_SYS(shmat) 381SYSCALL(ni_syscall)
382SYSCALL(shmdt) 382SYSCALL(ni_syscall)
383SYSCALL(shmget) 383SYSCALL(ni_syscall)
384COMPAT_SYS(shmctl) 384SYSCALL(ni_syscall)
385SYSCALL(mlock2) 385SYSCALL(mlock2)
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index 1effea5193d6..12a05652377a 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -388,18 +388,6 @@
388#define __NR_switch_endian 363 388#define __NR_switch_endian 363
389#define __NR_userfaultfd 364 389#define __NR_userfaultfd 364
390#define __NR_membarrier 365 390#define __NR_membarrier 365
391#define __NR_semop 366
392#define __NR_semget 367
393#define __NR_semctl 368
394#define __NR_semtimedop 369
395#define __NR_msgsnd 370
396#define __NR_msgrcv 371
397#define __NR_msgget 372
398#define __NR_msgctl 373
399#define __NR_shmat 374
400#define __NR_shmdt 375
401#define __NR_shmget 376
402#define __NR_shmctl 377
403#define __NR_mlock2 378 391#define __NR_mlock2 378
404 392
405#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ 393#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 80dfe8965df9..8d14feb40f12 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -590,16 +590,10 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
590 eeh_ops->configure_bridge(pe); 590 eeh_ops->configure_bridge(pe);
591 eeh_pe_restore_bars(pe); 591 eeh_pe_restore_bars(pe);
592 592
593 /* 593 /* Clear frozen state */
594 * If it's PHB PE, the frozen state on all available PEs should have 594 rc = eeh_clear_pe_frozen_state(pe, false);
595 * been cleared by the PHB reset. Otherwise, we unfreeze the PE and its 595 if (rc)
596 * child PEs because they might be in frozen state. 596 return rc;
597 */
598 if (!(pe->type & EEH_PE_PHB)) {
599 rc = eeh_clear_pe_frozen_state(pe, false);
600 if (rc)
601 return rc;
602 }
603 597
604 /* Give the system 5 seconds to finish running the user-space 598 /* Give the system 5 seconds to finish running the user-space
605 * hotplug shutdown scripts, e.g. ifdown for ethernet. Yes, 599 * hotplug shutdown scripts, e.g. ifdown for ethernet. Yes,
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 75b6676c1a0b..646bf4d222c1 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -551,6 +551,24 @@ static void tm_reclaim_thread(struct thread_struct *thr,
551 msr_diff &= MSR_FP | MSR_VEC | MSR_VSX | MSR_FE0 | MSR_FE1; 551 msr_diff &= MSR_FP | MSR_VEC | MSR_VSX | MSR_FE0 | MSR_FE1;
552 } 552 }
553 553
554 /*
555 * Use the current MSR TM suspended bit to track if we have
556 * checkpointed state outstanding.
557 * On signal delivery, we'd normally reclaim the checkpointed
 558 * state to obtain the stack pointer (see: get_tm_stackpointer()).
559 * This will then directly return to userspace without going
560 * through __switch_to(). However, if the stack frame is bad,
561 * we need to exit this thread which calls __switch_to() which
562 * will again attempt to reclaim the already saved tm state.
563 * Hence we need to check that we've not already reclaimed
564 * this state.
 565 * We do this using the current MSR, rather than tracking it in
 566 * some specific thread_struct bit, as it has the additional
 567 * benefit of checking for a potential TM bad thing exception.
568 */
569 if (!MSR_TM_SUSPENDED(mfmsr()))
570 return;
571
554 tm_reclaim(thr, thr->regs->msr, cause); 572 tm_reclaim(thr, thr->regs->msr, cause);
555 573
556 /* Having done the reclaim, we now have the checkpointed 574 /* Having done the reclaim, we now have the checkpointed
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 0dbee465af7a..ef7c24e84a62 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -875,6 +875,15 @@ static long restore_tm_user_regs(struct pt_regs *regs,
875 return 1; 875 return 1;
876#endif /* CONFIG_SPE */ 876#endif /* CONFIG_SPE */
877 877
878 /* Get the top half of the MSR from the user context */
879 if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
880 return 1;
881 msr_hi <<= 32;
882 /* If TM bits are set to the reserved value, it's an invalid context */
883 if (MSR_TM_RESV(msr_hi))
884 return 1;
885 /* Pull in the MSR TM bits from the user context */
886 regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
878 /* Now, recheckpoint. This loads up all of the checkpointed (older) 887 /* Now, recheckpoint. This loads up all of the checkpointed (older)
879 * registers, including FP and V[S]Rs. After recheckpointing, the 888 * registers, including FP and V[S]Rs. After recheckpointing, the
880 * transactional versions should be loaded. 889 * transactional versions should be loaded.
@@ -884,11 +893,6 @@ static long restore_tm_user_regs(struct pt_regs *regs,
884 current->thread.tm_texasr |= TEXASR_FS; 893 current->thread.tm_texasr |= TEXASR_FS;
885 /* This loads the checkpointed FP/VEC state, if used */ 894 /* This loads the checkpointed FP/VEC state, if used */
886 tm_recheckpoint(&current->thread, msr); 895 tm_recheckpoint(&current->thread, msr);
887 /* Get the top half of the MSR */
888 if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
889 return 1;
890 /* Pull in MSR TM from user context */
891 regs->msr = (regs->msr & ~MSR_TS_MASK) | ((msr_hi<<32) & MSR_TS_MASK);
892 896
893 /* This loads the speculative FP/VEC state, if used */ 897 /* This loads the speculative FP/VEC state, if used */
894 if (msr & MSR_FP) { 898 if (msr & MSR_FP) {
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 20756dfb9f34..c676ecec0869 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -438,6 +438,10 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
438 438
439 /* get MSR separately, transfer the LE bit if doing signal return */ 439 /* get MSR separately, transfer the LE bit if doing signal return */
440 err |= __get_user(msr, &sc->gp_regs[PT_MSR]); 440 err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
441 /* Don't allow reserved mode. */
442 if (MSR_TM_RESV(msr))
443 return -EINVAL;
444
441 /* pull in MSR TM from user context */ 445 /* pull in MSR TM from user context */
442 regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK); 446 regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);
443 447
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 54b45b73195f..a7352b59e6f9 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -224,6 +224,12 @@ static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
224 224
225static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr) 225static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
226{ 226{
227 /*
228 * Check for illegal transactional state bit combination
229 * and if we find it, force the TS field to a safe state.
230 */
231 if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
232 msr &= ~MSR_TS_MASK;
227 vcpu->arch.shregs.msr = msr; 233 vcpu->arch.shregs.msr = msr;
228 kvmppc_end_cede(vcpu); 234 kvmppc_end_cede(vcpu);
229} 235}
diff --git a/arch/powerpc/platforms/powernv/opal-irqchip.c b/arch/powerpc/platforms/powernv/opal-irqchip.c
index 6ccfb6c1c707..e505223b4ec5 100644
--- a/arch/powerpc/platforms/powernv/opal-irqchip.c
+++ b/arch/powerpc/platforms/powernv/opal-irqchip.c
@@ -43,11 +43,34 @@ static unsigned int opal_irq_count;
43static unsigned int *opal_irqs; 43static unsigned int *opal_irqs;
44 44
45static void opal_handle_irq_work(struct irq_work *work); 45static void opal_handle_irq_work(struct irq_work *work);
46static __be64 last_outstanding_events; 46static u64 last_outstanding_events;
47static struct irq_work opal_event_irq_work = { 47static struct irq_work opal_event_irq_work = {
48 .func = opal_handle_irq_work, 48 .func = opal_handle_irq_work,
49}; 49};
50 50
51void opal_handle_events(uint64_t events)
52{
53 int virq, hwirq = 0;
54 u64 mask = opal_event_irqchip.mask;
55
56 if (!in_irq() && (events & mask)) {
57 last_outstanding_events = events;
58 irq_work_queue(&opal_event_irq_work);
59 return;
60 }
61
62 while (events & mask) {
63 hwirq = fls64(events) - 1;
64 if (BIT_ULL(hwirq) & mask) {
65 virq = irq_find_mapping(opal_event_irqchip.domain,
66 hwirq);
67 if (virq)
68 generic_handle_irq(virq);
69 }
70 events &= ~BIT_ULL(hwirq);
71 }
72}
73
51static void opal_event_mask(struct irq_data *d) 74static void opal_event_mask(struct irq_data *d)
52{ 75{
53 clear_bit(d->hwirq, &opal_event_irqchip.mask); 76 clear_bit(d->hwirq, &opal_event_irqchip.mask);
@@ -55,9 +78,21 @@ static void opal_event_mask(struct irq_data *d)
55 78
56static void opal_event_unmask(struct irq_data *d) 79static void opal_event_unmask(struct irq_data *d)
57{ 80{
81 __be64 events;
82
58 set_bit(d->hwirq, &opal_event_irqchip.mask); 83 set_bit(d->hwirq, &opal_event_irqchip.mask);
59 84
60 opal_poll_events(&last_outstanding_events); 85 opal_poll_events(&events);
86 last_outstanding_events = be64_to_cpu(events);
87
88 /*
89 * We can't just handle the events now with opal_handle_events().
90 * If we did we would deadlock when opal_event_unmask() is called from
91 * handle_level_irq() with the irq descriptor lock held, because
92 * calling opal_handle_events() would call generic_handle_irq() and
93 * then handle_level_irq() which would try to take the descriptor lock
94 * again. Instead queue the events for later.
95 */
61 if (last_outstanding_events & opal_event_irqchip.mask) 96 if (last_outstanding_events & opal_event_irqchip.mask)
62 /* Need to retrigger the interrupt */ 97 /* Need to retrigger the interrupt */
63 irq_work_queue(&opal_event_irq_work); 98 irq_work_queue(&opal_event_irq_work);
@@ -96,29 +131,6 @@ static int opal_event_map(struct irq_domain *d, unsigned int irq,
96 return 0; 131 return 0;
97} 132}
98 133
99void opal_handle_events(uint64_t events)
100{
101 int virq, hwirq = 0;
102 u64 mask = opal_event_irqchip.mask;
103
104 if (!in_irq() && (events & mask)) {
105 last_outstanding_events = events;
106 irq_work_queue(&opal_event_irq_work);
107 return;
108 }
109
110 while (events & mask) {
111 hwirq = fls64(events) - 1;
112 if (BIT_ULL(hwirq) & mask) {
113 virq = irq_find_mapping(opal_event_irqchip.domain,
114 hwirq);
115 if (virq)
116 generic_handle_irq(virq);
117 }
118 events &= ~BIT_ULL(hwirq);
119 }
120}
121
122static irqreturn_t opal_interrupt(int irq, void *data) 134static irqreturn_t opal_interrupt(int irq, void *data)
123{ 135{
124 __be64 events; 136 __be64 events;
@@ -131,7 +143,7 @@ static irqreturn_t opal_interrupt(int irq, void *data)
131 143
132static void opal_handle_irq_work(struct irq_work *work) 144static void opal_handle_irq_work(struct irq_work *work)
133{ 145{
134 opal_handle_events(be64_to_cpu(last_outstanding_events)); 146 opal_handle_events(last_outstanding_events);
135} 147}
136 148
137static int opal_event_match(struct irq_domain *h, struct device_node *node, 149static int opal_event_match(struct irq_domain *h, struct device_node *node,
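
The relocated opal_handle_events() walks the event word from the highest set bit downwards. A stand-alone sketch of that loop, with __builtin_clzll (GCC/Clang) standing in for the kernel's fls64() and invented event/mask values:

#include <stdio.h>
#include <stdint.h>

/* Dispatch events from the highest set bit downwards; 63 - clz is the
 * user-space equivalent of fls64() - 1. */
static void dispatch(uint64_t events, uint64_t mask)
{
	while (events & mask) {
		int hwirq = 63 - __builtin_clzll(events);

		if ((1ULL << hwirq) & mask)
			printf("handle hwirq %d\n", hwirq);
		events &= ~(1ULL << hwirq);
	}
}

int main(void)
{
	dispatch(0x8000000000000005ULL, ~0ULL);	/* prints hwirq 63, 2, 0 */
	return 0;
}
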
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 4296d55e88f3..57cffb80bc36 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -278,7 +278,7 @@ static void opal_handle_message(void)
278 278
279 /* Sanity check */ 279 /* Sanity check */
280 if (type >= OPAL_MSG_TYPE_MAX) { 280 if (type >= OPAL_MSG_TYPE_MAX) {
281 pr_warning("%s: Unknown message type: %u\n", __func__, type); 281 pr_warn_once("%s: Unknown message type: %u\n", __func__, type);
282 return; 282 return;
283 } 283 }
284 opal_message_do_notify(type, (void *)&msg); 284 opal_message_do_notify(type, (void *)&msg);
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index 8140d10c6785..6e72961608f0 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -1920,16 +1920,23 @@ static int print_insn(char *buffer, unsigned char *code, unsigned long addr)
1920 } 1920 }
1921 if (separator) 1921 if (separator)
1922 ptr += sprintf(ptr, "%c", separator); 1922 ptr += sprintf(ptr, "%c", separator);
1923 /*
1924 * Use four '%' characters below because of the
1925 * following two conversions:
1926 *
1927 * 1) sprintf: %%%%r -> %%r
1928 * 2) printk : %%r -> %r
1929 */
1923 if (operand->flags & OPERAND_GPR) 1930 if (operand->flags & OPERAND_GPR)
1924 ptr += sprintf(ptr, "%%r%i", value); 1931 ptr += sprintf(ptr, "%%%%r%i", value);
1925 else if (operand->flags & OPERAND_FPR) 1932 else if (operand->flags & OPERAND_FPR)
1926 ptr += sprintf(ptr, "%%f%i", value); 1933 ptr += sprintf(ptr, "%%%%f%i", value);
1927 else if (operand->flags & OPERAND_AR) 1934 else if (operand->flags & OPERAND_AR)
1928 ptr += sprintf(ptr, "%%a%i", value); 1935 ptr += sprintf(ptr, "%%%%a%i", value);
1929 else if (operand->flags & OPERAND_CR) 1936 else if (operand->flags & OPERAND_CR)
1930 ptr += sprintf(ptr, "%%c%i", value); 1937 ptr += sprintf(ptr, "%%%%c%i", value);
1931 else if (operand->flags & OPERAND_VR) 1938 else if (operand->flags & OPERAND_VR)
1932 ptr += sprintf(ptr, "%%v%i", value); 1939 ptr += sprintf(ptr, "%%%%v%i", value);
1933 else if (operand->flags & OPERAND_PCREL) 1940 else if (operand->flags & OPERAND_PCREL)
1934 ptr += sprintf(ptr, "%lx", (signed int) value 1941 ptr += sprintf(ptr, "%lx", (signed int) value
1935 + addr); 1942 + addr);
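
The quadruple '%' in the hunk above is easy to misread; a quick user-space illustration of the two conversion passes the new comment describes, with printf standing in for printk:

#include <stdio.h>

int main(void)
{
	char buf[16];

	/* Pass 1: sprintf collapses "%%%%r%i" into "%%r1". */
	sprintf(buf, "%%%%r%i", 1);

	/* Pass 2: deliberately using the buffer as a format string, as
	 * printk later does with the disassembly, collapses "%%r1" to "%r1". */
	printf(buf);
	printf("\n");
	return 0;
}
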
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 373e32346d68..6a75352f453c 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -1030,8 +1030,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1030 src_id, 0); 1030 src_id, 0);
1031 1031
1032 /* sending vcpu invalid */ 1032 /* sending vcpu invalid */
1033 if (src_id >= KVM_MAX_VCPUS || 1033 if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL)
1034 kvm_get_vcpu(vcpu->kvm, src_id) == NULL)
1035 return -EINVAL; 1034 return -EINVAL;
1036 1035
1037 if (sclp.has_sigpif) 1036 if (sclp.has_sigpif)
@@ -1110,6 +1109,10 @@ static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
1110 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY, 1109 trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
1111 irq->u.emerg.code, 0); 1110 irq->u.emerg.code, 0);
1112 1111
1112 /* sending vcpu invalid */
1113 if (kvm_get_vcpu_by_id(vcpu->kvm, irq->u.emerg.code) == NULL)
1114 return -EINVAL;
1115
1113 set_bit(irq->u.emerg.code, li->sigp_emerg_pending); 1116 set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
1114 set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs); 1117 set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
1115 atomic_or(CPUSTAT_EXT_INT, li->cpuflags); 1118 atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 8fe2f1c722dc..846589281b04 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -342,12 +342,16 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
342 r = 0; 342 r = 0;
343 break; 343 break;
344 case KVM_CAP_S390_VECTOR_REGISTERS: 344 case KVM_CAP_S390_VECTOR_REGISTERS:
345 if (MACHINE_HAS_VX) { 345 mutex_lock(&kvm->lock);
346 if (atomic_read(&kvm->online_vcpus)) {
347 r = -EBUSY;
348 } else if (MACHINE_HAS_VX) {
346 set_kvm_facility(kvm->arch.model.fac->mask, 129); 349 set_kvm_facility(kvm->arch.model.fac->mask, 129);
347 set_kvm_facility(kvm->arch.model.fac->list, 129); 350 set_kvm_facility(kvm->arch.model.fac->list, 129);
348 r = 0; 351 r = 0;
349 } else 352 } else
350 r = -EINVAL; 353 r = -EINVAL;
354 mutex_unlock(&kvm->lock);
351 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s", 355 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
352 r ? "(not available)" : "(success)"); 356 r ? "(not available)" : "(success)");
353 break; 357 break;
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 77191b85ea7a..d76b51cb4b62 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -660,7 +660,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
660 660
661 kvm_s390_get_regs_rre(vcpu, &reg1, &reg2); 661 kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
662 662
663 if (!MACHINE_HAS_PFMF) 663 if (!test_kvm_facility(vcpu->kvm, 8))
664 return kvm_s390_inject_program_int(vcpu, PGM_OPERATION); 664 return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
665 665
666 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 666 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index da690b69f9fe..77c22d685c7a 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -291,12 +291,8 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
291 u16 cpu_addr, u32 parameter, u64 *status_reg) 291 u16 cpu_addr, u32 parameter, u64 *status_reg)
292{ 292{
293 int rc; 293 int rc;
294 struct kvm_vcpu *dst_vcpu; 294 struct kvm_vcpu *dst_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr);
295 295
296 if (cpu_addr >= KVM_MAX_VCPUS)
297 return SIGP_CC_NOT_OPERATIONAL;
298
299 dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
300 if (!dst_vcpu) 296 if (!dst_vcpu)
301 return SIGP_CC_NOT_OPERATIONAL; 297 return SIGP_CC_NOT_OPERATIONAL;
302 298
@@ -478,7 +474,7 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
478 trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr); 474 trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr);
479 475
480 if (order_code == SIGP_EXTERNAL_CALL) { 476 if (order_code == SIGP_EXTERNAL_CALL) {
481 dest_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); 477 dest_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, cpu_addr);
482 BUG_ON(dest_vcpu == NULL); 478 BUG_ON(dest_vcpu == NULL);
483 479
484 kvm_s390_vcpu_wakeup(dest_vcpu); 480 kvm_s390_vcpu_wakeup(dest_vcpu);
diff --git a/arch/sh/include/uapi/asm/unistd_64.h b/arch/sh/include/uapi/asm/unistd_64.h
index e6820c86e8c7..47ebd5b5ed55 100644
--- a/arch/sh/include/uapi/asm/unistd_64.h
+++ b/arch/sh/include/uapi/asm/unistd_64.h
@@ -278,7 +278,7 @@
278#define __NR_fsetxattr 256 278#define __NR_fsetxattr 256
279#define __NR_getxattr 257 279#define __NR_getxattr 257
280#define __NR_lgetxattr 258 280#define __NR_lgetxattr 258
281#define __NR_fgetxattr 269 281#define __NR_fgetxattr 259
282#define __NR_listxattr 260 282#define __NR_listxattr 260
283#define __NR_llistxattr 261 283#define __NR_llistxattr 261
284#define __NR_flistxattr 262 284#define __NR_flistxattr 262
diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c
index 7cfd7f153966..4dca18347ee9 100644
--- a/arch/sh/kernel/perf_event.c
+++ b/arch/sh/kernel/perf_event.c
@@ -10,7 +10,7 @@
10 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar 10 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
11 * Copyright (C) 2009 Jaswinder Singh Rajput 11 * Copyright (C) 2009 Jaswinder Singh Rajput
12 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter 12 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
13 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 13 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
14 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com> 14 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
15 * 15 *
16 * ppc: 16 * ppc:
diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
index 370ca1e71ffb..93310837c2df 100644
--- a/arch/sparc/include/asm/elf_64.h
+++ b/arch/sparc/include/asm/elf_64.h
@@ -95,6 +95,7 @@
95 * really available. So we simply advertise only "crypto" support. 95 * really available. So we simply advertise only "crypto" support.
96 */ 96 */
97#define HWCAP_SPARC_CRYPTO 0x04000000 /* CRYPTO insns available */ 97#define HWCAP_SPARC_CRYPTO 0x04000000 /* CRYPTO insns available */
98#define HWCAP_SPARC_ADI 0x08000000 /* ADI available */
98 99
99#define CORE_DUMP_USE_REGSET 100#define CORE_DUMP_USE_REGSET
100 101
diff --git a/arch/sparc/include/uapi/asm/unistd.h b/arch/sparc/include/uapi/asm/unistd.h
index efe9479f837b..f31a124a8497 100644
--- a/arch/sparc/include/uapi/asm/unistd.h
+++ b/arch/sparc/include/uapi/asm/unistd.h
@@ -417,8 +417,9 @@
417#define __NR_bpf 349 417#define __NR_bpf 349
418#define __NR_execveat 350 418#define __NR_execveat 350
419#define __NR_membarrier 351 419#define __NR_membarrier 351
420#define __NR_userfaultfd 352
420 421
421#define NR_syscalls 352 422#define NR_syscalls 353
422 423
423/* Bitmask values returned from kern_features system call. */ 424/* Bitmask values returned from kern_features system call. */
424#define KERN_FEATURE_MIXED_MODE_STACK 0x00000001 425#define KERN_FEATURE_MIXED_MODE_STACK 0x00000001
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index 3d61fcae7ee3..f2d30cab5b3f 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -946,6 +946,12 @@ ENTRY(__retl_one)
946 mov 1, %o0 946 mov 1, %o0
947ENDPROC(__retl_one) 947ENDPROC(__retl_one)
948 948
949ENTRY(__retl_one_fp)
950 VISExitHalf
951 retl
952 mov 1, %o0
953ENDPROC(__retl_one_fp)
954
949ENTRY(__ret_one_asi) 955ENTRY(__ret_one_asi)
950 wr %g0, ASI_AIUS, %asi 956 wr %g0, ASI_AIUS, %asi
951 ret 957 ret
@@ -958,6 +964,13 @@ ENTRY(__retl_one_asi)
958 mov 1, %o0 964 mov 1, %o0
959ENDPROC(__retl_one_asi) 965ENDPROC(__retl_one_asi)
960 966
967ENTRY(__retl_one_asi_fp)
968 wr %g0, ASI_AIUS, %asi
969 VISExitHalf
970 retl
971 mov 1, %o0
972ENDPROC(__retl_one_asi_fp)
973
961ENTRY(__retl_o1) 974ENTRY(__retl_o1)
962 retl 975 retl
963 mov %o1, %o0 976 mov %o1, %o0
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index b0da5aedb336..6596f66ce112 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -9,7 +9,7 @@
9 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar 9 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
10 * Copyright (C) 2009 Jaswinder Singh Rajput 10 * Copyright (C) 2009 Jaswinder Singh Rajput
11 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter 11 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
12 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 12 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
13 */ 13 */
14 14
15#include <linux/perf_event.h> 15#include <linux/perf_event.h>
@@ -1828,11 +1828,18 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
1828void 1828void
1829perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) 1829perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
1830{ 1830{
1831 u64 saved_fault_address = current_thread_info()->fault_address;
1832 u8 saved_fault_code = get_thread_fault_code();
1833 mm_segment_t old_fs;
1834
1831 perf_callchain_store(entry, regs->tpc); 1835 perf_callchain_store(entry, regs->tpc);
1832 1836
1833 if (!current->mm) 1837 if (!current->mm)
1834 return; 1838 return;
1835 1839
1840 old_fs = get_fs();
1841 set_fs(USER_DS);
1842
1836 flushw_user(); 1843 flushw_user();
1837 1844
1838 pagefault_disable(); 1845 pagefault_disable();
@@ -1843,4 +1850,8 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
1843 perf_callchain_user_64(entry, regs); 1850 perf_callchain_user_64(entry, regs);
1844 1851
1845 pagefault_enable(); 1852 pagefault_enable();
1853
1854 set_fs(old_fs);
1855 set_thread_fault_code(saved_fault_code);
1856 current_thread_info()->fault_address = saved_fault_address;
1846} 1857}
diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
index 39f0c662f4c8..d08bdaffdbfc 100644
--- a/arch/sparc/kernel/rtrap_64.S
+++ b/arch/sparc/kernel/rtrap_64.S
@@ -73,7 +73,13 @@ rtrap_nmi: ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
73 andn %l1, %l4, %l1 73 andn %l1, %l4, %l1
74 srl %l4, 20, %l4 74 srl %l4, 20, %l4
75 ba,pt %xcc, rtrap_no_irq_enable 75 ba,pt %xcc, rtrap_no_irq_enable
76 wrpr %l4, %pil 76 nop
77 /* Do not actually set the %pil here. We will do that
78 * below after we clear PSTATE_IE in the %pstate register.
79 * If we re-enable interrupts here, we can recurse down
80 * the hardirq stack potentially endlessly, causing a
81 * stack overflow.
82 */
77 83
78 .align 64 84 .align 64
79 .globl rtrap_irq, rtrap, irqsz_patchme, rtrap_xcall 85 .globl rtrap_irq, rtrap, irqsz_patchme, rtrap_xcall
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index f7b261749383..f3185e2b028b 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -380,7 +380,8 @@ static const char *hwcaps[] = {
380 */ 380 */
381 "mul32", "div32", "fsmuld", "v8plus", "popc", "vis", "vis2", 381 "mul32", "div32", "fsmuld", "v8plus", "popc", "vis", "vis2",
382 "ASIBlkInit", "fmaf", "vis3", "hpc", "random", "trans", "fjfmau", 382 "ASIBlkInit", "fmaf", "vis3", "hpc", "random", "trans", "fjfmau",
383 "ima", "cspare", "pause", "cbcond", 383 "ima", "cspare", "pause", "cbcond", NULL /*reserved for crypto */,
384 "adp",
384}; 385};
385 386
386static const char *crypto_hwcaps[] = { 387static const char *crypto_hwcaps[] = {
@@ -396,7 +397,7 @@ void cpucap_info(struct seq_file *m)
396 seq_puts(m, "cpucaps\t\t: "); 397 seq_puts(m, "cpucaps\t\t: ");
397 for (i = 0; i < ARRAY_SIZE(hwcaps); i++) { 398 for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
398 unsigned long bit = 1UL << i; 399 unsigned long bit = 1UL << i;
399 if (caps & bit) { 400 if (hwcaps[i] && (caps & bit)) {
400 seq_printf(m, "%s%s", 401 seq_printf(m, "%s%s",
401 printed ? "," : "", hwcaps[i]); 402 printed ? "," : "", hwcaps[i]);
402 printed++; 403 printed++;
@@ -450,7 +451,7 @@ static void __init report_hwcaps(unsigned long caps)
450 451
451 for (i = 0; i < ARRAY_SIZE(hwcaps); i++) { 452 for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
452 unsigned long bit = 1UL << i; 453 unsigned long bit = 1UL << i;
453 if (caps & bit) 454 if (hwcaps[i] && (caps & bit))
454 report_one_hwcap(&printed, hwcaps[i]); 455 report_one_hwcap(&printed, hwcaps[i]);
455 } 456 }
456 if (caps & HWCAP_SPARC_CRYPTO) 457 if (caps & HWCAP_SPARC_CRYPTO)
@@ -485,7 +486,7 @@ static unsigned long __init mdesc_cpu_hwcap_list(void)
485 for (i = 0; i < ARRAY_SIZE(hwcaps); i++) { 486 for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
486 unsigned long bit = 1UL << i; 487 unsigned long bit = 1UL << i;
487 488
488 if (!strcmp(prop, hwcaps[i])) { 489 if (hwcaps[i] && !strcmp(prop, hwcaps[i])) {
489 caps |= bit; 490 caps |= bit;
490 break; 491 break;
491 } 492 }
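
The NULL entry keeps the bit-index-to-name mapping aligned while the added hwcaps[i] guards skip reserved positions. A tiny stand-alone sketch of the same pattern (names and capability mask are invented):

#include <stdio.h>

/* Bit-indexed capability names; a NULL entry reserves a bit position
 * (here bit 4, standing in for the crypto bit) without printing it. */
static const char *const caps_names[] = {
	"mul32", "div32", "fsmuld", "popc", NULL /* reserved */, "adp",
};

int main(void)
{
	unsigned long caps = 0x3bUL;	/* invented mask: bits 0, 1, 3, 4, 5 set */
	int printed = 0;

	for (unsigned i = 0; i < sizeof(caps_names) / sizeof(caps_names[0]); i++) {
		unsigned long bit = 1UL << i;

		if (caps_names[i] && (caps & bit))
			printf("%s%s", printed++ ? "," : "", caps_names[i]);
	}
	printf("\n");	/* prints: mul32,div32,popc,adp */
	return 0;
}
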
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index cc23b62b6e38..78e80293cb6d 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -87,4 +87,4 @@ sys_call_table:
87/*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev 87/*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
88/*340*/ .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr 88/*340*/ .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
89/*345*/ .long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf 89/*345*/ .long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
90/*350*/ .long sys_execveat, sys_membarrier 90/*350*/ .long sys_execveat, sys_membarrier, sys_userfaultfd
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index f229468a7479..2549c2c3ec2f 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -88,7 +88,7 @@ sys_call_table32:
88 .word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev 88 .word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev
89/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr 89/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
90 .word sys32_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf 90 .word sys32_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
91/*350*/ .word sys32_execveat, sys_membarrier 91/*350*/ .word sys32_execveat, sys_membarrier, sys_userfaultfd
92 92
93#endif /* CONFIG_COMPAT */ 93#endif /* CONFIG_COMPAT */
94 94
@@ -168,4 +168,4 @@ sys_call_table:
168 .word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev 168 .word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
169/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr 169/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
170 .word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf 170 .word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
171/*350*/ .word sys64_execveat, sys_membarrier 171/*350*/ .word sys64_execveat, sys_membarrier, sys_userfaultfd
diff --git a/arch/sparc/lib/NG2copy_from_user.S b/arch/sparc/lib/NG2copy_from_user.S
index 119ccb9a54f4..d5242b8c4f94 100644
--- a/arch/sparc/lib/NG2copy_from_user.S
+++ b/arch/sparc/lib/NG2copy_from_user.S
@@ -11,6 +11,14 @@
11 .text; \ 11 .text; \
12 .align 4; 12 .align 4;
13 13
14#define EX_LD_FP(x) \
1598: x; \
16 .section __ex_table,"a";\
17 .align 4; \
18 .word 98b, __retl_one_asi_fp;\
19 .text; \
20 .align 4;
21
14#ifndef ASI_AIUS 22#ifndef ASI_AIUS
15#define ASI_AIUS 0x11 23#define ASI_AIUS 0x11
16#endif 24#endif
diff --git a/arch/sparc/lib/NG2copy_to_user.S b/arch/sparc/lib/NG2copy_to_user.S
index 7fe1ccefd9d0..4e962d993b10 100644
--- a/arch/sparc/lib/NG2copy_to_user.S
+++ b/arch/sparc/lib/NG2copy_to_user.S
@@ -11,6 +11,14 @@
11 .text; \ 11 .text; \
12 .align 4; 12 .align 4;
13 13
14#define EX_ST_FP(x) \
1598: x; \
16 .section __ex_table,"a";\
17 .align 4; \
18 .word 98b, __retl_one_asi_fp;\
19 .text; \
20 .align 4;
21
14#ifndef ASI_AIUS 22#ifndef ASI_AIUS
15#define ASI_AIUS 0x11 23#define ASI_AIUS 0x11
16#endif 24#endif
diff --git a/arch/sparc/lib/NG2memcpy.S b/arch/sparc/lib/NG2memcpy.S
index 30eee6e8a81b..d5f585df2f3f 100644
--- a/arch/sparc/lib/NG2memcpy.S
+++ b/arch/sparc/lib/NG2memcpy.S
@@ -34,10 +34,16 @@
34#ifndef EX_LD 34#ifndef EX_LD
35#define EX_LD(x) x 35#define EX_LD(x) x
36#endif 36#endif
37#ifndef EX_LD_FP
38#define EX_LD_FP(x) x
39#endif
37 40
38#ifndef EX_ST 41#ifndef EX_ST
39#define EX_ST(x) x 42#define EX_ST(x) x
40#endif 43#endif
44#ifndef EX_ST_FP
45#define EX_ST_FP(x) x
46#endif
41 47
42#ifndef EX_RETVAL 48#ifndef EX_RETVAL
43#define EX_RETVAL(x) x 49#define EX_RETVAL(x) x
@@ -134,40 +140,40 @@
134 fsrc2 %x6, %f12; \ 140 fsrc2 %x6, %f12; \
135 fsrc2 %x7, %f14; 141 fsrc2 %x7, %f14;
136#define FREG_LOAD_1(base, x0) \ 142#define FREG_LOAD_1(base, x0) \
137 EX_LD(LOAD(ldd, base + 0x00, %x0)) 143 EX_LD_FP(LOAD(ldd, base + 0x00, %x0))
138#define FREG_LOAD_2(base, x0, x1) \ 144#define FREG_LOAD_2(base, x0, x1) \
139 EX_LD(LOAD(ldd, base + 0x00, %x0)); \ 145 EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \
140 EX_LD(LOAD(ldd, base + 0x08, %x1)); 146 EX_LD_FP(LOAD(ldd, base + 0x08, %x1));
141#define FREG_LOAD_3(base, x0, x1, x2) \ 147#define FREG_LOAD_3(base, x0, x1, x2) \
142 EX_LD(LOAD(ldd, base + 0x00, %x0)); \ 148 EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \
143 EX_LD(LOAD(ldd, base + 0x08, %x1)); \ 149 EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \
144 EX_LD(LOAD(ldd, base + 0x10, %x2)); 150 EX_LD_FP(LOAD(ldd, base + 0x10, %x2));
145#define FREG_LOAD_4(base, x0, x1, x2, x3) \ 151#define FREG_LOAD_4(base, x0, x1, x2, x3) \
146 EX_LD(LOAD(ldd, base + 0x00, %x0)); \ 152 EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \
147 EX_LD(LOAD(ldd, base + 0x08, %x1)); \ 153 EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \
148 EX_LD(LOAD(ldd, base + 0x10, %x2)); \ 154 EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \
149 EX_LD(LOAD(ldd, base + 0x18, %x3)); 155 EX_LD_FP(LOAD(ldd, base + 0x18, %x3));
150#define FREG_LOAD_5(base, x0, x1, x2, x3, x4) \ 156#define FREG_LOAD_5(base, x0, x1, x2, x3, x4) \
151 EX_LD(LOAD(ldd, base + 0x00, %x0)); \ 157 EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \
152 EX_LD(LOAD(ldd, base + 0x08, %x1)); \ 158 EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \
153 EX_LD(LOAD(ldd, base + 0x10, %x2)); \ 159 EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \
154 EX_LD(LOAD(ldd, base + 0x18, %x3)); \ 160 EX_LD_FP(LOAD(ldd, base + 0x18, %x3)); \
155 EX_LD(LOAD(ldd, base + 0x20, %x4)); 161 EX_LD_FP(LOAD(ldd, base + 0x20, %x4));
156#define FREG_LOAD_6(base, x0, x1, x2, x3, x4, x5) \ 162#define FREG_LOAD_6(base, x0, x1, x2, x3, x4, x5) \
157 EX_LD(LOAD(ldd, base + 0x00, %x0)); \ 163 EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \
158 EX_LD(LOAD(ldd, base + 0x08, %x1)); \ 164 EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \
159 EX_LD(LOAD(ldd, base + 0x10, %x2)); \ 165 EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \
160 EX_LD(LOAD(ldd, base + 0x18, %x3)); \ 166 EX_LD_FP(LOAD(ldd, base + 0x18, %x3)); \
161 EX_LD(LOAD(ldd, base + 0x20, %x4)); \ 167 EX_LD_FP(LOAD(ldd, base + 0x20, %x4)); \
162 EX_LD(LOAD(ldd, base + 0x28, %x5)); 168 EX_LD_FP(LOAD(ldd, base + 0x28, %x5));
163#define FREG_LOAD_7(base, x0, x1, x2, x3, x4, x5, x6) \ 169#define FREG_LOAD_7(base, x0, x1, x2, x3, x4, x5, x6) \
164 EX_LD(LOAD(ldd, base + 0x00, %x0)); \ 170 EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \
165 EX_LD(LOAD(ldd, base + 0x08, %x1)); \ 171 EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \
166 EX_LD(LOAD(ldd, base + 0x10, %x2)); \ 172 EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \
167 EX_LD(LOAD(ldd, base + 0x18, %x3)); \ 173 EX_LD_FP(LOAD(ldd, base + 0x18, %x3)); \
168 EX_LD(LOAD(ldd, base + 0x20, %x4)); \ 174 EX_LD_FP(LOAD(ldd, base + 0x20, %x4)); \
169 EX_LD(LOAD(ldd, base + 0x28, %x5)); \ 175 EX_LD_FP(LOAD(ldd, base + 0x28, %x5)); \
170 EX_LD(LOAD(ldd, base + 0x30, %x6)); 176 EX_LD_FP(LOAD(ldd, base + 0x30, %x6));
171 177
172 .register %g2,#scratch 178 .register %g2,#scratch
173 .register %g3,#scratch 179 .register %g3,#scratch
@@ -275,11 +281,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
275 nop 281 nop
276 /* fall through for 0 < low bits < 8 */ 282 /* fall through for 0 < low bits < 8 */
277110: sub %o4, 64, %g2 283110: sub %o4, 64, %g2
278 EX_LD(LOAD_BLK(%g2, %f0)) 284 EX_LD_FP(LOAD_BLK(%g2, %f0))
2791: EX_ST(STORE_INIT(%g0, %o4 + %g3)) 2851: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
280 EX_LD(LOAD_BLK(%o4, %f16)) 286 EX_LD_FP(LOAD_BLK(%o4, %f16))
281 FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f14, f16) 287 FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f14, f16)
282 EX_ST(STORE_BLK(%f0, %o4 + %g3)) 288 EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
283 FREG_MOVE_8(f16, f18, f20, f22, f24, f26, f28, f30) 289 FREG_MOVE_8(f16, f18, f20, f22, f24, f26, f28, f30)
284 subcc %g1, 64, %g1 290 subcc %g1, 64, %g1
285 add %o4, 64, %o4 291 add %o4, 64, %o4
@@ -290,10 +296,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
290 296
291120: sub %o4, 56, %g2 297120: sub %o4, 56, %g2
292 FREG_LOAD_7(%g2, f0, f2, f4, f6, f8, f10, f12) 298 FREG_LOAD_7(%g2, f0, f2, f4, f6, f8, f10, f12)
2931: EX_ST(STORE_INIT(%g0, %o4 + %g3)) 2991: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
294 EX_LD(LOAD_BLK(%o4, %f16)) 300 EX_LD_FP(LOAD_BLK(%o4, %f16))
295 FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f16, f18) 301 FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f16, f18)
296 EX_ST(STORE_BLK(%f0, %o4 + %g3)) 302 EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
297 FREG_MOVE_7(f18, f20, f22, f24, f26, f28, f30) 303 FREG_MOVE_7(f18, f20, f22, f24, f26, f28, f30)
298 subcc %g1, 64, %g1 304 subcc %g1, 64, %g1
299 add %o4, 64, %o4 305 add %o4, 64, %o4
@@ -304,10 +310,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
304 310
305130: sub %o4, 48, %g2 311130: sub %o4, 48, %g2
306 FREG_LOAD_6(%g2, f0, f2, f4, f6, f8, f10) 312 FREG_LOAD_6(%g2, f0, f2, f4, f6, f8, f10)
3071: EX_ST(STORE_INIT(%g0, %o4 + %g3)) 3131: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
308 EX_LD(LOAD_BLK(%o4, %f16)) 314 EX_LD_FP(LOAD_BLK(%o4, %f16))
309 FREG_FROB(f0, f2, f4, f6, f8, f10, f16, f18, f20) 315 FREG_FROB(f0, f2, f4, f6, f8, f10, f16, f18, f20)
310 EX_ST(STORE_BLK(%f0, %o4 + %g3)) 316 EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
311 FREG_MOVE_6(f20, f22, f24, f26, f28, f30) 317 FREG_MOVE_6(f20, f22, f24, f26, f28, f30)
312 subcc %g1, 64, %g1 318 subcc %g1, 64, %g1
313 add %o4, 64, %o4 319 add %o4, 64, %o4
@@ -318,10 +324,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
318 324
319140: sub %o4, 40, %g2 325140: sub %o4, 40, %g2
320 FREG_LOAD_5(%g2, f0, f2, f4, f6, f8) 326 FREG_LOAD_5(%g2, f0, f2, f4, f6, f8)
3211: EX_ST(STORE_INIT(%g0, %o4 + %g3)) 3271: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
322 EX_LD(LOAD_BLK(%o4, %f16)) 328 EX_LD_FP(LOAD_BLK(%o4, %f16))
323 FREG_FROB(f0, f2, f4, f6, f8, f16, f18, f20, f22) 329 FREG_FROB(f0, f2, f4, f6, f8, f16, f18, f20, f22)
324 EX_ST(STORE_BLK(%f0, %o4 + %g3)) 330 EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
325 FREG_MOVE_5(f22, f24, f26, f28, f30) 331 FREG_MOVE_5(f22, f24, f26, f28, f30)
326 subcc %g1, 64, %g1 332 subcc %g1, 64, %g1
327 add %o4, 64, %o4 333 add %o4, 64, %o4
@@ -332,10 +338,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
332 338
333150: sub %o4, 32, %g2 339150: sub %o4, 32, %g2
334 FREG_LOAD_4(%g2, f0, f2, f4, f6) 340 FREG_LOAD_4(%g2, f0, f2, f4, f6)
3351: EX_ST(STORE_INIT(%g0, %o4 + %g3)) 3411: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
336 EX_LD(LOAD_BLK(%o4, %f16)) 342 EX_LD_FP(LOAD_BLK(%o4, %f16))
337 FREG_FROB(f0, f2, f4, f6, f16, f18, f20, f22, f24) 343 FREG_FROB(f0, f2, f4, f6, f16, f18, f20, f22, f24)
338 EX_ST(STORE_BLK(%f0, %o4 + %g3)) 344 EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
339 FREG_MOVE_4(f24, f26, f28, f30) 345 FREG_MOVE_4(f24, f26, f28, f30)
340 subcc %g1, 64, %g1 346 subcc %g1, 64, %g1
341 add %o4, 64, %o4 347 add %o4, 64, %o4
@@ -346,10 +352,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
346 352
347160: sub %o4, 24, %g2 353160: sub %o4, 24, %g2
348 FREG_LOAD_3(%g2, f0, f2, f4) 354 FREG_LOAD_3(%g2, f0, f2, f4)
3491: EX_ST(STORE_INIT(%g0, %o4 + %g3)) 3551: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
350 EX_LD(LOAD_BLK(%o4, %f16)) 356 EX_LD_FP(LOAD_BLK(%o4, %f16))
351 FREG_FROB(f0, f2, f4, f16, f18, f20, f22, f24, f26) 357 FREG_FROB(f0, f2, f4, f16, f18, f20, f22, f24, f26)
352 EX_ST(STORE_BLK(%f0, %o4 + %g3)) 358 EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
353 FREG_MOVE_3(f26, f28, f30) 359 FREG_MOVE_3(f26, f28, f30)
354 subcc %g1, 64, %g1 360 subcc %g1, 64, %g1
355 add %o4, 64, %o4 361 add %o4, 64, %o4
@@ -360,10 +366,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
360 366
361170: sub %o4, 16, %g2 367170: sub %o4, 16, %g2
362 FREG_LOAD_2(%g2, f0, f2) 368 FREG_LOAD_2(%g2, f0, f2)
3631: EX_ST(STORE_INIT(%g0, %o4 + %g3)) 3691: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
364 EX_LD(LOAD_BLK(%o4, %f16)) 370 EX_LD_FP(LOAD_BLK(%o4, %f16))
365 FREG_FROB(f0, f2, f16, f18, f20, f22, f24, f26, f28) 371 FREG_FROB(f0, f2, f16, f18, f20, f22, f24, f26, f28)
366 EX_ST(STORE_BLK(%f0, %o4 + %g3)) 372 EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
367 FREG_MOVE_2(f28, f30) 373 FREG_MOVE_2(f28, f30)
368 subcc %g1, 64, %g1 374 subcc %g1, 64, %g1
369 add %o4, 64, %o4 375 add %o4, 64, %o4
@@ -374,10 +380,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
374 380
375180: sub %o4, 8, %g2 381180: sub %o4, 8, %g2
376 FREG_LOAD_1(%g2, f0) 382 FREG_LOAD_1(%g2, f0)
3771: EX_ST(STORE_INIT(%g0, %o4 + %g3)) 3831: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
378 EX_LD(LOAD_BLK(%o4, %f16)) 384 EX_LD_FP(LOAD_BLK(%o4, %f16))
379 FREG_FROB(f0, f16, f18, f20, f22, f24, f26, f28, f30) 385 FREG_FROB(f0, f16, f18, f20, f22, f24, f26, f28, f30)
380 EX_ST(STORE_BLK(%f0, %o4 + %g3)) 386 EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
381 FREG_MOVE_1(f30) 387 FREG_MOVE_1(f30)
382 subcc %g1, 64, %g1 388 subcc %g1, 64, %g1
383 add %o4, 64, %o4 389 add %o4, 64, %o4
@@ -387,10 +393,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
387 nop 393 nop
388 394
389190: 395190:
3901: EX_ST(STORE_INIT(%g0, %o4 + %g3)) 3961: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
391 subcc %g1, 64, %g1 397 subcc %g1, 64, %g1
392 EX_LD(LOAD_BLK(%o4, %f0)) 398 EX_LD_FP(LOAD_BLK(%o4, %f0))
393 EX_ST(STORE_BLK(%f0, %o4 + %g3)) 399 EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
394 add %o4, 64, %o4 400 add %o4, 64, %o4
395 bne,pt %xcc, 1b 401 bne,pt %xcc, 1b
396 LOAD(prefetch, %o4 + 64, #one_read) 402 LOAD(prefetch, %o4 + 64, #one_read)
diff --git a/arch/sparc/lib/NG4copy_from_user.S b/arch/sparc/lib/NG4copy_from_user.S
index fd9f903ffa32..2e8ee7ad07a9 100644
--- a/arch/sparc/lib/NG4copy_from_user.S
+++ b/arch/sparc/lib/NG4copy_from_user.S
@@ -11,6 +11,14 @@
11 .text; \ 11 .text; \
12 .align 4; 12 .align 4;
13 13
14#define EX_LD_FP(x) \
1598: x; \
16 .section __ex_table,"a";\
17 .align 4; \
18 .word 98b, __retl_one_asi_fp;\
19 .text; \
20 .align 4;
21
14#ifndef ASI_AIUS 22#ifndef ASI_AIUS
15#define ASI_AIUS 0x11 23#define ASI_AIUS 0x11
16#endif 24#endif
diff --git a/arch/sparc/lib/NG4copy_to_user.S b/arch/sparc/lib/NG4copy_to_user.S
index 9744c4540a8d..be0bf4590df8 100644
--- a/arch/sparc/lib/NG4copy_to_user.S
+++ b/arch/sparc/lib/NG4copy_to_user.S
@@ -11,6 +11,14 @@
11 .text; \ 11 .text; \
12 .align 4; 12 .align 4;
13 13
14#define EX_ST_FP(x) \
1598: x; \
16 .section __ex_table,"a";\
17 .align 4; \
18 .word 98b, __retl_one_asi_fp;\
19 .text; \
20 .align 4;
21
14#ifndef ASI_AIUS 22#ifndef ASI_AIUS
15#define ASI_AIUS 0x11 23#define ASI_AIUS 0x11
16#endif 24#endif
diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S
index 83aeeb1dffdb..8e13ee1f4454 100644
--- a/arch/sparc/lib/NG4memcpy.S
+++ b/arch/sparc/lib/NG4memcpy.S
@@ -48,10 +48,16 @@
48#ifndef EX_LD 48#ifndef EX_LD
49#define EX_LD(x) x 49#define EX_LD(x) x
50#endif 50#endif
51#ifndef EX_LD_FP
52#define EX_LD_FP(x) x
53#endif
51 54
52#ifndef EX_ST 55#ifndef EX_ST
53#define EX_ST(x) x 56#define EX_ST(x) x
54#endif 57#endif
58#ifndef EX_ST_FP
59#define EX_ST_FP(x) x
60#endif
55 61
56#ifndef EX_RETVAL 62#ifndef EX_RETVAL
57#define EX_RETVAL(x) x 63#define EX_RETVAL(x) x
@@ -210,17 +216,17 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
210 sub %o2, %o4, %o2 216 sub %o2, %o4, %o2
211 alignaddr %o1, %g0, %g1 217 alignaddr %o1, %g0, %g1
212 add %o1, %o4, %o1 218 add %o1, %o4, %o1
213 EX_LD(LOAD(ldd, %g1 + 0x00, %f0)) 219 EX_LD_FP(LOAD(ldd, %g1 + 0x00, %f0))
2141: EX_LD(LOAD(ldd, %g1 + 0x08, %f2)) 2201: EX_LD_FP(LOAD(ldd, %g1 + 0x08, %f2))
215 subcc %o4, 0x40, %o4 221 subcc %o4, 0x40, %o4
216 EX_LD(LOAD(ldd, %g1 + 0x10, %f4)) 222 EX_LD_FP(LOAD(ldd, %g1 + 0x10, %f4))
217 EX_LD(LOAD(ldd, %g1 + 0x18, %f6)) 223 EX_LD_FP(LOAD(ldd, %g1 + 0x18, %f6))
218 EX_LD(LOAD(ldd, %g1 + 0x20, %f8)) 224 EX_LD_FP(LOAD(ldd, %g1 + 0x20, %f8))
219 EX_LD(LOAD(ldd, %g1 + 0x28, %f10)) 225 EX_LD_FP(LOAD(ldd, %g1 + 0x28, %f10))
220 EX_LD(LOAD(ldd, %g1 + 0x30, %f12)) 226 EX_LD_FP(LOAD(ldd, %g1 + 0x30, %f12))
221 EX_LD(LOAD(ldd, %g1 + 0x38, %f14)) 227 EX_LD_FP(LOAD(ldd, %g1 + 0x38, %f14))
222 faligndata %f0, %f2, %f16 228 faligndata %f0, %f2, %f16
223 EX_LD(LOAD(ldd, %g1 + 0x40, %f0)) 229 EX_LD_FP(LOAD(ldd, %g1 + 0x40, %f0))
224 faligndata %f2, %f4, %f18 230 faligndata %f2, %f4, %f18
225 add %g1, 0x40, %g1 231 add %g1, 0x40, %g1
226 faligndata %f4, %f6, %f20 232 faligndata %f4, %f6, %f20
@@ -229,14 +235,14 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
229 faligndata %f10, %f12, %f26 235 faligndata %f10, %f12, %f26
230 faligndata %f12, %f14, %f28 236 faligndata %f12, %f14, %f28
231 faligndata %f14, %f0, %f30 237 faligndata %f14, %f0, %f30
232 EX_ST(STORE(std, %f16, %o0 + 0x00)) 238 EX_ST_FP(STORE(std, %f16, %o0 + 0x00))
233 EX_ST(STORE(std, %f18, %o0 + 0x08)) 239 EX_ST_FP(STORE(std, %f18, %o0 + 0x08))
234 EX_ST(STORE(std, %f20, %o0 + 0x10)) 240 EX_ST_FP(STORE(std, %f20, %o0 + 0x10))
235 EX_ST(STORE(std, %f22, %o0 + 0x18)) 241 EX_ST_FP(STORE(std, %f22, %o0 + 0x18))
236 EX_ST(STORE(std, %f24, %o0 + 0x20)) 242 EX_ST_FP(STORE(std, %f24, %o0 + 0x20))
237 EX_ST(STORE(std, %f26, %o0 + 0x28)) 243 EX_ST_FP(STORE(std, %f26, %o0 + 0x28))
238 EX_ST(STORE(std, %f28, %o0 + 0x30)) 244 EX_ST_FP(STORE(std, %f28, %o0 + 0x30))
239 EX_ST(STORE(std, %f30, %o0 + 0x38)) 245 EX_ST_FP(STORE(std, %f30, %o0 + 0x38))
240 add %o0, 0x40, %o0 246 add %o0, 0x40, %o0
241 bne,pt %icc, 1b 247 bne,pt %icc, 1b
242 LOAD(prefetch, %g1 + 0x200, #n_reads_strong) 248 LOAD(prefetch, %g1 + 0x200, #n_reads_strong)
diff --git a/arch/sparc/lib/U1copy_from_user.S b/arch/sparc/lib/U1copy_from_user.S
index a6ae2ea04bf5..ecc5692fa2b4 100644
--- a/arch/sparc/lib/U1copy_from_user.S
+++ b/arch/sparc/lib/U1copy_from_user.S
@@ -11,6 +11,14 @@
11 .text; \ 11 .text; \
12 .align 4; 12 .align 4;
13 13
14#define EX_LD_FP(x) \
1598: x; \
16 .section __ex_table,"a";\
17 .align 4; \
18 .word 98b, __retl_one_fp;\
19 .text; \
20 .align 4;
21
14#define FUNC_NAME ___copy_from_user 22#define FUNC_NAME ___copy_from_user
15#define LOAD(type,addr,dest) type##a [addr] %asi, dest 23#define LOAD(type,addr,dest) type##a [addr] %asi, dest
16#define LOAD_BLK(addr,dest) ldda [addr] ASI_BLK_AIUS, dest 24#define LOAD_BLK(addr,dest) ldda [addr] ASI_BLK_AIUS, dest
diff --git a/arch/sparc/lib/U1copy_to_user.S b/arch/sparc/lib/U1copy_to_user.S
index f4b970eeb485..9eea392e44d4 100644
--- a/arch/sparc/lib/U1copy_to_user.S
+++ b/arch/sparc/lib/U1copy_to_user.S
@@ -11,6 +11,14 @@
11 .text; \ 11 .text; \
12 .align 4; 12 .align 4;
13 13
14#define EX_ST_FP(x) \
1598: x; \
16 .section __ex_table,"a";\
17 .align 4; \
18 .word 98b, __retl_one_fp;\
19 .text; \
20 .align 4;
21
14#define FUNC_NAME ___copy_to_user 22#define FUNC_NAME ___copy_to_user
15#define STORE(type,src,addr) type##a src, [addr] ASI_AIUS 23#define STORE(type,src,addr) type##a src, [addr] ASI_AIUS
16#define STORE_BLK(src,addr) stda src, [addr] ASI_BLK_AIUS 24#define STORE_BLK(src,addr) stda src, [addr] ASI_BLK_AIUS
diff --git a/arch/sparc/lib/U1memcpy.S b/arch/sparc/lib/U1memcpy.S
index b67142b7768e..3e6209ebb7d7 100644
--- a/arch/sparc/lib/U1memcpy.S
+++ b/arch/sparc/lib/U1memcpy.S
@@ -25,10 +25,16 @@
25#ifndef EX_LD 25#ifndef EX_LD
26#define EX_LD(x) x 26#define EX_LD(x) x
27#endif 27#endif
28#ifndef EX_LD_FP
29#define EX_LD_FP(x) x
30#endif
28 31
29#ifndef EX_ST 32#ifndef EX_ST
30#define EX_ST(x) x 33#define EX_ST(x) x
31#endif 34#endif
35#ifndef EX_ST_FP
36#define EX_ST_FP(x) x
37#endif
32 38
33#ifndef EX_RETVAL 39#ifndef EX_RETVAL
34#define EX_RETVAL(x) x 40#define EX_RETVAL(x) x
@@ -73,8 +79,8 @@
73 faligndata %f8, %f9, %f62; 79 faligndata %f8, %f9, %f62;
74 80
75#define MAIN_LOOP_CHUNK(src, dest, fdest, fsrc, len, jmptgt) \ 81#define MAIN_LOOP_CHUNK(src, dest, fdest, fsrc, len, jmptgt) \
76 EX_LD(LOAD_BLK(%src, %fdest)); \ 82 EX_LD_FP(LOAD_BLK(%src, %fdest)); \
77 EX_ST(STORE_BLK(%fsrc, %dest)); \ 83 EX_ST_FP(STORE_BLK(%fsrc, %dest)); \
78 add %src, 0x40, %src; \ 84 add %src, 0x40, %src; \
79 subcc %len, 0x40, %len; \ 85 subcc %len, 0x40, %len; \
80 be,pn %xcc, jmptgt; \ 86 be,pn %xcc, jmptgt; \
@@ -89,12 +95,12 @@
89 95
90#define DO_SYNC membar #Sync; 96#define DO_SYNC membar #Sync;
91#define STORE_SYNC(dest, fsrc) \ 97#define STORE_SYNC(dest, fsrc) \
92 EX_ST(STORE_BLK(%fsrc, %dest)); \ 98 EX_ST_FP(STORE_BLK(%fsrc, %dest)); \
93 add %dest, 0x40, %dest; \ 99 add %dest, 0x40, %dest; \
94 DO_SYNC 100 DO_SYNC
95 101
96#define STORE_JUMP(dest, fsrc, target) \ 102#define STORE_JUMP(dest, fsrc, target) \
97 EX_ST(STORE_BLK(%fsrc, %dest)); \ 103 EX_ST_FP(STORE_BLK(%fsrc, %dest)); \
98 add %dest, 0x40, %dest; \ 104 add %dest, 0x40, %dest; \
99 ba,pt %xcc, target; \ 105 ba,pt %xcc, target; \
100 nop; 106 nop;
@@ -103,7 +109,7 @@
103 subcc %left, 8, %left;\ 109 subcc %left, 8, %left;\
104 bl,pn %xcc, 95f; \ 110 bl,pn %xcc, 95f; \
105 faligndata %f0, %f1, %f48; \ 111 faligndata %f0, %f1, %f48; \
106 EX_ST(STORE(std, %f48, %dest)); \ 112 EX_ST_FP(STORE(std, %f48, %dest)); \
107 add %dest, 8, %dest; 113 add %dest, 8, %dest;
108 114
109#define UNEVEN_VISCHUNK_LAST(dest, f0, f1, left) \ 115#define UNEVEN_VISCHUNK_LAST(dest, f0, f1, left) \
@@ -160,8 +166,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
160 and %g2, 0x38, %g2 166 and %g2, 0x38, %g2
161 167
1621: subcc %g1, 0x1, %g1 1681: subcc %g1, 0x1, %g1
163 EX_LD(LOAD(ldub, %o1 + 0x00, %o3)) 169 EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3))
164 EX_ST(STORE(stb, %o3, %o1 + %GLOBAL_SPARE)) 170 EX_ST_FP(STORE(stb, %o3, %o1 + %GLOBAL_SPARE))
165 bgu,pt %XCC, 1b 171 bgu,pt %XCC, 1b
166 add %o1, 0x1, %o1 172 add %o1, 0x1, %o1
167 173
@@ -172,20 +178,20 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
172 be,pt %icc, 3f 178 be,pt %icc, 3f
173 alignaddr %o1, %g0, %o1 179 alignaddr %o1, %g0, %o1
174 180
175 EX_LD(LOAD(ldd, %o1, %f4)) 181 EX_LD_FP(LOAD(ldd, %o1, %f4))
1761: EX_LD(LOAD(ldd, %o1 + 0x8, %f6)) 1821: EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6))
177 add %o1, 0x8, %o1 183 add %o1, 0x8, %o1
178 subcc %g2, 0x8, %g2 184 subcc %g2, 0x8, %g2
179 faligndata %f4, %f6, %f0 185 faligndata %f4, %f6, %f0
180 EX_ST(STORE(std, %f0, %o0)) 186 EX_ST_FP(STORE(std, %f0, %o0))
181 be,pn %icc, 3f 187 be,pn %icc, 3f
182 add %o0, 0x8, %o0 188 add %o0, 0x8, %o0
183 189
184 EX_LD(LOAD(ldd, %o1 + 0x8, %f4)) 190 EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4))
185 add %o1, 0x8, %o1 191 add %o1, 0x8, %o1
186 subcc %g2, 0x8, %g2 192 subcc %g2, 0x8, %g2
187 faligndata %f6, %f4, %f0 193 faligndata %f6, %f4, %f0
188 EX_ST(STORE(std, %f0, %o0)) 194 EX_ST_FP(STORE(std, %f0, %o0))
189 bne,pt %icc, 1b 195 bne,pt %icc, 1b
190 add %o0, 0x8, %o0 196 add %o0, 0x8, %o0
191 197
@@ -208,13 +214,13 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
208 add %g1, %GLOBAL_SPARE, %g1 214 add %g1, %GLOBAL_SPARE, %g1
209 subcc %o2, %g3, %o2 215 subcc %o2, %g3, %o2
210 216
211 EX_LD(LOAD_BLK(%o1, %f0)) 217 EX_LD_FP(LOAD_BLK(%o1, %f0))
212 add %o1, 0x40, %o1 218 add %o1, 0x40, %o1
213 add %g1, %g3, %g1 219 add %g1, %g3, %g1
214 EX_LD(LOAD_BLK(%o1, %f16)) 220 EX_LD_FP(LOAD_BLK(%o1, %f16))
215 add %o1, 0x40, %o1 221 add %o1, 0x40, %o1
216 sub %GLOBAL_SPARE, 0x80, %GLOBAL_SPARE 222 sub %GLOBAL_SPARE, 0x80, %GLOBAL_SPARE
217 EX_LD(LOAD_BLK(%o1, %f32)) 223 EX_LD_FP(LOAD_BLK(%o1, %f32))
218 add %o1, 0x40, %o1 224 add %o1, 0x40, %o1
219 225
220 /* There are 8 instances of the unrolled loop, 226 /* There are 8 instances of the unrolled loop,
@@ -426,28 +432,28 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
42662: FINISH_VISCHUNK(o0, f44, f46, g3) 43262: FINISH_VISCHUNK(o0, f44, f46, g3)
42763: UNEVEN_VISCHUNK_LAST(o0, f46, f0, g3) 43363: UNEVEN_VISCHUNK_LAST(o0, f46, f0, g3)
428 434
42993: EX_LD(LOAD(ldd, %o1, %f2)) 43593: EX_LD_FP(LOAD(ldd, %o1, %f2))
430 add %o1, 8, %o1 436 add %o1, 8, %o1
431 subcc %g3, 8, %g3 437 subcc %g3, 8, %g3
432 faligndata %f0, %f2, %f8 438 faligndata %f0, %f2, %f8
433 EX_ST(STORE(std, %f8, %o0)) 439 EX_ST_FP(STORE(std, %f8, %o0))
434 bl,pn %xcc, 95f 440 bl,pn %xcc, 95f
435 add %o0, 8, %o0 441 add %o0, 8, %o0
436 EX_LD(LOAD(ldd, %o1, %f0)) 442 EX_LD_FP(LOAD(ldd, %o1, %f0))
437 add %o1, 8, %o1 443 add %o1, 8, %o1
438 subcc %g3, 8, %g3 444 subcc %g3, 8, %g3
439 faligndata %f2, %f0, %f8 445 faligndata %f2, %f0, %f8
440 EX_ST(STORE(std, %f8, %o0)) 446 EX_ST_FP(STORE(std, %f8, %o0))
441 bge,pt %xcc, 93b 447 bge,pt %xcc, 93b
442 add %o0, 8, %o0 448 add %o0, 8, %o0
443 449
44495: brz,pt %o2, 2f 45095: brz,pt %o2, 2f
445 mov %g1, %o1 451 mov %g1, %o1
446 452
4471: EX_LD(LOAD(ldub, %o1, %o3)) 4531: EX_LD_FP(LOAD(ldub, %o1, %o3))
448 add %o1, 1, %o1 454 add %o1, 1, %o1
449 subcc %o2, 1, %o2 455 subcc %o2, 1, %o2
450 EX_ST(STORE(stb, %o3, %o0)) 456 EX_ST_FP(STORE(stb, %o3, %o0))
451 bne,pt %xcc, 1b 457 bne,pt %xcc, 1b
452 add %o0, 1, %o0 458 add %o0, 1, %o0
453 459
diff --git a/arch/sparc/lib/U3copy_from_user.S b/arch/sparc/lib/U3copy_from_user.S
index b1acd1331c33..88ad73d86fe4 100644
--- a/arch/sparc/lib/U3copy_from_user.S
+++ b/arch/sparc/lib/U3copy_from_user.S
@@ -11,6 +11,14 @@
11 .text; \ 11 .text; \
12 .align 4; 12 .align 4;
13 13
14#define EX_LD_FP(x) \
1598: x; \
16 .section __ex_table,"a";\
17 .align 4; \
18 .word 98b, __retl_one_fp;\
19 .text; \
20 .align 4;
21
14#define FUNC_NAME U3copy_from_user 22#define FUNC_NAME U3copy_from_user
15#define LOAD(type,addr,dest) type##a [addr] %asi, dest 23#define LOAD(type,addr,dest) type##a [addr] %asi, dest
16#define EX_RETVAL(x) 0 24#define EX_RETVAL(x) 0
diff --git a/arch/sparc/lib/U3copy_to_user.S b/arch/sparc/lib/U3copy_to_user.S
index ef1e493afdfa..845139d75537 100644
--- a/arch/sparc/lib/U3copy_to_user.S
+++ b/arch/sparc/lib/U3copy_to_user.S
@@ -11,6 +11,14 @@
11 .text; \ 11 .text; \
12 .align 4; 12 .align 4;
13 13
14#define EX_ST_FP(x) \
1598: x; \
16 .section __ex_table,"a";\
17 .align 4; \
18 .word 98b, __retl_one_fp;\
19 .text; \
20 .align 4;
21
14#define FUNC_NAME U3copy_to_user 22#define FUNC_NAME U3copy_to_user
15#define STORE(type,src,addr) type##a src, [addr] ASI_AIUS 23#define STORE(type,src,addr) type##a src, [addr] ASI_AIUS
16#define STORE_BLK(src,addr) stda src, [addr] ASI_BLK_AIUS 24#define STORE_BLK(src,addr) stda src, [addr] ASI_BLK_AIUS
diff --git a/arch/sparc/lib/U3memcpy.S b/arch/sparc/lib/U3memcpy.S
index 7cae9cc6a204..491ee69e4995 100644
--- a/arch/sparc/lib/U3memcpy.S
+++ b/arch/sparc/lib/U3memcpy.S
@@ -24,10 +24,16 @@
24#ifndef EX_LD 24#ifndef EX_LD
25#define EX_LD(x) x 25#define EX_LD(x) x
26#endif 26#endif
27#ifndef EX_LD_FP
28#define EX_LD_FP(x) x
29#endif
27 30
28#ifndef EX_ST 31#ifndef EX_ST
29#define EX_ST(x) x 32#define EX_ST(x) x
30#endif 33#endif
34#ifndef EX_ST_FP
35#define EX_ST_FP(x) x
36#endif
31 37
32#ifndef EX_RETVAL 38#ifndef EX_RETVAL
33#define EX_RETVAL(x) x 39#define EX_RETVAL(x) x
@@ -120,8 +126,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
120 and %g2, 0x38, %g2 126 and %g2, 0x38, %g2
121 127
1221: subcc %g1, 0x1, %g1 1281: subcc %g1, 0x1, %g1
123 EX_LD(LOAD(ldub, %o1 + 0x00, %o3)) 129 EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3))
124 EX_ST(STORE(stb, %o3, %o1 + GLOBAL_SPARE)) 130 EX_ST_FP(STORE(stb, %o3, %o1 + GLOBAL_SPARE))
125 bgu,pt %XCC, 1b 131 bgu,pt %XCC, 1b
126 add %o1, 0x1, %o1 132 add %o1, 0x1, %o1
127 133
@@ -132,20 +138,20 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
132 be,pt %icc, 3f 138 be,pt %icc, 3f
133 alignaddr %o1, %g0, %o1 139 alignaddr %o1, %g0, %o1
134 140
135 EX_LD(LOAD(ldd, %o1, %f4)) 141 EX_LD_FP(LOAD(ldd, %o1, %f4))
1361: EX_LD(LOAD(ldd, %o1 + 0x8, %f6)) 1421: EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6))
137 add %o1, 0x8, %o1 143 add %o1, 0x8, %o1
138 subcc %g2, 0x8, %g2 144 subcc %g2, 0x8, %g2
139 faligndata %f4, %f6, %f0 145 faligndata %f4, %f6, %f0
140 EX_ST(STORE(std, %f0, %o0)) 146 EX_ST_FP(STORE(std, %f0, %o0))
141 be,pn %icc, 3f 147 be,pn %icc, 3f
142 add %o0, 0x8, %o0 148 add %o0, 0x8, %o0
143 149
144 EX_LD(LOAD(ldd, %o1 + 0x8, %f4)) 150 EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4))
145 add %o1, 0x8, %o1 151 add %o1, 0x8, %o1
146 subcc %g2, 0x8, %g2 152 subcc %g2, 0x8, %g2
147 faligndata %f6, %f4, %f2 153 faligndata %f6, %f4, %f2
148 EX_ST(STORE(std, %f2, %o0)) 154 EX_ST_FP(STORE(std, %f2, %o0))
149 bne,pt %icc, 1b 155 bne,pt %icc, 1b
150 add %o0, 0x8, %o0 156 add %o0, 0x8, %o0
151 157
@@ -155,25 +161,25 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
155 LOAD(prefetch, %o1 + 0x080, #one_read) 161 LOAD(prefetch, %o1 + 0x080, #one_read)
156 LOAD(prefetch, %o1 + 0x0c0, #one_read) 162 LOAD(prefetch, %o1 + 0x0c0, #one_read)
157 LOAD(prefetch, %o1 + 0x100, #one_read) 163 LOAD(prefetch, %o1 + 0x100, #one_read)
158 EX_LD(LOAD(ldd, %o1 + 0x000, %f0)) 164 EX_LD_FP(LOAD(ldd, %o1 + 0x000, %f0))
159 LOAD(prefetch, %o1 + 0x140, #one_read) 165 LOAD(prefetch, %o1 + 0x140, #one_read)
160 EX_LD(LOAD(ldd, %o1 + 0x008, %f2)) 166 EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2))
161 LOAD(prefetch, %o1 + 0x180, #one_read) 167 LOAD(prefetch, %o1 + 0x180, #one_read)
162 EX_LD(LOAD(ldd, %o1 + 0x010, %f4)) 168 EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4))
163 LOAD(prefetch, %o1 + 0x1c0, #one_read) 169 LOAD(prefetch, %o1 + 0x1c0, #one_read)
164 faligndata %f0, %f2, %f16 170 faligndata %f0, %f2, %f16
165 EX_LD(LOAD(ldd, %o1 + 0x018, %f6)) 171 EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6))
166 faligndata %f2, %f4, %f18 172 faligndata %f2, %f4, %f18
167 EX_LD(LOAD(ldd, %o1 + 0x020, %f8)) 173 EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8))
168 faligndata %f4, %f6, %f20 174 faligndata %f4, %f6, %f20
169 EX_LD(LOAD(ldd, %o1 + 0x028, %f10)) 175 EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10))
170 faligndata %f6, %f8, %f22 176 faligndata %f6, %f8, %f22
171 177
172 EX_LD(LOAD(ldd, %o1 + 0x030, %f12)) 178 EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12))
173 faligndata %f8, %f10, %f24 179 faligndata %f8, %f10, %f24
174 EX_LD(LOAD(ldd, %o1 + 0x038, %f14)) 180 EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14))
175 faligndata %f10, %f12, %f26 181 faligndata %f10, %f12, %f26
176 EX_LD(LOAD(ldd, %o1 + 0x040, %f0)) 182 EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0))
177 183
178 subcc GLOBAL_SPARE, 0x80, GLOBAL_SPARE 184 subcc GLOBAL_SPARE, 0x80, GLOBAL_SPARE
179 add %o1, 0x40, %o1 185 add %o1, 0x40, %o1
@@ -184,26 +190,26 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
184 190
185 .align 64 191 .align 64
1861: 1921:
187 EX_LD(LOAD(ldd, %o1 + 0x008, %f2)) 193 EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2))
188 faligndata %f12, %f14, %f28 194 faligndata %f12, %f14, %f28
189 EX_LD(LOAD(ldd, %o1 + 0x010, %f4)) 195 EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4))
190 faligndata %f14, %f0, %f30 196 faligndata %f14, %f0, %f30
191 EX_ST(STORE_BLK(%f16, %o0)) 197 EX_ST_FP(STORE_BLK(%f16, %o0))
192 EX_LD(LOAD(ldd, %o1 + 0x018, %f6)) 198 EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6))
193 faligndata %f0, %f2, %f16 199 faligndata %f0, %f2, %f16
194 add %o0, 0x40, %o0 200 add %o0, 0x40, %o0
195 201
196 EX_LD(LOAD(ldd, %o1 + 0x020, %f8)) 202 EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8))
197 faligndata %f2, %f4, %f18 203 faligndata %f2, %f4, %f18
198 EX_LD(LOAD(ldd, %o1 + 0x028, %f10)) 204 EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10))
199 faligndata %f4, %f6, %f20 205 faligndata %f4, %f6, %f20
200 EX_LD(LOAD(ldd, %o1 + 0x030, %f12)) 206 EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12))
201 subcc %o3, 0x01, %o3 207 subcc %o3, 0x01, %o3
202 faligndata %f6, %f8, %f22 208 faligndata %f6, %f8, %f22
203 EX_LD(LOAD(ldd, %o1 + 0x038, %f14)) 209 EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14))
204 210
205 faligndata %f8, %f10, %f24 211 faligndata %f8, %f10, %f24
206 EX_LD(LOAD(ldd, %o1 + 0x040, %f0)) 212 EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0))
207 LOAD(prefetch, %o1 + 0x1c0, #one_read) 213 LOAD(prefetch, %o1 + 0x1c0, #one_read)
208 faligndata %f10, %f12, %f26 214 faligndata %f10, %f12, %f26
209 bg,pt %XCC, 1b 215 bg,pt %XCC, 1b
@@ -211,29 +217,29 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
211 217
212 /* Finally we copy the last full 64-byte block. */ 218 /* Finally we copy the last full 64-byte block. */
2132: 2192:
214 EX_LD(LOAD(ldd, %o1 + 0x008, %f2)) 220 EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2))
215 faligndata %f12, %f14, %f28 221 faligndata %f12, %f14, %f28
216 EX_LD(LOAD(ldd, %o1 + 0x010, %f4)) 222 EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4))
217 faligndata %f14, %f0, %f30 223 faligndata %f14, %f0, %f30
218 EX_ST(STORE_BLK(%f16, %o0)) 224 EX_ST_FP(STORE_BLK(%f16, %o0))
219 EX_LD(LOAD(ldd, %o1 + 0x018, %f6)) 225 EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6))
220 faligndata %f0, %f2, %f16 226 faligndata %f0, %f2, %f16
221 EX_LD(LOAD(ldd, %o1 + 0x020, %f8)) 227 EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8))
222 faligndata %f2, %f4, %f18 228 faligndata %f2, %f4, %f18
223 EX_LD(LOAD(ldd, %o1 + 0x028, %f10)) 229 EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10))
224 faligndata %f4, %f6, %f20 230 faligndata %f4, %f6, %f20
225 EX_LD(LOAD(ldd, %o1 + 0x030, %f12)) 231 EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12))
226 faligndata %f6, %f8, %f22 232 faligndata %f6, %f8, %f22
227 EX_LD(LOAD(ldd, %o1 + 0x038, %f14)) 233 EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14))
228 faligndata %f8, %f10, %f24 234 faligndata %f8, %f10, %f24
229 cmp %g1, 0 235 cmp %g1, 0
230 be,pt %XCC, 1f 236 be,pt %XCC, 1f
231 add %o0, 0x40, %o0 237 add %o0, 0x40, %o0
232 EX_LD(LOAD(ldd, %o1 + 0x040, %f0)) 238 EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0))
2331: faligndata %f10, %f12, %f26 2391: faligndata %f10, %f12, %f26
234 faligndata %f12, %f14, %f28 240 faligndata %f12, %f14, %f28
235 faligndata %f14, %f0, %f30 241 faligndata %f14, %f0, %f30
236 EX_ST(STORE_BLK(%f16, %o0)) 242 EX_ST_FP(STORE_BLK(%f16, %o0))
237 add %o0, 0x40, %o0 243 add %o0, 0x40, %o0
238 add %o1, 0x40, %o1 244 add %o1, 0x40, %o1
239 membar #Sync 245 membar #Sync
@@ -253,20 +259,20 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
253 259
254 sub %o2, %g2, %o2 260 sub %o2, %g2, %o2
255 be,a,pt %XCC, 1f 261 be,a,pt %XCC, 1f
256 EX_LD(LOAD(ldd, %o1 + 0x00, %f0)) 262 EX_LD_FP(LOAD(ldd, %o1 + 0x00, %f0))
257 263
2581: EX_LD(LOAD(ldd, %o1 + 0x08, %f2)) 2641: EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f2))
259 add %o1, 0x8, %o1 265 add %o1, 0x8, %o1
260 subcc %g2, 0x8, %g2 266 subcc %g2, 0x8, %g2
261 faligndata %f0, %f2, %f8 267 faligndata %f0, %f2, %f8
262 EX_ST(STORE(std, %f8, %o0)) 268 EX_ST_FP(STORE(std, %f8, %o0))
263 be,pn %XCC, 2f 269 be,pn %XCC, 2f
264 add %o0, 0x8, %o0 270 add %o0, 0x8, %o0
265 EX_LD(LOAD(ldd, %o1 + 0x08, %f0)) 271 EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f0))
266 add %o1, 0x8, %o1 272 add %o1, 0x8, %o1
267 subcc %g2, 0x8, %g2 273 subcc %g2, 0x8, %g2
268 faligndata %f2, %f0, %f8 274 faligndata %f2, %f0, %f8
269 EX_ST(STORE(std, %f8, %o0)) 275 EX_ST_FP(STORE(std, %f8, %o0))
270 bne,pn %XCC, 1b 276 bne,pn %XCC, 1b
271 add %o0, 0x8, %o0 277 add %o0, 0x8, %o0
272 278
diff --git a/arch/tile/kernel/perf_event.c b/arch/tile/kernel/perf_event.c
index bb509cee3b59..8767060d70fb 100644
--- a/arch/tile/kernel/perf_event.c
+++ b/arch/tile/kernel/perf_event.c
@@ -21,7 +21,7 @@
21 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar 21 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
22 * Copyright (C) 2009 Jaswinder Singh Rajput 22 * Copyright (C) 2009 Jaswinder Singh Rajput
23 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter 23 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
24 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 24 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
25 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com> 25 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
26 * Copyright (C) 2009 Google, Inc., Stephane Eranian 26 * Copyright (C) 2009 Google, Inc., Stephane Eranian
27 */ 27 */
diff --git a/arch/um/Makefile b/arch/um/Makefile
index 25ed4098640e..e3abe6f3156d 100644
--- a/arch/um/Makefile
+++ b/arch/um/Makefile
@@ -131,7 +131,7 @@ export LDS_ELF_FORMAT := $(ELF_FORMAT)
131# The wrappers will select whether using "malloc" or the kernel allocator. 131# The wrappers will select whether using "malloc" or the kernel allocator.
132LINK_WRAPS = -Wl,--wrap,malloc -Wl,--wrap,free -Wl,--wrap,calloc 132LINK_WRAPS = -Wl,--wrap,malloc -Wl,--wrap,free -Wl,--wrap,calloc
133 133
134LD_FLAGS_CMDLINE = $(foreach opt,$(LDFLAGS),-Wl,$(opt)) -lrt 134LD_FLAGS_CMDLINE = $(foreach opt,$(LDFLAGS),-Wl,$(opt))
135 135
136# Used by link-vmlinux.sh which has special support for um link 136# Used by link-vmlinux.sh which has special support for um link
137export CFLAGS_vmlinux := $(LINK-y) $(LINK_WRAPS) $(LD_FLAGS_CMDLINE) 137export CFLAGS_vmlinux := $(LINK-y) $(LINK_WRAPS) $(LD_FLAGS_CMDLINE)
diff --git a/arch/um/drivers/net_user.c b/arch/um/drivers/net_user.c
index e697a4136707..e9f8445861dc 100644
--- a/arch/um/drivers/net_user.c
+++ b/arch/um/drivers/net_user.c
@@ -249,21 +249,23 @@ void close_addr(unsigned char *addr, unsigned char *netmask, void *arg)
249 249
250char *split_if_spec(char *str, ...) 250char *split_if_spec(char *str, ...)
251{ 251{
252 char **arg, *end; 252 char **arg, *end, *ret = NULL;
253 va_list ap; 253 va_list ap;
254 254
255 va_start(ap, str); 255 va_start(ap, str);
256 while ((arg = va_arg(ap, char **)) != NULL) { 256 while ((arg = va_arg(ap, char **)) != NULL) {
257 if (*str == '\0') 257 if (*str == '\0')
258 return NULL; 258 goto out;
259 end = strchr(str, ','); 259 end = strchr(str, ',');
260 if (end != str) 260 if (end != str)
261 *arg = str; 261 *arg = str;
262 if (end == NULL) 262 if (end == NULL)
263 return NULL; 263 goto out;
264 *end++ = '\0'; 264 *end++ = '\0';
265 str = end; 265 str = end;
266 } 266 }
267 ret = str;
268out:
267 va_end(ap); 269 va_end(ap);
268 return str; 270 return ret;
269} 271}
diff --git a/arch/um/kernel/signal.c b/arch/um/kernel/signal.c
index 57acbd67d85d..fc8be0e3a4ff 100644
--- a/arch/um/kernel/signal.c
+++ b/arch/um/kernel/signal.c
@@ -69,7 +69,7 @@ void do_signal(struct pt_regs *regs)
69 struct ksignal ksig; 69 struct ksignal ksig;
70 int handled_sig = 0; 70 int handled_sig = 0;
71 71
72 while (get_signal(&ksig)) { 72 if (get_signal(&ksig)) {
73 handled_sig = 1; 73 handled_sig = 1;
74 /* Whee! Actually deliver the signal. */ 74 /* Whee! Actually deliver the signal. */
75 handle_signal(&ksig, regs); 75 handle_signal(&ksig, regs);
diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
index 0033e96c3f09..9011a88353de 100644
--- a/arch/x86/boot/boot.h
+++ b/arch/x86/boot/boot.h
@@ -23,7 +23,6 @@
23#include <stdarg.h> 23#include <stdarg.h>
24#include <linux/types.h> 24#include <linux/types.h>
25#include <linux/edd.h> 25#include <linux/edd.h>
26#include <asm/boot.h>
27#include <asm/setup.h> 26#include <asm/setup.h>
28#include "bitops.h" 27#include "bitops.h"
29#include "ctype.h" 28#include "ctype.h"
diff --git a/arch/x86/boot/video-mode.c b/arch/x86/boot/video-mode.c
index aa8a96b052e3..95c7a818c0ed 100644
--- a/arch/x86/boot/video-mode.c
+++ b/arch/x86/boot/video-mode.c
@@ -19,6 +19,8 @@
19#include "video.h" 19#include "video.h"
20#include "vesa.h" 20#include "vesa.h"
21 21
22#include <uapi/asm/boot.h>
23
22/* 24/*
23 * Common variables 25 * Common variables
24 */ 26 */
diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
index 05111bb8d018..77780e386e9b 100644
--- a/arch/x86/boot/video.c
+++ b/arch/x86/boot/video.c
@@ -13,6 +13,8 @@
13 * Select video mode 13 * Select video mode
14 */ 14 */
15 15
16#include <uapi/asm/boot.h>
17
16#include "boot.h" 18#include "boot.h"
17#include "video.h" 19#include "video.h"
18#include "vesa.h" 20#include "vesa.h"
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 53616ca03244..a55697d19824 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -509,6 +509,17 @@ END(irq_entries_start)
509 * tracking that we're in kernel mode. 509 * tracking that we're in kernel mode.
510 */ 510 */
511 SWAPGS 511 SWAPGS
512
513 /*
514 * We need to tell lockdep that IRQs are off. We can't do this until
515 * we fix gsbase, and we should do it before enter_from_user_mode
 516 * (which can take locks). Since TRACE_IRQS_OFF is idempotent,
517 * the simplest way to handle it is to just call it twice if
518 * we enter from user mode. There's no reason to optimize this since
519 * TRACE_IRQS_OFF is a no-op if lockdep is off.
520 */
521 TRACE_IRQS_OFF
522
512#ifdef CONFIG_CONTEXT_TRACKING 523#ifdef CONFIG_CONTEXT_TRACKING
513 call enter_from_user_mode 524 call enter_from_user_mode
514#endif 525#endif
@@ -1049,12 +1060,18 @@ ENTRY(error_entry)
1049 SWAPGS 1060 SWAPGS
1050 1061
1051.Lerror_entry_from_usermode_after_swapgs: 1062.Lerror_entry_from_usermode_after_swapgs:
1063 /*
1064 * We need to tell lockdep that IRQs are off. We can't do this until
1065 * we fix gsbase, and we should do it before enter_from_user_mode
1066 * (which can take locks).
1067 */
1068 TRACE_IRQS_OFF
1052#ifdef CONFIG_CONTEXT_TRACKING 1069#ifdef CONFIG_CONTEXT_TRACKING
1053 call enter_from_user_mode 1070 call enter_from_user_mode
1054#endif 1071#endif
1072 ret
1055 1073
1056.Lerror_entry_done: 1074.Lerror_entry_done:
1057
1058 TRACE_IRQS_OFF 1075 TRACE_IRQS_OFF
1059 ret 1076 ret
1060 1077
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
index c5b7fb2774d0..cc071c6f7d4d 100644
--- a/arch/x86/include/asm/page_types.h
+++ b/arch/x86/include/asm/page_types.h
@@ -9,19 +9,21 @@
9#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT) 9#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
10#define PAGE_MASK (~(PAGE_SIZE-1)) 10#define PAGE_MASK (~(PAGE_SIZE-1))
11 11
12#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT)
13#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1))
14
15#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT)
16#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1))
17
12#define __PHYSICAL_MASK ((phys_addr_t)((1ULL << __PHYSICAL_MASK_SHIFT) - 1)) 18#define __PHYSICAL_MASK ((phys_addr_t)((1ULL << __PHYSICAL_MASK_SHIFT) - 1))
13#define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1) 19#define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1)
14 20
15/* Cast PAGE_MASK to a signed type so that it is sign-extended if 21/* Cast *PAGE_MASK to a signed type so that it is sign-extended if
16 virtual addresses are 32-bits but physical addresses are larger 22 virtual addresses are 32-bits but physical addresses are larger
17 (ie, 32-bit PAE). */ 23 (ie, 32-bit PAE). */
18#define PHYSICAL_PAGE_MASK (((signed long)PAGE_MASK) & __PHYSICAL_MASK) 24#define PHYSICAL_PAGE_MASK (((signed long)PAGE_MASK) & __PHYSICAL_MASK)
19 25#define PHYSICAL_PMD_PAGE_MASK (((signed long)PMD_PAGE_MASK) & __PHYSICAL_MASK)
20#define PMD_PAGE_SIZE (_AC(1, UL) << PMD_SHIFT) 26#define PHYSICAL_PUD_PAGE_MASK (((signed long)PUD_PAGE_MASK) & __PHYSICAL_MASK)
21#define PMD_PAGE_MASK (~(PMD_PAGE_SIZE-1))
22
23#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT)
24#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1))
25 27
26#define HPAGE_SHIFT PMD_SHIFT 28#define HPAGE_SHIFT PMD_SHIFT
27#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT) 29#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index dd5b0aa9dd2f..a471cadb9630 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -279,17 +279,14 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
279static inline pudval_t pud_pfn_mask(pud_t pud) 279static inline pudval_t pud_pfn_mask(pud_t pud)
280{ 280{
281 if (native_pud_val(pud) & _PAGE_PSE) 281 if (native_pud_val(pud) & _PAGE_PSE)
282 return PUD_PAGE_MASK & PHYSICAL_PAGE_MASK; 282 return PHYSICAL_PUD_PAGE_MASK;
283 else 283 else
284 return PTE_PFN_MASK; 284 return PTE_PFN_MASK;
285} 285}
286 286
287static inline pudval_t pud_flags_mask(pud_t pud) 287static inline pudval_t pud_flags_mask(pud_t pud)
288{ 288{
289 if (native_pud_val(pud) & _PAGE_PSE) 289 return ~pud_pfn_mask(pud);
290 return ~(PUD_PAGE_MASK & (pudval_t)PHYSICAL_PAGE_MASK);
291 else
292 return ~PTE_PFN_MASK;
293} 290}
294 291
295static inline pudval_t pud_flags(pud_t pud) 292static inline pudval_t pud_flags(pud_t pud)
@@ -300,17 +297,14 @@ static inline pudval_t pud_flags(pud_t pud)
300static inline pmdval_t pmd_pfn_mask(pmd_t pmd) 297static inline pmdval_t pmd_pfn_mask(pmd_t pmd)
301{ 298{
302 if (native_pmd_val(pmd) & _PAGE_PSE) 299 if (native_pmd_val(pmd) & _PAGE_PSE)
303 return PMD_PAGE_MASK & PHYSICAL_PAGE_MASK; 300 return PHYSICAL_PMD_PAGE_MASK;
304 else 301 else
305 return PTE_PFN_MASK; 302 return PTE_PFN_MASK;
306} 303}
307 304
308static inline pmdval_t pmd_flags_mask(pmd_t pmd) 305static inline pmdval_t pmd_flags_mask(pmd_t pmd)
309{ 306{
310 if (native_pmd_val(pmd) & _PAGE_PSE) 307 return ~pmd_pfn_mask(pmd);
311 return ~(PMD_PAGE_MASK & (pmdval_t)PHYSICAL_PAGE_MASK);
312 else
313 return ~PTE_PFN_MASK;
314} 308}
315 309
316static inline pmdval_t pmd_flags(pmd_t pmd) 310static inline pmdval_t pmd_flags(pmd_t pmd)
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 48d34d28f5a6..cd0fc0cc78bc 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -1,7 +1,6 @@
1#ifndef _ASM_X86_PLATFORM_H 1#ifndef _ASM_X86_PLATFORM_H
2#define _ASM_X86_PLATFORM_H 2#define _ASM_X86_PLATFORM_H
3 3
4#include <asm/pgtable_types.h>
5#include <asm/bootparam.h> 4#include <asm/bootparam.h>
6 5
7struct mpc_bus; 6struct mpc_bus;
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 7fc27f1cca58..b3e94ef461fd 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -698,3 +698,4 @@ int __init microcode_init(void)
698 return error; 698 return error;
699 699
700} 700}
701late_initcall(microcode_init);
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 4562cf070c27..2bf79d7c97df 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -5,7 +5,7 @@
5 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar 5 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6 * Copyright (C) 2009 Jaswinder Singh Rajput 6 * Copyright (C) 2009 Jaswinder Singh Rajput
7 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter 7 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
8 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 8 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
9 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com> 9 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
10 * Copyright (C) 2009 Google, Inc., Stephane Eranian 10 * Copyright (C) 2009 Google, Inc., Stephane Eranian
11 * 11 *
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 499f533dd3cc..d0e35ebb2adb 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -5,7 +5,7 @@
5 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar 5 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6 * Copyright (C) 2009 Jaswinder Singh Rajput 6 * Copyright (C) 2009 Jaswinder Singh Rajput
7 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter 7 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
8 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 8 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
9 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com> 9 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
10 * Copyright (C) 2009 Google, Inc., Stephane Eranian 10 * Copyright (C) 2009 Google, Inc., Stephane Eranian
11 * 11 *
@@ -387,7 +387,7 @@ struct cpu_hw_events {
387/* Check flags and event code/umask, and set the HSW N/A flag */ 387/* Check flags and event code/umask, and set the HSW N/A flag */
388#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \ 388#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
389 __EVENT_CONSTRAINT(code, n, \ 389 __EVENT_CONSTRAINT(code, n, \
390 INTEL_ARCH_EVENT_MASK|INTEL_ARCH_EVENT_MASK, \ 390 INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
391 HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW) 391 HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)
392 392
393 393
@@ -627,6 +627,7 @@ struct x86_perf_task_context {
627 u64 lbr_from[MAX_LBR_ENTRIES]; 627 u64 lbr_from[MAX_LBR_ENTRIES];
628 u64 lbr_to[MAX_LBR_ENTRIES]; 628 u64 lbr_to[MAX_LBR_ENTRIES];
629 u64 lbr_info[MAX_LBR_ENTRIES]; 629 u64 lbr_info[MAX_LBR_ENTRIES];
630 int tos;
630 int lbr_callstack_users; 631 int lbr_callstack_users;
631 int lbr_stack_state; 632 int lbr_stack_state;
632}; 633};
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index f63360be2238..e2a430021e46 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -232,7 +232,7 @@ static struct event_constraint intel_hsw_event_constraints[] = {
232 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ 232 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
233 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ 233 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
234 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */ 234 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
235 INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.* */ 235 INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
236 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */ 236 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
237 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ 237 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
238 /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */ 238 /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
diff --git a/arch/x86/kernel/cpu/perf_event_intel_cqm.c b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
index 377e8f8ed391..a316ca96f1b6 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_cqm.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
@@ -298,7 +298,7 @@ static bool __match_event(struct perf_event *a, struct perf_event *b)
298static inline struct perf_cgroup *event_to_cgroup(struct perf_event *event) 298static inline struct perf_cgroup *event_to_cgroup(struct perf_event *event)
299{ 299{
300 if (event->attach_state & PERF_ATTACH_TASK) 300 if (event->attach_state & PERF_ATTACH_TASK)
301 return perf_cgroup_from_task(event->hw.target); 301 return perf_cgroup_from_task(event->hw.target, event->ctx);
302 302
303 return event->cgrp; 303 return event->cgrp;
304} 304}
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index bfd0b717e944..659f01e165d5 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -239,7 +239,7 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
239 } 239 }
240 240
241 mask = x86_pmu.lbr_nr - 1; 241 mask = x86_pmu.lbr_nr - 1;
242 tos = intel_pmu_lbr_tos(); 242 tos = task_ctx->tos;
243 for (i = 0; i < tos; i++) { 243 for (i = 0; i < tos; i++) {
244 lbr_idx = (tos - i) & mask; 244 lbr_idx = (tos - i) & mask;
245 wrmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]); 245 wrmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]);
@@ -247,6 +247,7 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
247 if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO) 247 if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
248 wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]); 248 wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
249 } 249 }
250 wrmsrl(x86_pmu.lbr_tos, tos);
250 task_ctx->lbr_stack_state = LBR_NONE; 251 task_ctx->lbr_stack_state = LBR_NONE;
251} 252}
252 253
@@ -270,6 +271,7 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
270 if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO) 271 if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
271 rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]); 272 rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
272 } 273 }
274 task_ctx->tos = tos;
273 task_ctx->lbr_stack_state = LBR_VALID; 275 task_ctx->lbr_stack_state = LBR_VALID;
274} 276}
275 277
diff --git a/arch/x86/kernel/irq_work.c b/arch/x86/kernel/irq_work.c
index dc5fa6a1e8d6..3512ba607361 100644
--- a/arch/x86/kernel/irq_work.c
+++ b/arch/x86/kernel/irq_work.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * x86 specific code for irq_work 2 * x86 specific code for irq_work
3 * 3 *
4 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> 4 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
5 */ 5 */
6 6
7#include <linux/kernel.h> 7#include <linux/kernel.h>
diff --git a/arch/x86/kernel/pmem.c b/arch/x86/kernel/pmem.c
index 4f00b63d7ff3..14415aff1813 100644
--- a/arch/x86/kernel/pmem.c
+++ b/arch/x86/kernel/pmem.c
@@ -4,10 +4,22 @@
4 */ 4 */
5#include <linux/platform_device.h> 5#include <linux/platform_device.h>
6#include <linux/module.h> 6#include <linux/module.h>
7#include <linux/ioport.h>
8
9static int found(u64 start, u64 end, void *data)
10{
11 return 1;
12}
7 13
8static __init int register_e820_pmem(void) 14static __init int register_e820_pmem(void)
9{ 15{
16 char *pmem = "Persistent Memory (legacy)";
10 struct platform_device *pdev; 17 struct platform_device *pdev;
18 int rc;
19
20 rc = walk_iomem_res(pmem, IORESOURCE_MEM, 0, -1, NULL, found);
21 if (rc <= 0)
22 return 0;
11 23
12 /* 24 /*
13 * See drivers/nvdimm/e820.c for the implementation, this is 25 * See drivers/nvdimm/e820.c for the implementation, this is
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 29db25f9a745..d2bbe343fda7 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1250,8 +1250,6 @@ void __init setup_arch(char **cmdline_p)
1250 if (efi_enabled(EFI_BOOT)) 1250 if (efi_enabled(EFI_BOOT))
1251 efi_apply_memmap_quirks(); 1251 efi_apply_memmap_quirks();
1252#endif 1252#endif
1253
1254 microcode_init();
1255} 1253}
1256 1254
1257#ifdef CONFIG_X86_32 1255#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index b7ffb7c00075..cb6282c3638f 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -690,12 +690,15 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
690 signal_setup_done(failed, ksig, stepping); 690 signal_setup_done(failed, ksig, stepping);
691} 691}
692 692
693#ifdef CONFIG_X86_32 693static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs)
694#define NR_restart_syscall __NR_restart_syscall 694{
695#else /* !CONFIG_X86_32 */ 695#if defined(CONFIG_X86_32) || !defined(CONFIG_X86_64)
696#define NR_restart_syscall \ 696 return __NR_restart_syscall;
697 test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall : __NR_restart_syscall 697#else /* !CONFIG_X86_32 && CONFIG_X86_64 */
698#endif /* CONFIG_X86_32 */ 698 return test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall :
699 __NR_restart_syscall | (regs->orig_ax & __X32_SYSCALL_BIT);
700#endif /* CONFIG_X86_32 || !CONFIG_X86_64 */
701}
699 702
700/* 703/*
701 * Note that 'init' is a special process: it doesn't get signals it doesn't 704 * Note that 'init' is a special process: it doesn't get signals it doesn't
@@ -724,7 +727,7 @@ void do_signal(struct pt_regs *regs)
724 break; 727 break;
725 728
726 case -ERESTART_RESTARTBLOCK: 729 case -ERESTART_RESTARTBLOCK:
727 regs->ax = NR_restart_syscall; 730 regs->ax = get_nr_restart_syscall(regs);
728 regs->ip -= 2; 731 regs->ip -= 2;
729 break; 732 break;
730 } 733 }
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 892ee2e5ecbc..fbabe4fcc7fb 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -509,7 +509,7 @@ void __inquire_remote_apic(int apicid)
509 */ 509 */
510#define UDELAY_10MS_DEFAULT 10000 510#define UDELAY_10MS_DEFAULT 10000
511 511
512static unsigned int init_udelay = INT_MAX; 512static unsigned int init_udelay = UINT_MAX;
513 513
514static int __init cpu_init_udelay(char *str) 514static int __init cpu_init_udelay(char *str)
515{ 515{
@@ -522,14 +522,15 @@ early_param("cpu_init_udelay", cpu_init_udelay);
522static void __init smp_quirk_init_udelay(void) 522static void __init smp_quirk_init_udelay(void)
523{ 523{
524 /* if cmdline changed it from default, leave it alone */ 524 /* if cmdline changed it from default, leave it alone */
525 if (init_udelay != INT_MAX) 525 if (init_udelay != UINT_MAX)
526 return; 526 return;
527 527
528 /* if modern processor, use no delay */ 528 /* if modern processor, use no delay */
529 if (((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 6)) || 529 if (((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 6)) ||
530 ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF))) 530 ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF))) {
531 init_udelay = 0; 531 init_udelay = 0;
532 532 return;
533 }
533 /* else, use legacy delay */ 534 /* else, use legacy delay */
534 init_udelay = UDELAY_10MS_DEFAULT; 535 init_udelay = UDELAY_10MS_DEFAULT;
535} 536}
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index 06332cb7e7d1..3f5c48ddba45 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -38,6 +38,14 @@ static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
38 return best && (best->ecx & bit(X86_FEATURE_XSAVE)); 38 return best && (best->ecx & bit(X86_FEATURE_XSAVE));
39} 39}
40 40
41static inline bool guest_cpuid_has_mtrr(struct kvm_vcpu *vcpu)
42{
43 struct kvm_cpuid_entry2 *best;
44
45 best = kvm_find_cpuid_entry(vcpu, 1, 0);
46 return best && (best->edx & bit(X86_FEATURE_MTRR));
47}
48
41static inline bool guest_cpuid_has_tsc_adjust(struct kvm_vcpu *vcpu) 49static inline bool guest_cpuid_has_tsc_adjust(struct kvm_vcpu *vcpu)
42{ 50{
43 struct kvm_cpuid_entry2 *best; 51 struct kvm_cpuid_entry2 *best;
diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
index 9e8bf13572e6..3f8c732117ec 100644
--- a/arch/x86/kvm/mtrr.c
+++ b/arch/x86/kvm/mtrr.c
@@ -120,14 +120,22 @@ static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state)
120 return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK; 120 return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK;
121} 121}
122 122
123static u8 mtrr_disabled_type(void) 123static u8 mtrr_disabled_type(struct kvm_vcpu *vcpu)
124{ 124{
125 /* 125 /*
126 * Intel SDM 11.11.2.2: all MTRRs are disabled when 126 * Intel SDM 11.11.2.2: all MTRRs are disabled when
127 * IA32_MTRR_DEF_TYPE.E bit is cleared, and the UC 127 * IA32_MTRR_DEF_TYPE.E bit is cleared, and the UC
128 * memory type is applied to all of physical memory. 128 * memory type is applied to all of physical memory.
129 *
130 * However, virtual machines can be run with CPUID such that
131 * there are no MTRRs. In that case, the firmware will never
132 * enable MTRRs and it is obviously undesirable to run the
133 * guest entirely with UC memory and we use WB.
129 */ 134 */
130 return MTRR_TYPE_UNCACHABLE; 135 if (guest_cpuid_has_mtrr(vcpu))
136 return MTRR_TYPE_UNCACHABLE;
137 else
138 return MTRR_TYPE_WRBACK;
131} 139}
132 140
133/* 141/*
@@ -267,7 +275,7 @@ static int fixed_mtrr_addr_to_seg(u64 addr)
267 275
268 for (seg = 0; seg < seg_num; seg++) { 276 for (seg = 0; seg < seg_num; seg++) {
269 mtrr_seg = &fixed_seg_table[seg]; 277 mtrr_seg = &fixed_seg_table[seg];
270 if (mtrr_seg->start >= addr && addr < mtrr_seg->end) 278 if (mtrr_seg->start <= addr && addr < mtrr_seg->end)
271 return seg; 279 return seg;
272 } 280 }
273 281
@@ -300,7 +308,6 @@ static void var_mtrr_range(struct kvm_mtrr_range *range, u64 *start, u64 *end)
300 *start = range->base & PAGE_MASK; 308 *start = range->base & PAGE_MASK;
301 309
302 mask = range->mask & PAGE_MASK; 310 mask = range->mask & PAGE_MASK;
303 mask |= ~0ULL << boot_cpu_data.x86_phys_bits;
304 311
305 /* This cannot overflow because writing to the reserved bits of 312 /* This cannot overflow because writing to the reserved bits of
306 * variable MTRRs causes a #GP. 313 * variable MTRRs causes a #GP.
@@ -356,10 +363,14 @@ static void set_var_mtrr_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
356 if (var_mtrr_range_is_valid(cur)) 363 if (var_mtrr_range_is_valid(cur))
357 list_del(&mtrr_state->var_ranges[index].node); 364 list_del(&mtrr_state->var_ranges[index].node);
358 365
366 /* Extend the mask with all 1 bits to the left, since those
367 * bits must implicitly be 0. The bits are then cleared
368 * when reading them.
369 */
359 if (!is_mtrr_mask) 370 if (!is_mtrr_mask)
360 cur->base = data; 371 cur->base = data;
361 else 372 else
362 cur->mask = data; 373 cur->mask = data | (-1LL << cpuid_maxphyaddr(vcpu));
363 374
364 /* add it to the list if it's enabled. */ 375 /* add it to the list if it's enabled. */
365 if (var_mtrr_range_is_valid(cur)) { 376 if (var_mtrr_range_is_valid(cur)) {
@@ -426,6 +437,8 @@ int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
426 *pdata = vcpu->arch.mtrr_state.var_ranges[index].base; 437 *pdata = vcpu->arch.mtrr_state.var_ranges[index].base;
427 else 438 else
428 *pdata = vcpu->arch.mtrr_state.var_ranges[index].mask; 439 *pdata = vcpu->arch.mtrr_state.var_ranges[index].mask;
440
441 *pdata &= (1ULL << cpuid_maxphyaddr(vcpu)) - 1;
429 } 442 }
430 443
431 return 0; 444 return 0;
@@ -670,7 +683,7 @@ u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
670 } 683 }
671 684
672 if (iter.mtrr_disabled) 685 if (iter.mtrr_disabled)
673 return mtrr_disabled_type(); 686 return mtrr_disabled_type(vcpu);
674 687
675 /* not contained in any MTRRs. */ 688 /* not contained in any MTRRs. */
676 if (type == -1) 689 if (type == -1)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 83a1c643f9a5..899c40f826dd 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3422,6 +3422,8 @@ static int handle_exit(struct kvm_vcpu *vcpu)
3422 struct kvm_run *kvm_run = vcpu->run; 3422 struct kvm_run *kvm_run = vcpu->run;
3423 u32 exit_code = svm->vmcb->control.exit_code; 3423 u32 exit_code = svm->vmcb->control.exit_code;
3424 3424
3425 trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);
3426
3425 if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE)) 3427 if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
3426 vcpu->arch.cr0 = svm->vmcb->save.cr0; 3428 vcpu->arch.cr0 = svm->vmcb->save.cr0;
3427 if (npt_enabled) 3429 if (npt_enabled)
@@ -3892,8 +3894,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
3892 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; 3894 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
3893 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip; 3895 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
3894 3896
3895 trace_kvm_exit(svm->vmcb->control.exit_code, vcpu, KVM_ISA_SVM);
3896
3897 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) 3897 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
3898 kvm_before_handle_nmi(&svm->vcpu); 3898 kvm_before_handle_nmi(&svm->vcpu);
3899 3899
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 87acc5221740..44976a596fa6 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2803,7 +2803,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2803 msr_info->data = vcpu->arch.ia32_xss; 2803 msr_info->data = vcpu->arch.ia32_xss;
2804 break; 2804 break;
2805 case MSR_TSC_AUX: 2805 case MSR_TSC_AUX:
2806 if (!guest_cpuid_has_rdtscp(vcpu)) 2806 if (!guest_cpuid_has_rdtscp(vcpu) && !msr_info->host_initiated)
2807 return 1; 2807 return 1;
2808 /* Otherwise falls through */ 2808 /* Otherwise falls through */
2809 default: 2809 default:
@@ -2909,7 +2909,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2909 clear_atomic_switch_msr(vmx, MSR_IA32_XSS); 2909 clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
2910 break; 2910 break;
2911 case MSR_TSC_AUX: 2911 case MSR_TSC_AUX:
2912 if (!guest_cpuid_has_rdtscp(vcpu)) 2912 if (!guest_cpuid_has_rdtscp(vcpu) && !msr_info->host_initiated)
2913 return 1; 2913 return 1;
2914 /* Check reserved bit, higher 32 bits should be zero */ 2914 /* Check reserved bit, higher 32 bits should be zero */
2915 if ((data >> 32) != 0) 2915 if ((data >> 32) != 0)
@@ -7394,11 +7394,6 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
7394 7394
7395 switch (type) { 7395 switch (type) {
7396 case VMX_VPID_EXTENT_ALL_CONTEXT: 7396 case VMX_VPID_EXTENT_ALL_CONTEXT:
7397 if (get_vmcs12(vcpu)->virtual_processor_id == 0) {
7398 nested_vmx_failValid(vcpu,
7399 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
7400 return 1;
7401 }
7402 __vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02); 7397 __vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02);
7403 nested_vmx_succeed(vcpu); 7398 nested_vmx_succeed(vcpu);
7404 break; 7399 break;
@@ -8047,6 +8042,8 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
8047 u32 exit_reason = vmx->exit_reason; 8042 u32 exit_reason = vmx->exit_reason;
8048 u32 vectoring_info = vmx->idt_vectoring_info; 8043 u32 vectoring_info = vmx->idt_vectoring_info;
8049 8044
8045 trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX);
8046
8050 /* 8047 /*
8051 * Flush logged GPAs PML buffer, this will make dirty_bitmap more 8048 * Flush logged GPAs PML buffer, this will make dirty_bitmap more
8052 * updated. Another good is, in kvm_vm_ioctl_get_dirty_log, before 8049 * updated. Another good is, in kvm_vm_ioctl_get_dirty_log, before
@@ -8673,7 +8670,6 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
8673 vmx->loaded_vmcs->launched = 1; 8670 vmx->loaded_vmcs->launched = 1;
8674 8671
8675 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON); 8672 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
8676 trace_kvm_exit(vmx->exit_reason, vcpu, KVM_ISA_VMX);
8677 8673
8678 /* 8674 /*
8679 * the KVM_REQ_EVENT optimization bit is only on for one entry, and if 8675 * the KVM_REQ_EVENT optimization bit is only on for one entry, and if
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 00462bd63129..7ffc224bbe41 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2763,6 +2763,26 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
+static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu)
+{
+	return (!lapic_in_kernel(vcpu) ||
+		kvm_apic_accept_pic_intr(vcpu));
+}
+
+/*
+ * if userspace requested an interrupt window, check that the
+ * interrupt window is open.
+ *
+ * No need to exit to userspace if we already have an interrupt queued.
+ */
+static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu)
+{
+	return kvm_arch_interrupt_allowed(vcpu) &&
+		!kvm_cpu_has_interrupt(vcpu) &&
+		!kvm_event_needs_reinjection(vcpu) &&
+		kvm_cpu_accept_dm_intr(vcpu);
+}
+
 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
 				    struct kvm_interrupt *irq)
 {
@@ -2786,6 +2806,7 @@ static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
 		return -EEXIST;
 
 	vcpu->arch.pending_external_vector = irq->irq;
+	kvm_make_request(KVM_REQ_EVENT, vcpu);
 	return 0;
 }
 
@@ -3551,9 +3572,11 @@ static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
 
 static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
 {
+	int i;
 	mutex_lock(&kvm->arch.vpit->pit_state.lock);
 	memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
-	kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0);
+	for (i = 0; i < 3; i++)
+		kvm_pit_load_count(kvm, i, ps->channels[i].count, 0);
 	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
 	return 0;
 }
@@ -3572,6 +3595,7 @@ static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
 static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
 {
 	int start = 0;
+	int i;
 	u32 prev_legacy, cur_legacy;
 	mutex_lock(&kvm->arch.vpit->pit_state.lock);
 	prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
@@ -3581,7 +3605,8 @@ static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
 	memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels,
 	       sizeof(kvm->arch.vpit->pit_state.channels));
 	kvm->arch.vpit->pit_state.flags = ps->flags;
-	kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start);
+	for (i = 0; i < 3; i++)
+		kvm_pit_load_count(kvm, i, kvm->arch.vpit->pit_state.channels[i].count, start);
 	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
 	return 0;
 }
@@ -5910,23 +5935,10 @@ static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
 	return emulator_write_emulated(ctxt, rip, instruction, 3, NULL);
 }
 
-/*
- * Check if userspace requested an interrupt window, and that the
- * interrupt window is open.
- *
- * No need to exit to userspace if we already have an interrupt queued.
- */
 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
 {
-	if (!vcpu->run->request_interrupt_window || pic_in_kernel(vcpu->kvm))
-		return false;
-
-	if (kvm_cpu_has_interrupt(vcpu))
-		return false;
-
-	return (irqchip_split(vcpu->kvm)
-		? kvm_apic_accept_pic_intr(vcpu)
-		: kvm_arch_interrupt_allowed(vcpu));
+	return vcpu->run->request_interrupt_window &&
+		likely(!pic_in_kernel(vcpu->kvm));
 }
 
 static void post_kvm_run_save(struct kvm_vcpu *vcpu)
@@ -5937,17 +5949,9 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
 	kvm_run->flags = is_smm(vcpu) ? KVM_RUN_X86_SMM : 0;
 	kvm_run->cr8 = kvm_get_cr8(vcpu);
 	kvm_run->apic_base = kvm_get_apic_base(vcpu);
-	if (!irqchip_in_kernel(vcpu->kvm))
-		kvm_run->ready_for_interrupt_injection =
-			kvm_arch_interrupt_allowed(vcpu) &&
-			!kvm_cpu_has_interrupt(vcpu) &&
-			!kvm_event_needs_reinjection(vcpu);
-	else if (!pic_in_kernel(vcpu->kvm))
-		kvm_run->ready_for_interrupt_injection =
-			kvm_apic_accept_pic_intr(vcpu) &&
-			!kvm_cpu_has_interrupt(vcpu);
-	else
-		kvm_run->ready_for_interrupt_injection = 1;
+	kvm_run->ready_for_interrupt_injection =
+		pic_in_kernel(vcpu->kvm) ||
+		kvm_vcpu_ready_for_interrupt_injection(vcpu);
 }
 
 static void update_cr8_intercept(struct kvm_vcpu *vcpu)
@@ -6360,8 +6364,10 @@ void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
 static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 {
 	int r;
-	bool req_int_win = !lapic_in_kernel(vcpu) &&
-		vcpu->run->request_interrupt_window;
+	bool req_int_win =
+		dm_request_for_irq_injection(vcpu) &&
+		kvm_cpu_accept_dm_intr(vcpu);
+
 	bool req_immediate_exit = false;
 
 	if (vcpu->requests) {
@@ -6513,6 +6519,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	if (req_immediate_exit)
 		smp_send_reschedule(vcpu->cpu);
 
+	trace_kvm_entry(vcpu->vcpu_id);
+	wait_lapic_expire(vcpu);
 	__kvm_guest_enter();
 
 	if (unlikely(vcpu->arch.switch_db_regs)) {
@@ -6525,8 +6533,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
 	}
 
-	trace_kvm_entry(vcpu->vcpu_id);
-	wait_lapic_expire(vcpu);
 	kvm_x86_ops->run(vcpu);
 
 	/*
@@ -6663,7 +6669,8 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
 	if (kvm_cpu_has_pending_timer(vcpu))
 		kvm_inject_pending_timer_irqs(vcpu);
 
-	if (dm_request_for_irq_injection(vcpu)) {
+	if (dm_request_for_irq_injection(vcpu) &&
+		kvm_vcpu_ready_for_interrupt_injection(vcpu)) {
 		r = 0;
 		vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
 		++vcpu->stat.request_irq_exits;
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index a035c2aa7801..0f1c6fc3ddd8 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -89,7 +89,7 @@ static struct addr_marker address_markers[] = {
 	{ 0/* VMALLOC_START */, "vmalloc() Area" },
 	{ 0/*VMALLOC_END*/, "vmalloc() End" },
 # ifdef CONFIG_HIGHMEM
-	{ 0/*PKMAP_BASE*/, "Persisent kmap() Area" },
+	{ 0/*PKMAP_BASE*/, "Persistent kmap() Area" },
 # endif
 	{ 0/*FIXADDR_START*/, "Fixmap Area" },
 #endif
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index 1202d5ca2fb5..b2fd67da1701 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -101,19 +101,19 @@ static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
 	switch (type) {
 	case REG_TYPE_RM:
 		regno = X86_MODRM_RM(insn->modrm.value);
-		if (X86_REX_B(insn->rex_prefix.value) == 1)
+		if (X86_REX_B(insn->rex_prefix.value))
 			regno += 8;
 		break;
 
 	case REG_TYPE_INDEX:
 		regno = X86_SIB_INDEX(insn->sib.value);
-		if (X86_REX_X(insn->rex_prefix.value) == 1)
+		if (X86_REX_X(insn->rex_prefix.value))
 			regno += 8;
 		break;
 
 	case REG_TYPE_BASE:
 		regno = X86_SIB_BASE(insn->sib.value);
-		if (X86_REX_B(insn->rex_prefix.value) == 1)
+		if (X86_REX_B(insn->rex_prefix.value))
 			regno += 8;
 		break;
 
diff --git a/arch/x86/pci/bus_numa.c b/arch/x86/pci/bus_numa.c
index 7bcf06a7cd12..6eb3c8af96e2 100644
--- a/arch/x86/pci/bus_numa.c
+++ b/arch/x86/pci/bus_numa.c
@@ -50,18 +50,9 @@ void x86_pci_root_bus_resources(int bus, struct list_head *resources)
 	if (!found)
 		pci_add_resource(resources, &info->busn);
 
-	list_for_each_entry(root_res, &info->resources, list) {
-		struct resource *res;
-		struct resource *root;
+	list_for_each_entry(root_res, &info->resources, list)
+		pci_add_resource(resources, &root_res->res);
 
-		res = &root_res->res;
-		pci_add_resource(resources, res);
-		if (res->flags & IORESOURCE_IO)
-			root = &ioport_resource;
-		else
-			root = &iomem_resource;
-		insert_resource(root, res);
-	}
 	return;
 
 default_resources:
diff --git a/arch/x86/um/signal.c b/arch/x86/um/signal.c
index 06934a8a4872..14fcd01ed992 100644
--- a/arch/x86/um/signal.c
+++ b/arch/x86/um/signal.c
@@ -211,7 +211,7 @@ static int copy_sc_from_user(struct pt_regs *regs,
 	if (err)
 		return 1;
 
-	err = convert_fxsr_from_user(&fpx, sc.fpstate);
+	err = convert_fxsr_from_user(&fpx, (void *)sc.fpstate);
 	if (err)
 		return 1;
 
@@ -227,7 +227,7 @@ static int copy_sc_from_user(struct pt_regs *regs,
 	{
 		struct user_i387_struct fp;
 
-		err = copy_from_user(&fp, sc.fpstate,
+		err = copy_from_user(&fp, (void *)sc.fpstate,
 				     sizeof(struct user_i387_struct));
 		if (err)
 			return 1;
@@ -291,7 +291,7 @@ static int copy_sc_to_user(struct sigcontext __user *to,
 #endif
 #undef PUTREG
 	sc.oldmask = mask;
-	sc.fpstate = to_fp;
+	sc.fpstate = (unsigned long)to_fp;
 
 	err = copy_to_user(to, &sc, sizeof(struct sigcontext));
 	if (err)
@@ -468,12 +468,10 @@ long sys_sigreturn(void)
 	struct sigframe __user *frame = (struct sigframe __user *)(sp - 8);
 	sigset_t set;
 	struct sigcontext __user *sc = &frame->sc;
-	unsigned long __user *oldmask = &sc->oldmask;
-	unsigned long __user *extramask = frame->extramask;
 	int sig_size = (_NSIG_WORDS - 1) * sizeof(unsigned long);
 
-	if (copy_from_user(&set.sig[0], oldmask, sizeof(set.sig[0])) ||
-	    copy_from_user(&set.sig[1], extramask, sig_size))
+	if (copy_from_user(&set.sig[0], &sc->oldmask, sizeof(set.sig[0])) ||
+	    copy_from_user(&set.sig[1], frame->extramask, sig_size))
 		goto segfault;
 
 	set_current_blocked(&set);
@@ -505,6 +503,7 @@ int setup_signal_stack_si(unsigned long stack_top, struct ksignal *ksig,
 {
 	struct rt_sigframe __user *frame;
 	int err = 0, sig = ksig->sig;
+	unsigned long fp_to;
 
 	frame = (struct rt_sigframe __user *)
 		round_down(stack_top - sizeof(struct rt_sigframe), 16);
@@ -526,7 +525,10 @@ int setup_signal_stack_si(unsigned long stack_top, struct ksignal *ksig,
 	err |= __save_altstack(&frame->uc.uc_stack, PT_REGS_SP(regs));
 	err |= copy_sc_to_user(&frame->uc.uc_mcontext, &frame->fpstate, regs,
 			       set->sig[0]);
-	err |= __put_user(&frame->fpstate, &frame->uc.uc_mcontext.fpstate);
+
+	fp_to = (unsigned long)&frame->fpstate;
+
+	err |= __put_user(fp_to, &frame->uc.uc_mcontext.fpstate);
 	if (sizeof(*set) == 16) {
 		err |= __put_user(set->sig[0], &frame->uc.uc_sigmask.sig[0]);
 		err |= __put_user(set->sig[1], &frame->uc.uc_sigmask.sig[1]);
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index ac161db63388..cb5e266a8bf7 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -2495,14 +2495,9 @@ void __init xen_init_mmu_ops(void)
 {
 	x86_init.paging.pagetable_init = xen_pagetable_init;
 
-	/* Optimization - we can use the HVM one but it has no idea which
-	 * VCPUs are descheduled - which means that it will needlessly IPI
-	 * them. Xen knows so let it do the job.
-	 */
-	if (xen_feature(XENFEAT_auto_translated_physmap)) {
-		pv_mmu_ops.flush_tlb_others = xen_flush_tlb_others;
+	if (xen_feature(XENFEAT_auto_translated_physmap))
 		return;
-	}
+
 	pv_mmu_ops = xen_mmu_ops;
 
 	memset(dummy_mapping, 0xff, PAGE_SIZE);
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index feddabdab448..3705eabd7e22 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -68,26 +68,16 @@ static void xen_pv_post_suspend(int suspend_cancelled)
 
 void xen_arch_pre_suspend(void)
 {
-	int cpu;
-
-	for_each_online_cpu(cpu)
-		xen_pmu_finish(cpu);
-
 	if (xen_pv_domain())
 		xen_pv_pre_suspend();
 }
 
 void xen_arch_post_suspend(int cancelled)
 {
-	int cpu;
-
 	if (xen_pv_domain())
 		xen_pv_post_suspend(cancelled);
 	else
 		xen_hvm_post_suspend(cancelled);
-
-	for_each_online_cpu(cpu)
-		xen_pmu_init(cpu);
 }
 
 static void xen_vcpu_notify_restore(void *data)
@@ -106,10 +96,20 @@ static void xen_vcpu_notify_suspend(void *data)
 
 void xen_arch_resume(void)
 {
+	int cpu;
+
 	on_each_cpu(xen_vcpu_notify_restore, NULL, 1);
+
+	for_each_online_cpu(cpu)
+		xen_pmu_init(cpu);
 }
 
 void xen_arch_suspend(void)
 {
+	int cpu;
+
+	for_each_online_cpu(cpu)
+		xen_pmu_finish(cpu);
+
 	on_each_cpu(xen_vcpu_notify_suspend, NULL, 1);
 }