author	Linus Walleij <linus.walleij@linaro.org>	2013-10-19 17:24:03 -0400
committer	Linus Walleij <linus.walleij@linaro.org>	2013-10-19 17:24:03 -0400
commit	b41fb43911b4cb864812adec88d028cc6219f23e (patch)
tree	f383de554dc3640ec3081ad41fa34f7cab68de82 /arch
parent	0963d59bc0bbed48b06733950f5eb167e9b9a8fa (diff)
parent	31d141e3a666269a3b6fcccddb0351caf7454240 (diff)
Merge tag 'v3.12-rc6' into devel

Linux 3.12-rc6

Conflicts:
	drivers/gpio/gpio-lynxpoint.c
Diffstat (limited to 'arch')
-rw-r--r-- arch/arc/kernel/ptrace.c | 2
-rw-r--r-- arch/arm/Makefile | 9
-rw-r--r-- arch/arm/boot/Makefile | 16
-rw-r--r-- arch/arm/boot/dts/exynos5250.dtsi | 5
-rw-r--r-- arch/arm/boot/dts/omap3-beagle-xm.dts | 2
-rw-r--r-- arch/arm/boot/dts/omap3.dtsi | 4
-rw-r--r-- arch/arm/boot/install.sh | 14
-rw-r--r-- arch/arm/common/mcpm_entry.c | 6
-rw-r--r-- arch/arm/common/sharpsl_param.c | 5
-rw-r--r-- arch/arm/include/asm/Kbuild | 1
-rw-r--r-- arch/arm/include/asm/jump_label.h | 2
-rw-r--r-- arch/arm/include/asm/mcpm.h | 14
-rw-r--r-- arch/arm/include/asm/syscall.h | 6
-rw-r--r-- arch/arm/kernel/head.S | 21
-rw-r--r-- arch/arm/mach-omap2/board-generic.c | 18
-rw-r--r-- arch/arm/mach-omap2/board-rx51-peripherals.c | 9
-rw-r--r-- arch/arm/mach-omap2/gpmc-onenand.c | 12
-rw-r--r-- arch/arm/mach-omap2/mux.h | 4
-rw-r--r-- arch/arm/mach-omap2/timer.c | 4
-rw-r--r-- arch/arm/mm/dma-mapping.c | 43
-rw-r--r-- arch/arm/mm/init.c | 3
-rw-r--r-- arch/mips/include/asm/jump_label.h | 2
-rw-r--r-- arch/mips/kernel/octeon_switch.S | 2
-rw-r--r-- arch/mips/kernel/r2300_switch.S | 2
-rw-r--r-- arch/mips/kernel/r4k_switch.S | 2
-rw-r--r-- arch/parisc/include/asm/traps.h | 2
-rw-r--r-- arch/parisc/kernel/cache.c | 1
-rw-r--r-- arch/parisc/kernel/smp.c | 8
-rw-r--r-- arch/parisc/kernel/traps.c | 11
-rw-r--r-- arch/parisc/lib/memcpy.c | 15
-rw-r--r-- arch/parisc/mm/fault.c | 15
-rw-r--r-- arch/powerpc/include/asm/jump_label.h | 2
-rw-r--r-- arch/powerpc/kernel/irq.c | 5
-rw-r--r-- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 2
-rw-r--r-- arch/powerpc/kvm/e500_mmu_host.c | 18
-rw-r--r-- arch/s390/include/asm/jump_label.h | 2
-rw-r--r-- arch/s390/kernel/crash_dump.c | 42
-rw-r--r-- arch/s390/kernel/entry.S | 1
-rw-r--r-- arch/s390/kernel/entry64.S | 1
-rw-r--r-- arch/s390/kernel/kprobes.c | 6
-rw-r--r-- arch/sparc/include/asm/jump_label.h | 2
-rw-r--r-- arch/tile/include/asm/atomic.h | 5
-rw-r--r-- arch/tile/include/asm/atomic_32.h | 27
-rw-r--r-- arch/tile/include/asm/cmpxchg.h | 28
-rw-r--r-- arch/tile/include/asm/percpu.h | 34
-rw-r--r-- arch/tile/kernel/hardwall.c | 6
-rw-r--r-- arch/tile/kernel/intvec_32.S | 3
-rw-r--r-- arch/tile/kernel/intvec_64.S | 3
-rw-r--r-- arch/tile/kernel/stack.c | 12
-rw-r--r-- arch/tile/lib/atomic_32.c | 8
-rw-r--r-- arch/x86/Kconfig | 7
-rw-r--r-- arch/x86/include/asm/cpufeature.h | 6
-rw-r--r-- arch/x86/include/asm/jump_label.h | 2
-rw-r--r-- arch/x86/include/asm/mutex_64.h | 4
-rw-r--r-- arch/x86/kernel/apic/x2apic_uv_x.c | 2
-rw-r--r-- arch/x86/kernel/cpu/perf_event.c | 11
-rw-r--r-- arch/x86/kernel/kvm.c | 17
-rw-r--r-- arch/x86/kernel/reboot.c | 8
-rw-r--r-- arch/x86/kvm/vmx.c | 24
-rw-r--r-- arch/x86/xen/smp.c | 9
60 files changed, 378 insertions, 179 deletions
diff --git a/arch/arc/kernel/ptrace.c b/arch/arc/kernel/ptrace.c
index 333238564b67..5d76706139dd 100644
--- a/arch/arc/kernel/ptrace.c
+++ b/arch/arc/kernel/ptrace.c
@@ -102,7 +102,7 @@ static int genregs_set(struct task_struct *target,
 	REG_IGNORE_ONE(pad2);
 	REG_IN_CHUNK(callee, efa, cregs);	/* callee_regs[r25..r13] */
 	REG_IGNORE_ONE(efa);			/* efa update invalid */
-	REG_IN_ONE(stop_pc, &ptregs->ret);	/* stop_pc: PC update */
+	REG_IGNORE_ONE(stop_pc);		/* PC updated via @ret */
 
 	return ret;
 }
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index a37a50f575a2..db50b626be98 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -296,10 +296,15 @@ archprepare:
 # Convert bzImage to zImage
 bzImage: zImage
 
-zImage Image xipImage bootpImage uImage: vmlinux
+BOOT_TARGETS	= zImage Image xipImage bootpImage uImage
+INSTALL_TARGETS	= zinstall uinstall install
+
+PHONY += bzImage $(BOOT_TARGETS) $(INSTALL_TARGETS)
+
+$(BOOT_TARGETS): vmlinux
 	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
 
-zinstall uinstall install: vmlinux
+$(INSTALL_TARGETS):
 	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@
 
 %.dtb: | scripts
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
index 84aa2caf07ed..ec2f8065f955 100644
--- a/arch/arm/boot/Makefile
+++ b/arch/arm/boot/Makefile
@@ -95,24 +95,24 @@ initrd:
 	@test "$(INITRD)" != "" || \
 	(echo You must specify INITRD; exit -1)
 
-install: $(obj)/Image
-	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+install:
+	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
 	$(obj)/Image System.map "$(INSTALL_PATH)"
 
-zinstall: $(obj)/zImage
-	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+zinstall:
+	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
 	$(obj)/zImage System.map "$(INSTALL_PATH)"
 
-uinstall: $(obj)/uImage
-	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+uinstall:
+	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
 	$(obj)/uImage System.map "$(INSTALL_PATH)"
 
 zi:
-	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
 	$(obj)/zImage System.map "$(INSTALL_PATH)"
 
 i:
-	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
 	$(obj)/Image System.map "$(INSTALL_PATH)"
 
 subdir- := bootp compressed dts
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index 7d7cc777ff7b..bbac42a78ce5 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -96,6 +96,11 @@
 		     <1 14 0xf08>,
 		     <1 11 0xf08>,
 		     <1 10 0xf08>;
+		/* Unfortunately we need this since some versions of U-Boot
+		 * on Exynos don't set the CNTFRQ register, so we need the
+		 * value from DT.
+		 */
+		clock-frequency = <24000000>;
 	};
 
 	mct@101C0000 {
diff --git a/arch/arm/boot/dts/omap3-beagle-xm.dts b/arch/arm/boot/dts/omap3-beagle-xm.dts
index 0c514dc8460c..2816bf612672 100644
--- a/arch/arm/boot/dts/omap3-beagle-xm.dts
+++ b/arch/arm/boot/dts/omap3-beagle-xm.dts
@@ -11,7 +11,7 @@
 
 / {
 	model = "TI OMAP3 BeagleBoard xM";
-	compatible = "ti,omap3-beagle-xm", "ti,omap3-beagle", "ti,omap3";
+	compatible = "ti,omap3-beagle-xm", "ti,omap36xx", "ti,omap3";
 
 	cpus {
 		cpu@0 {
diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
index 7d95cda1fae4..b41bd57f4328 100644
--- a/arch/arm/boot/dts/omap3.dtsi
+++ b/arch/arm/boot/dts/omap3.dtsi
@@ -108,7 +108,7 @@
 		#address-cells = <1>;
 		#size-cells = <0>;
 		pinctrl-single,register-width = <16>;
-		pinctrl-single,function-mask = <0x7f1f>;
+		pinctrl-single,function-mask = <0xff1f>;
 	};
 
 	omap3_pmx_wkup: pinmux@0x48002a00 {
@@ -117,7 +117,7 @@
 		#address-cells = <1>;
 		#size-cells = <0>;
 		pinctrl-single,register-width = <16>;
-		pinctrl-single,function-mask = <0x7f1f>;
+		pinctrl-single,function-mask = <0xff1f>;
 	};
 
 	gpio1: gpio@48310000 {
diff --git a/arch/arm/boot/install.sh b/arch/arm/boot/install.sh
index 06ea7d42ce8e..2a45092a40e3 100644
--- a/arch/arm/boot/install.sh
+++ b/arch/arm/boot/install.sh
@@ -20,6 +20,20 @@
 # $4 - default install path (blank if root directory)
 #
 
+verify () {
+	if [ ! -f "$1" ]; then
+		echo "" 1>&2
+		echo " *** Missing file: $1" 1>&2
+		echo ' *** You need to run "make" before "make install".' 1>&2
+		echo "" 1>&2
+		exit 1
+	fi
+}
+
+# Make sure the files actually exist
+verify "$2"
+verify "$3"
+
 # User may have a custom install script
 if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
 if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c
index 370236dd1a03..990250965f2c 100644
--- a/arch/arm/common/mcpm_entry.c
+++ b/arch/arm/common/mcpm_entry.c
@@ -51,7 +51,8 @@ void mcpm_cpu_power_down(void)
 {
 	phys_reset_t phys_reset;
 
-	BUG_ON(!platform_ops);
+	if (WARN_ON_ONCE(!platform_ops || !platform_ops->power_down))
+		return;
 	BUG_ON(!irqs_disabled());
 
 	/*
@@ -93,7 +94,8 @@ void mcpm_cpu_suspend(u64 expected_residency)
 {
 	phys_reset_t phys_reset;
 
-	BUG_ON(!platform_ops);
+	if (WARN_ON_ONCE(!platform_ops || !platform_ops->suspend))
+		return;
 	BUG_ON(!irqs_disabled());
 
 	/* Very similar to mcpm_cpu_power_down() */
diff --git a/arch/arm/common/sharpsl_param.c b/arch/arm/common/sharpsl_param.c
index d56c932580eb..025f6ce38596 100644
--- a/arch/arm/common/sharpsl_param.c
+++ b/arch/arm/common/sharpsl_param.c
@@ -15,6 +15,7 @@
 #include <linux/module.h>
 #include <linux/string.h>
 #include <asm/mach/sharpsl_param.h>
+#include <asm/memory.h>
 
 /*
  * Certain hardware parameters determined at the time of device manufacture,
@@ -25,8 +26,10 @@
  */
 #ifdef CONFIG_ARCH_SA1100
 #define PARAM_BASE	0xe8ffc000
+#define param_start(x)	(void *)(x)
 #else
 #define PARAM_BASE	0xa0000a00
+#define param_start(x)	__va(x)
 #endif
 #define MAGIC_CHG(a,b,c,d) ( ( d << 24 ) | ( c << 16 )  | ( b << 8 ) | a )
 
@@ -41,7 +44,7 @@ EXPORT_SYMBOL(sharpsl_param);
 
 void sharpsl_save_param(void)
 {
-	memcpy(&sharpsl_param, (void *)PARAM_BASE, sizeof(struct sharpsl_param_info));
+	memcpy(&sharpsl_param, param_start(PARAM_BASE), sizeof(struct sharpsl_param_info));
 
 	if (sharpsl_param.comadj_keyword != COMADJ_MAGIC)
 		sharpsl_param.comadj=-1;
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index d3db39860b9c..59ceae8f3c95 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -31,5 +31,4 @@ generic-y += termbits.h
 generic-y += termios.h
 generic-y += timex.h
 generic-y += trace_clock.h
-generic-y += types.h
 generic-y += unaligned.h
diff --git a/arch/arm/include/asm/jump_label.h b/arch/arm/include/asm/jump_label.h
index bfc198c75913..863c892b4aaa 100644
--- a/arch/arm/include/asm/jump_label.h
+++ b/arch/arm/include/asm/jump_label.h
@@ -16,7 +16,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key)
 {
-	asm goto("1:\n\t"
+	asm_volatile_goto("1:\n\t"
 		 JUMP_LABEL_NOP "\n\t"
 		 ".pushsection __jump_table, \"aw\"\n\t"
 		 ".word 1b, %l[l_yes], %c0\n\t"
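
This asm goto → asm_volatile_goto rename repeats across the MIPS, PowerPC, s390, sparc, and x86 headers below; it works around a gcc 4.x bug that could miscompile non-volatile "asm goto" blocks. Quoted from memory, so treat it as a sketch rather than gospel, the wrapper added to include/linux/compiler-gcc.h in this same release looks like this:

```c
/*
 * A trailing empty asm() makes gcc treat the asm goto block
 * conservatively, sidestepping the miscompilation; "asm goto" is
 * already implicitly volatile, so the name mostly documents intent.
 */
#define asm_volatile_goto(x...)	do { asm goto(x); asm (""); } while (0)
```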
diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h
index 0f7b7620e9a5..fc82a88f5b69 100644
--- a/arch/arm/include/asm/mcpm.h
+++ b/arch/arm/include/asm/mcpm.h
@@ -76,8 +76,11 @@ int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster);
  *
  * This must be called with interrupts disabled.
  *
- * This does not return.  Re-entry in the kernel is expected via
- * mcpm_entry_point.
+ * On success this does not return.  Re-entry in the kernel is expected
+ * via mcpm_entry_point.
+ *
+ * This will return if mcpm_platform_register() has not been called
+ * previously in which case the caller should take appropriate action.
  */
 void mcpm_cpu_power_down(void);
 
@@ -98,8 +101,11 @@ void mcpm_cpu_power_down(void);
  *
  * This must be called with interrupts disabled.
  *
- * This does not return.  Re-entry in the kernel is expected via
- * mcpm_entry_point.
+ * On success this does not return.  Re-entry in the kernel is expected
+ * via mcpm_entry_point.
+ *
+ * This will return if mcpm_platform_register() has not been called
+ * previously in which case the caller should take appropriate action.
  */
 void mcpm_cpu_suspend(u64 expected_residency);
 
diff --git a/arch/arm/include/asm/syscall.h b/arch/arm/include/asm/syscall.h
index f1d96d4e8092..73ddd7239b33 100644
--- a/arch/arm/include/asm/syscall.h
+++ b/arch/arm/include/asm/syscall.h
@@ -57,6 +57,9 @@ static inline void syscall_get_arguments(struct task_struct *task,
 					  unsigned int i, unsigned int n,
 					  unsigned long *args)
 {
+	if (n == 0)
+		return;
+
 	if (i + n > SYSCALL_MAX_ARGS) {
 		unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
 		unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
@@ -81,6 +84,9 @@ static inline void syscall_set_arguments(struct task_struct *task,
 					  unsigned int i, unsigned int n,
 					  const unsigned long *args)
 {
+	if (n == 0)
+		return;
+
 	if (i + n > SYSCALL_MAX_ARGS) {
 		pr_warning("%s called with max args %d, handling only %d\n",
 			   __func__, i + n, SYSCALL_MAX_ARGS);
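
The new n == 0 early returns look cosmetic but guard real arithmetic: assuming the elided function bodies still special-case argument 0 and then decrement the unsigned count n (as they did in this kernel generation), a caller passing n == 0 would wrap the count and drive the subsequent memcpy wild. A standalone demonstration of just the wrap:

```c
#include <stdio.h>

int main(void)
{
	unsigned int n = 0;

	n--;	/* what the "shift past argument 0" path would do */
	printf("n after decrement: %u\n", n);	/* prints 4294967295 */
	return 0;
}
```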
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 2c7cc1e03473..476de57dcef2 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -487,7 +487,26 @@ __fixup_smp:
 	mrc	p15, 0, r0, c0, c0, 5	@ read MPIDR
 	and	r0, r0, #0xc0000000	@ multiprocessing extensions and
 	teq	r0, #0x80000000		@ not part of a uniprocessor system?
-	moveq	pc, lr			@ yes, assume SMP
+	bne	__fixup_smp_on_up	@ no, assume UP
+
+	@ Core indicates it is SMP. Check for Aegis SOC where a single
+	@ Cortex-A9 CPU is present but SMP operations fault.
+	mov	r4, #0x41000000
+	orr	r4, r4, #0x0000c000
+	orr	r4, r4, #0x00000090
+	teq	r3, r4			@ Check for ARM Cortex-A9
+	movne	pc, lr			@ Not ARM Cortex-A9,
+
+	@ If a future SoC *does* use 0x0 as the PERIPH_BASE, then the
+	@ below address check will need to be #ifdef'd or equivalent
+	@ for the Aegis platform.
+	mrc	p15, 4, r0, c15, c0	@ get SCU base address
+	teq	r0, #0x0		@ '0' on actual UP A9 hardware
+	beq	__fixup_smp_on_up	@ So its an A9 UP
+	ldr	r0, [r0, #4]		@ read SCU Config
+	and	r0, r0, #0x3		@ number of CPUs
+	teq	r0, #0x0		@ is 1?
+	movne	pc, lr
 
 __fixup_smp_on_up:
 	adr	r0, 1f
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index 39c78387ddec..87162e1b94a5 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -129,6 +129,24 @@ DT_MACHINE_START(OMAP3_DT, "Generic OMAP3 (Flattened Device Tree)")
 	.restart	= omap3xxx_restart,
 MACHINE_END
 
+static const char *omap36xx_boards_compat[] __initdata = {
+	"ti,omap36xx",
+	NULL,
+};
+
+DT_MACHINE_START(OMAP36XX_DT, "Generic OMAP36xx (Flattened Device Tree)")
+	.reserve	= omap_reserve,
+	.map_io		= omap3_map_io,
+	.init_early	= omap3630_init_early,
+	.init_irq	= omap_intc_of_init,
+	.handle_irq	= omap3_intc_handle_irq,
+	.init_machine	= omap_generic_init,
+	.init_late	= omap3_init_late,
+	.init_time	= omap3_sync32k_timer_init,
+	.dt_compat	= omap36xx_boards_compat,
+	.restart	= omap3xxx_restart,
+MACHINE_END
+
 static const char *omap3_gp_boards_compat[] __initdata = {
 	"ti,omap3-beagle",
 	"timll,omap3-devkit8000",
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index c3270c0f1fce..f6fe388af989 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -167,38 +167,47 @@ static struct lp55xx_led_config rx51_lp5523_led_config[] = {
 		.name		= "lp5523:kb1",
 		.chan_nr	= 0,
 		.led_current	= 50,
+		.max_current	= 100,
 	}, {
 		.name		= "lp5523:kb2",
 		.chan_nr	= 1,
 		.led_current	= 50,
+		.max_current	= 100,
 	}, {
 		.name		= "lp5523:kb3",
 		.chan_nr	= 2,
 		.led_current	= 50,
+		.max_current	= 100,
 	}, {
 		.name		= "lp5523:kb4",
 		.chan_nr	= 3,
 		.led_current	= 50,
+		.max_current	= 100,
 	}, {
 		.name		= "lp5523:b",
 		.chan_nr	= 4,
 		.led_current	= 50,
+		.max_current	= 100,
 	}, {
 		.name		= "lp5523:g",
 		.chan_nr	= 5,
 		.led_current	= 50,
+		.max_current	= 100,
 	}, {
 		.name		= "lp5523:r",
 		.chan_nr	= 6,
 		.led_current	= 50,
+		.max_current	= 100,
 	}, {
 		.name		= "lp5523:kb5",
 		.chan_nr	= 7,
 		.led_current	= 50,
+		.max_current	= 100,
 	}, {
 		.name		= "lp5523:kb6",
 		.chan_nr	= 8,
 		.led_current	= 50,
+		.max_current	= 100,
 	}
 };
 
diff --git a/arch/arm/mach-omap2/gpmc-onenand.c b/arch/arm/mach-omap2/gpmc-onenand.c
index 64b5a8346982..8b6876c98ce1 100644
--- a/arch/arm/mach-omap2/gpmc-onenand.c
+++ b/arch/arm/mach-omap2/gpmc-onenand.c
@@ -272,9 +272,19 @@ static int omap2_onenand_setup_async(void __iomem *onenand_base)
 	struct gpmc_timings t;
 	int ret;
 
-	if (gpmc_onenand_data->of_node)
+	if (gpmc_onenand_data->of_node) {
 		gpmc_read_settings_dt(gpmc_onenand_data->of_node,
 				      &onenand_async);
+		if (onenand_async.sync_read || onenand_async.sync_write) {
+			if (onenand_async.sync_write)
+				gpmc_onenand_data->flags |=
+					ONENAND_SYNC_READWRITE;
+			else
+				gpmc_onenand_data->flags |= ONENAND_SYNC_READ;
+			onenand_async.sync_read = false;
+			onenand_async.sync_write = false;
+		}
+	}
 
 	omap2_onenand_set_async_mode(onenand_base);
 
diff --git a/arch/arm/mach-omap2/mux.h b/arch/arm/mach-omap2/mux.h
index 5d2080ef7923..16f78a990d04 100644
--- a/arch/arm/mach-omap2/mux.h
+++ b/arch/arm/mach-omap2/mux.h
@@ -28,7 +28,7 @@
 #define OMAP_PULL_UP			(1 << 4)
 #define OMAP_ALTELECTRICALSEL		(1 << 5)
 
-/* 34xx specific mux bit defines */
+/* omap3/4/5 specific mux bit defines */
 #define OMAP_INPUT_EN			(1 << 8)
 #define OMAP_OFF_EN			(1 << 9)
 #define OMAP_OFFOUT_EN			(1 << 10)
@@ -36,8 +36,6 @@
 #define OMAP_OFF_PULL_EN		(1 << 12)
 #define OMAP_OFF_PULL_UP		(1 << 13)
 #define OMAP_WAKEUP_EN			(1 << 14)
-
-/* 44xx specific mux bit defines */
 #define OMAP_WAKEUP_EVENT		(1 << 15)
 
 /* Active pin states */
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index fa74a0625da1..ead48fa5715e 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -628,7 +628,7 @@ void __init omap4_local_timer_init(void)
 #endif /* CONFIG_HAVE_ARM_TWD */
 #endif /* CONFIG_ARCH_OMAP4 */
 
-#ifdef CONFIG_SOC_OMAP5
+#if defined(CONFIG_SOC_OMAP5) || defined(CONFIG_SOC_DRA7XX)
 void __init omap5_realtime_timer_init(void)
 {
 	omap4_sync32k_timer_init();
@@ -636,7 +636,7 @@ void __init omap5_realtime_timer_init(void)
 
 	clocksource_of_init();
 }
-#endif /* CONFIG_SOC_OMAP5 */
+#endif /* CONFIG_SOC_OMAP5 || CONFIG_SOC_DRA7XX */
 
 /**
  * omap_timer_init - build and register timer device with an
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index f5e1a8471714..1272ed202dde 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1232,7 +1232,8 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
 			break;
 
 		len = (j - i) << PAGE_SHIFT;
-		ret = iommu_map(mapping->domain, iova, phys, len, 0);
+		ret = iommu_map(mapping->domain, iova, phys, len,
+				IOMMU_READ|IOMMU_WRITE);
 		if (ret < 0)
 			goto fail;
 		iova += len;
@@ -1431,6 +1432,27 @@ static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
 					 GFP_KERNEL);
 }
 
+static int __dma_direction_to_prot(enum dma_data_direction dir)
+{
+	int prot;
+
+	switch (dir) {
+	case DMA_BIDIRECTIONAL:
+		prot = IOMMU_READ | IOMMU_WRITE;
+		break;
+	case DMA_TO_DEVICE:
+		prot = IOMMU_READ;
+		break;
+	case DMA_FROM_DEVICE:
+		prot = IOMMU_WRITE;
+		break;
+	default:
+		prot = 0;
+	}
+
+	return prot;
+}
+
 /*
  * Map a part of the scatter-gather list into contiguous io address space
  */
@@ -1444,6 +1466,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
 	int ret = 0;
 	unsigned int count;
 	struct scatterlist *s;
+	int prot;
 
 	size = PAGE_ALIGN(size);
 	*handle = DMA_ERROR_CODE;
@@ -1460,7 +1483,9 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
 		    !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
 			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
 
-		ret = iommu_map(mapping->domain, iova, phys, len, 0);
+		prot = __dma_direction_to_prot(dir);
+
+		ret = iommu_map(mapping->domain, iova, phys, len, prot);
 		if (ret < 0)
 			goto fail;
 		count += len >> PAGE_SHIFT;
@@ -1665,19 +1690,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
 	if (dma_addr == DMA_ERROR_CODE)
 		return dma_addr;
 
-	switch (dir) {
-	case DMA_BIDIRECTIONAL:
-		prot = IOMMU_READ | IOMMU_WRITE;
-		break;
-	case DMA_TO_DEVICE:
-		prot = IOMMU_READ;
-		break;
-	case DMA_FROM_DEVICE:
-		prot = IOMMU_WRITE;
-		break;
-	default:
-		prot = 0;
-	}
+	prot = __dma_direction_to_prot(dir);
 
 	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
 	if (ret < 0)
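
Across these hunks the new __dma_direction_to_prot() helper centralizes a DMA-direction-to-IOMMU-permission mapping that was previously open-coded in one place and, in two other call sites, passed as a bare 0. A self-contained demo of the same mapping, with the enum values and prot bits as local stand-ins rather than the kernel's headers:

```c
#include <stdio.h>

enum dma_data_direction { DMA_BIDIRECTIONAL, DMA_TO_DEVICE,
			  DMA_FROM_DEVICE, DMA_NONE };
#define IOMMU_READ	(1 << 0)	/* stand-in flag values */
#define IOMMU_WRITE	(1 << 1)

static int dir_to_prot(enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:	return IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:	return IOMMU_READ;	/* device reads RAM */
	case DMA_FROM_DEVICE:	return IOMMU_WRITE;	/* device writes RAM */
	default:		return 0;
	}
}

int main(void)
{
	printf("TO_DEVICE   -> %#x\n", dir_to_prot(DMA_TO_DEVICE));
	printf("FROM_DEVICE -> %#x\n", dir_to_prot(DMA_FROM_DEVICE));
	return 0;
}
```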
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index febaee7ca57b..18ec4c504abf 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -17,7 +17,6 @@
 #include <linux/nodemask.h>
 #include <linux/initrd.h>
 #include <linux/of_fdt.h>
-#include <linux/of_reserved_mem.h>
 #include <linux/highmem.h>
 #include <linux/gfp.h>
 #include <linux/memblock.h>
@@ -379,8 +378,6 @@ void __init arm_memblock_init(struct meminfo *mi,
 	if (mdesc->reserve)
 		mdesc->reserve();
 
-	early_init_dt_scan_reserved_mem();
-
 	/*
 	 * reserve memory for DMA contigouos allocations,
 	 * must come from DMA area inside low memory
diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h
index 4d6d77ed9b9d..e194f957ca8c 100644
--- a/arch/mips/include/asm/jump_label.h
+++ b/arch/mips/include/asm/jump_label.h
@@ -22,7 +22,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key)
 {
-	asm goto("1:\tnop\n\t"
+	asm_volatile_goto("1:\tnop\n\t"
 		"nop\n\t"
 		".pushsection __jump_table, \"aw\"\n\t"
 		WORD_INSN " 1b, %l[l_yes], %0\n\t"
diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S
index 4204d76af854..029e002a4ea0 100644
--- a/arch/mips/kernel/octeon_switch.S
+++ b/arch/mips/kernel/octeon_switch.S
@@ -73,7 +73,7 @@
 3:
 
 #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
-	PTR_L	t8, __stack_chk_guard
+	PTR_LA	t8, __stack_chk_guard
 	LONG_L	t9, TASK_STACK_CANARY(a1)
 	LONG_S	t9, 0(t8)
 #endif
diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S
index 38af83f84c4a..20b7b040e76f 100644
--- a/arch/mips/kernel/r2300_switch.S
+++ b/arch/mips/kernel/r2300_switch.S
@@ -67,7 +67,7 @@ LEAF(resume)
 1:
 
 #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
-	PTR_L	t8, __stack_chk_guard
+	PTR_LA	t8, __stack_chk_guard
 	LONG_L	t9, TASK_STACK_CANARY(a1)
 	LONG_S	t9, 0(t8)
 #endif
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index 921238a6bd26..078de5eaca8f 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -69,7 +69,7 @@
 1:
 
 #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
-	PTR_L	t8, __stack_chk_guard
+	PTR_LA	t8, __stack_chk_guard
 	LONG_L	t9, TASK_STACK_CANARY(a1)
 	LONG_S	t9, 0(t8)
 #endif
diff --git a/arch/parisc/include/asm/traps.h b/arch/parisc/include/asm/traps.h
index 1945f995f2df..4736020ba5ea 100644
--- a/arch/parisc/include/asm/traps.h
+++ b/arch/parisc/include/asm/traps.h
@@ -6,7 +6,7 @@ struct pt_regs;
 
 /* traps.c */
 void parisc_terminate(char *msg, struct pt_regs *regs,
-		int code, unsigned long offset);
+		int code, unsigned long offset) __noreturn __cold;
 
 /* mm/fault.c */
 void do_page_fault(struct pt_regs *regs, unsigned long code,
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index c035673209f7..b521c0adf4ec 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -602,6 +602,7 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
 		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
 	}
 }
+EXPORT_SYMBOL_GPL(flush_cache_page);
 
 #ifdef CONFIG_PARISC_TMPALIAS
 
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c
index 8a252f2d6c08..2b96602e812f 100644
--- a/arch/parisc/kernel/smp.c
+++ b/arch/parisc/kernel/smp.c
@@ -72,7 +72,6 @@ enum ipi_message_type {
 	IPI_NOP=0,
 	IPI_RESCHEDULE=1,
 	IPI_CALL_FUNC,
-	IPI_CALL_FUNC_SINGLE,
 	IPI_CPU_START,
 	IPI_CPU_STOP,
 	IPI_CPU_TEST
@@ -164,11 +163,6 @@ ipi_interrupt(int irq, void *dev_id)
 				generic_smp_call_function_interrupt();
 				break;
 
-			case IPI_CALL_FUNC_SINGLE:
-				smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC_SINGLE\n", this_cpu);
-				generic_smp_call_function_single_interrupt();
-				break;
-
 			case IPI_CPU_START:
 				smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_START\n", this_cpu);
 				break;
@@ -260,7 +254,7 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-	send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE);
+	send_IPI_single(cpu, IPI_CALL_FUNC);
 }
 
 /*
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 04e47c6a4562..1cd1d0c83b6d 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -291,11 +291,6 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err)
 	do_exit(SIGSEGV);
 }
 
-int syscall_ipi(int (*syscall) (struct pt_regs *), struct pt_regs *regs)
-{
-	return syscall(regs);
-}
-
 /* gdb uses break 4,8 */
 #define GDB_BREAK_INSN 0x10004
 static void handle_gdb_break(struct pt_regs *regs, int wot)
@@ -805,14 +800,14 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
 	else {
 
 		/*
-		 * The kernel should never fault on its own address space.
+		 * The kernel should never fault on its own address space,
+		 * unless pagefault_disable() was called before.
 		 */
 
-		if (fault_space == 0)
+		if (fault_space == 0 && !in_atomic())
 		{
 			pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
 			parisc_terminate("Kernel Fault", regs, code, fault_address);
-
 		}
 	}
 
diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c
index ac4370b1ca40..b5507ec06b84 100644
--- a/arch/parisc/lib/memcpy.c
+++ b/arch/parisc/lib/memcpy.c
@@ -56,7 +56,7 @@
 #ifdef __KERNEL__
 #include <linux/module.h>
 #include <linux/compiler.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 #define s_space "%%sr1"
 #define d_space "%%sr2"
 #else
@@ -524,4 +524,17 @@ EXPORT_SYMBOL(copy_to_user);
 EXPORT_SYMBOL(copy_from_user);
 EXPORT_SYMBOL(copy_in_user);
 EXPORT_SYMBOL(memcpy);
+
+long probe_kernel_read(void *dst, const void *src, size_t size)
+{
+	unsigned long addr = (unsigned long)src;
+
+	if (size < 0 || addr < PAGE_SIZE)
+		return -EFAULT;
+
+	/* check for I/O space F_EXTEND(0xfff00000) access as well? */
+
+	return __probe_kernel_read(dst, src, size);
+}
+
 #endif
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 00c0ed333a3d..0293588d5b8c 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -171,20 +171,25 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
 			      unsigned long address)
 {
 	struct vm_area_struct *vma, *prev_vma;
-	struct task_struct *tsk = current;
-	struct mm_struct *mm = tsk->mm;
+	struct task_struct *tsk;
+	struct mm_struct *mm;
 	unsigned long acc_type;
 	int fault;
-	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+	unsigned int flags;
 
-	if (in_atomic() || !mm)
+	if (in_atomic())
 		goto no_context;
 
+	tsk = current;
+	mm = tsk->mm;
+	if (!mm)
+		goto no_context;
+
+	flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 	if (user_mode(regs))
 		flags |= FAULT_FLAG_USER;
 
 	acc_type = parisc_acctyp(code, regs->iir);
-
 	if (acc_type & VM_WRITE)
 		flags |= FAULT_FLAG_WRITE;
 retry:
diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
index ae098c438f00..f016bb699b5f 100644
--- a/arch/powerpc/include/asm/jump_label.h
+++ b/arch/powerpc/include/asm/jump_label.h
@@ -19,7 +19,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key)
 {
-	asm goto("1:\n\t"
+	asm_volatile_goto("1:\n\t"
 		 "nop\n\t"
 		 ".pushsection __jump_table, \"aw\"\n\t"
 		 JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 57d286a78f86..c7cb8c232d2f 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -495,14 +495,15 @@ void __do_irq(struct pt_regs *regs)
 void do_IRQ(struct pt_regs *regs)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
-	struct thread_info *curtp, *irqtp;
+	struct thread_info *curtp, *irqtp, *sirqtp;
 
 	/* Switch to the irq stack to handle this */
 	curtp = current_thread_info();
 	irqtp = hardirq_ctx[raw_smp_processor_id()];
+	sirqtp = softirq_ctx[raw_smp_processor_id()];
 
 	/* Already there ? */
-	if (unlikely(curtp == irqtp)) {
+	if (unlikely(curtp == irqtp || curtp == sirqtp)) {
 		__do_irq(regs);
 		set_irq_regs(old_regs);
 		return;
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 294b7af28cdd..c71103b8a748 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1066,7 +1066,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 BEGIN_FTR_SECTION
 	mfspr	r8, SPRN_DSCR
 	ld	r7, HSTATE_DSCR(r13)
-	std	r8, VCPU_DSCR(r7)
+	std	r8, VCPU_DSCR(r9)
 	mtspr	SPRN_DSCR, r7
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index 1c6a9d729df4..c65593abae8e 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -332,6 +332,13 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 	unsigned long hva;
 	int pfnmap = 0;
 	int tsize = BOOK3E_PAGESZ_4K;
+	int ret = 0;
+	unsigned long mmu_seq;
+	struct kvm *kvm = vcpu_e500->vcpu.kvm;
+
+	/* used to check for invalidations in progress */
+	mmu_seq = kvm->mmu_notifier_seq;
+	smp_rmb();
 
 	/*
 	 * Translate guest physical to true physical, acquiring
@@ -449,6 +456,12 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 		gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
 	}
 
+	spin_lock(&kvm->mmu_lock);
+	if (mmu_notifier_retry(kvm, mmu_seq)) {
+		ret = -EAGAIN;
+		goto out;
+	}
+
 	kvmppc_e500_ref_setup(ref, gtlbe, pfn);
 
 	kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
@@ -457,10 +470,13 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 	/* Clear i-cache for new pages */
 	kvmppc_mmu_flush_icache(pfn);
 
+out:
+	spin_unlock(&kvm->mmu_lock);
+
 	/* Drop refcount on page, so that mmu notifiers can clear it */
 	kvm_release_pfn_clean(pfn);
 
-	return 0;
+	return ret;
 }
 
 /* XXX only map the one-one case, for now use TLB0 */
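
Taken together, the three e500 hunks implement KVM's standard MMU-notifier race check: snapshot mmu_notifier_seq before the unlocked translation work, then re-check it under mmu_lock before committing the shadow entry. A schematic of that ordering (bodies elided and the function name hypothetical; only the sequence of operations matters):

```c
#include <linux/kvm_host.h>

static int shadow_map_sketch(struct kvm *kvm)
{
	unsigned long mmu_seq;
	int ret = 0;

	mmu_seq = kvm->mmu_notifier_seq;	/* snapshot first */
	smp_rmb();				/* order against the lookup below */

	/* ... unlocked gfn->pfn translation and page-size logic here ... */

	spin_lock(&kvm->mmu_lock);
	if (mmu_notifier_retry(kvm, mmu_seq)) {
		ret = -EAGAIN;			/* an invalidation raced with us */
		goto out;
	}
	/* ... install the shadow TLB entry under the lock ... */
out:
	spin_unlock(&kvm->mmu_lock);
	return ret;
}
```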
diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h
index 6c32190dc73e..346b1c85ffb4 100644
--- a/arch/s390/include/asm/jump_label.h
+++ b/arch/s390/include/asm/jump_label.h
@@ -15,7 +15,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key)
 {
-	asm goto("0:	brcl 0,0\n"
+	asm_volatile_goto("0:	brcl 0,0\n"
 		".pushsection __jump_table, \"aw\"\n"
 		ASM_ALIGN "\n"
 		ASM_PTR " 0b, %l[label], %0\n"
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index c84f33d51f7b..7dd21720e5b0 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -40,28 +40,26 @@ static inline void *load_real_addr(void *addr)
 }
 
 /*
- * Copy up to one page to vmalloc or real memory
+ * Copy real to virtual or real memory
  */
-static ssize_t copy_page_real(void *buf, void *src, size_t csize)
+static int copy_from_realmem(void *dest, void *src, size_t count)
 {
-	size_t size;
+	unsigned long size;
+	int rc;
 
-	if (is_vmalloc_addr(buf)) {
-		BUG_ON(csize >= PAGE_SIZE);
-		/* If buf is not page aligned, copy first part */
-		size = min(roundup(__pa(buf), PAGE_SIZE) - __pa(buf), csize);
-		if (size) {
-			if (memcpy_real(load_real_addr(buf), src, size))
-				return -EFAULT;
-			buf += size;
-			src += size;
-		}
-		/* Copy second part */
-		size = csize - size;
-		return (size) ? memcpy_real(load_real_addr(buf), src, size) : 0;
-	} else {
-		return memcpy_real(buf, src, csize);
-	}
+	if (!count)
+		return 0;
+	if (!is_vmalloc_or_module_addr(dest))
+		return memcpy_real(dest, src, count);
+	do {
+		size = min(count, PAGE_SIZE - (__pa(dest) & ~PAGE_MASK));
+		if (memcpy_real(load_real_addr(dest), src, size))
+			return -EFAULT;
+		count -= size;
+		dest += size;
+		src += size;
+	} while (count);
+	return 0;
 }
 
 /*
66 64
67/* 65/*
@@ -114,7 +112,7 @@ static ssize_t copy_oldmem_page_kdump(char *buf, size_t csize,
 		rc = copy_to_user_real((void __force __user *) buf,
 				       (void *) src, csize);
 	else
-		rc = copy_page_real(buf, (void *) src, csize);
+		rc = copy_from_realmem(buf, (void *) src, csize);
 	return (rc == 0) ? rc : csize;
 }
 
@@ -210,7 +208,7 @@ int copy_from_oldmem(void *dest, void *src, size_t count)
 	if (OLDMEM_BASE) {
 		if ((unsigned long) src < OLDMEM_SIZE) {
 			copied = min(count, OLDMEM_SIZE - (unsigned long) src);
-			rc = memcpy_real(dest, src + OLDMEM_BASE, copied);
+			rc = copy_from_realmem(dest, src + OLDMEM_BASE, copied);
 			if (rc)
 				return rc;
 		}
@@ -223,7 +221,7 @@ int copy_from_oldmem(void *dest, void *src, size_t count)
 			return rc;
 		}
 	}
-	return memcpy_real(dest + copied, src + copied, count - copied);
+	return copy_from_realmem(dest + copied, src + copied, count - copied);
 }
 
 /*
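
copy_from_realmem()'s loop bound, min(count, PAGE_SIZE - (__pa(dest) & ~PAGE_MASK)), copies up to the next page boundary of the destination on each pass, since each vmalloc page can map to a different real page. A userspace rendering of just that arithmetic:

```c
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long dest = 0x12345f00;	/* deliberately unaligned */
	unsigned long count = 10000;

	while (count) {
		unsigned long size = PAGE_SIZE - (dest & ~PAGE_MASK);

		if (size > count)
			size = count;	/* the min() in the kernel loop */
		printf("copy %4lu bytes at %#lx\n", size, dest);
		count -= size;
		dest += size;
	}
	return 0;
}
```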
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index cc30d1fb000c..0dc2b6d0a1ec 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -266,6 +266,7 @@ sysc_sigpending:
 	tm	__TI_flags+3(%r12),_TIF_SYSCALL
 	jno	sysc_return
 	lm	%r2,%r7,__PT_R2(%r11)	# load svc arguments
+	l	%r10,__TI_sysc_table(%r12)	# 31 bit system call table
 	xr	%r8,%r8			# svc 0 returns -ENOSYS
 	clc	__PT_INT_CODE+2(2,%r11),BASED(.Lnr_syscalls+2)
 	jnl	sysc_nr_ok		# invalid svc number -> do svc 0
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 2b2188b97c6a..e5b43c97a834 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -297,6 +297,7 @@ sysc_sigpending:
 	tm	__TI_flags+7(%r12),_TIF_SYSCALL
 	jno	sysc_return
 	lmg	%r2,%r7,__PT_R2(%r11)	# load svc arguments
+	lg	%r10,__TI_sysc_table(%r12)	# address of system call table
 	lghi	%r8,0			# svc 0 returns -ENOSYS
 	llgh	%r1,__PT_INT_CODE+2(%r11)	# load new svc number
 	cghi	%r1,NR_syscalls
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 0ce9fb245034..d86e64eddb42 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -67,6 +67,11 @@ static int __kprobes is_prohibited_opcode(kprobe_opcode_t *insn)
 	case 0xac:	/* stnsm */
 	case 0xad:	/* stosm */
 		return -EINVAL;
+	case 0xc6:
+		switch (insn[0] & 0x0f) {
+		case 0x00: /* exrl */
+			return -EINVAL;
+		}
 	}
 	switch (insn[0]) {
 	case 0x0101:	/* pr	 */
@@ -180,7 +185,6 @@ static int __kprobes is_insn_relative_long(kprobe_opcode_t *insn)
 		break;
 	case 0xc6:
 		switch (insn[0] & 0x0f) {
-		case 0x00: /* exrl */
 		case 0x02: /* pfdrl */
 		case 0x04: /* cghrl */
 		case 0x05: /* chrl */
diff --git a/arch/sparc/include/asm/jump_label.h b/arch/sparc/include/asm/jump_label.h
index 5080d16a832f..ec2e2e2aba7d 100644
--- a/arch/sparc/include/asm/jump_label.h
+++ b/arch/sparc/include/asm/jump_label.h
@@ -9,7 +9,7 @@
 
 static __always_inline bool arch_static_branch(struct static_key *key)
 {
-		asm goto("1:\n\t"
+		asm_volatile_goto("1:\n\t"
 			 "nop\n\t"
 			 "nop\n\t"
 			 ".pushsection __jump_table,  \"aw\"\n\t"
diff --git a/arch/tile/include/asm/atomic.h b/arch/tile/include/asm/atomic.h
index d385eaadece7..709798460763 100644
--- a/arch/tile/include/asm/atomic.h
+++ b/arch/tile/include/asm/atomic.h
@@ -166,7 +166,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
  *
  * Atomically sets @v to @i and returns old @v
  */
-static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
+static inline long long atomic64_xchg(atomic64_t *v, long long n)
 {
 	return xchg64(&v->counter, n);
 }
@@ -180,7 +180,8 @@ static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
  * Atomically checks if @v holds @o and replaces it with @n if so.
  * Returns the old value at @v.
  */
-static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
+static inline long long atomic64_cmpxchg(atomic64_t *v, long long o,
+					long long n)
 {
 	return cmpxchg64(&v->counter, o, n);
 }
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index 0d0395b1b152..1ad4a1f7d42b 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -80,7 +80,7 @@ static inline void atomic_set(atomic_t *v, int n)
 /* A 64bit atomic type */
 
 typedef struct {
-	u64 __aligned(8) counter;
+	long long counter;
 } atomic64_t;
 
 #define ATOMIC64_INIT(val) { (val) }
@@ -91,14 +91,14 @@ typedef struct {
  *
  * Atomically reads the value of @v.
  */
-static inline u64 atomic64_read(const atomic64_t *v)
+static inline long long atomic64_read(const atomic64_t *v)
 {
 	/*
 	 * Requires an atomic op to read both 32-bit parts consistently.
 	 * Casting away const is safe since the atomic support routines
 	 * do not write to memory if the value has not been modified.
 	 */
-	return _atomic64_xchg_add((u64 *)&v->counter, 0);
+	return _atomic64_xchg_add((long long *)&v->counter, 0);
 }
 
 /**
@@ -108,7 +108,7 @@ static inline u64 atomic64_read(const atomic64_t *v)
  *
  * Atomically adds @i to @v.
  */
-static inline void atomic64_add(u64 i, atomic64_t *v)
+static inline void atomic64_add(long long i, atomic64_t *v)
 {
 	_atomic64_xchg_add(&v->counter, i);
 }
@@ -120,7 +120,7 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
  *
  * Atomically adds @i to @v and returns @i + @v
  */
-static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
 {
 	smp_mb();  /* barrier for proper semantics */
 	return _atomic64_xchg_add(&v->counter, i) + i;
@@ -135,7 +135,8 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
  * Atomically adds @a to @v, so long as @v was not already @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
+static inline long long atomic64_add_unless(atomic64_t *v, long long a,
+					long long u)
 {
 	smp_mb();  /* barrier for proper semantics */
 	return _atomic64_xchg_add_unless(&v->counter, a, u) != u;
@@ -151,7 +152,7 @@ static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
  * atomic64_set() can't be just a raw store, since it would be lost if it
  * fell between the load and store of one of the other atomic ops.
  */
-static inline void atomic64_set(atomic64_t *v, u64 n)
+static inline void atomic64_set(atomic64_t *v, long long n)
 {
 	_atomic64_xchg(&v->counter, n);
 }
@@ -236,11 +237,13 @@ extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
 extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
 extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
 extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
-extern u64 __atomic64_cmpxchg(volatile u64 *p, int *lock, u64 o, u64 n);
-extern u64 __atomic64_xchg(volatile u64 *p, int *lock, u64 n);
-extern u64 __atomic64_xchg_add(volatile u64 *p, int *lock, u64 n);
-extern u64 __atomic64_xchg_add_unless(volatile u64 *p,
-				      int *lock, u64 o, u64 n);
+extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
+					long long o, long long n);
+extern long long __atomic64_xchg(volatile long long *p, int *lock, long long n);
+extern long long __atomic64_xchg_add(volatile long long *p, int *lock,
+					long long n);
+extern long long __atomic64_xchg_add_unless(volatile long long *p,
+					int *lock, long long o, long long n);
 
 /* Return failure from the atomic wrappers. */
 struct __get_user __atomic_bad_address(int __user *addr);
diff --git a/arch/tile/include/asm/cmpxchg.h b/arch/tile/include/asm/cmpxchg.h
index 4001d5eab4bb..0ccda3c425be 100644
--- a/arch/tile/include/asm/cmpxchg.h
+++ b/arch/tile/include/asm/cmpxchg.h
@@ -35,10 +35,10 @@ int _atomic_xchg(int *ptr, int n);
 int _atomic_xchg_add(int *v, int i);
 int _atomic_xchg_add_unless(int *v, int a, int u);
 int _atomic_cmpxchg(int *ptr, int o, int n);
-u64 _atomic64_xchg(u64 *v, u64 n);
-u64 _atomic64_xchg_add(u64 *v, u64 i);
-u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u);
-u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
+long long _atomic64_xchg(long long *v, long long n);
+long long _atomic64_xchg_add(long long *v, long long i);
+long long _atomic64_xchg_add_unless(long long *v, long long a, long long u);
+long long _atomic64_cmpxchg(long long *v, long long o, long long n);
 
 #define xchg(ptr, n)						\
 	({							\
@@ -53,7 +53,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
 		if (sizeof(*(ptr)) != 4)				\
 			__cmpxchg_called_with_bad_pointer();		\
 		smp_mb();						\
-		(typeof(*(ptr)))_atomic_cmpxchg((int *)ptr, (int)o, (int)n); \
+		(typeof(*(ptr)))_atomic_cmpxchg((int *)ptr, (int)o,	\
+						(int)n);		\
 	})
 
 #define xchg64(ptr, n)							\
59#define xchg64(ptr, n) \ 60#define xchg64(ptr, n) \
@@ -61,7 +62,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
61 if (sizeof(*(ptr)) != 8) \ 62 if (sizeof(*(ptr)) != 8) \
62 __xchg_called_with_bad_pointer(); \ 63 __xchg_called_with_bad_pointer(); \
63 smp_mb(); \ 64 smp_mb(); \
64 (typeof(*(ptr)))_atomic64_xchg((u64 *)(ptr), (u64)(n)); \ 65 (typeof(*(ptr)))_atomic64_xchg((long long *)(ptr), \
66 (long long)(n)); \
65 }) 67 })
66 68
67#define cmpxchg64(ptr, o, n) \ 69#define cmpxchg64(ptr, o, n) \
@@ -69,7 +71,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
69 if (sizeof(*(ptr)) != 8) \ 71 if (sizeof(*(ptr)) != 8) \
70 __cmpxchg_called_with_bad_pointer(); \ 72 __cmpxchg_called_with_bad_pointer(); \
71 smp_mb(); \ 73 smp_mb(); \
72 (typeof(*(ptr)))_atomic64_cmpxchg((u64 *)ptr, (u64)o, (u64)n); \ 74 (typeof(*(ptr)))_atomic64_cmpxchg((long long *)ptr, \
75 (long long)o, (long long)n); \
73 }) 76 })
74 77
75#else 78#else
@@ -81,10 +84,11 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
 	switch (sizeof(*(ptr))) {					\
 	case 4:								\
 		__x = (typeof(__x))(unsigned long)			\
-			__insn_exch4((ptr), (u32)(unsigned long)(n));	\
+			__insn_exch4((ptr),				\
+				(u32)(unsigned long)(n));		\
 		break;							\
 	case 8:								\
 		__x = (typeof(__x))					\
 			__insn_exch((ptr), (unsigned long)(n));		\
 		break;							\
 	default:							\
@@ -103,10 +107,12 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
 	switch (sizeof(*(ptr))) {					\
 	case 4:								\
 		__x = (typeof(__x))(unsigned long)			\
-			__insn_cmpexch4((ptr), (u32)(unsigned long)(n)); \
+			__insn_cmpexch4((ptr),				\
+				(u32)(unsigned long)(n));		\
 		break;							\
 	case 8:								\
-		__x = (typeof(__x))__insn_cmpexch((ptr), (u64)(n));	\
+		__x = (typeof(__x))__insn_cmpexch((ptr),		\
+						(long long)(n));	\
 		break;							\
 	default:							\
 		__cmpxchg_called_with_bad_pointer();			\
diff --git a/arch/tile/include/asm/percpu.h b/arch/tile/include/asm/percpu.h
index 63294f5a8efb..4f7ae39fa202 100644
--- a/arch/tile/include/asm/percpu.h
+++ b/arch/tile/include/asm/percpu.h
@@ -15,9 +15,37 @@
15#ifndef _ASM_TILE_PERCPU_H 15#ifndef _ASM_TILE_PERCPU_H
16#define _ASM_TILE_PERCPU_H 16#define _ASM_TILE_PERCPU_H
17 17
18register unsigned long __my_cpu_offset __asm__("tp"); 18register unsigned long my_cpu_offset_reg asm("tp");
19#define __my_cpu_offset __my_cpu_offset 19
20#define set_my_cpu_offset(tp) (__my_cpu_offset = (tp)) 20#ifdef CONFIG_PREEMPT
21/*
22 * For full preemption, we can't just use the register variable
23 * directly, since we need barrier() to hazard against it, causing the
24 * compiler to reload anything computed from a previous "tp" value.
25 * But we also don't want to use volatile asm, since we'd like the
26 * compiler to be able to cache the value across multiple percpu reads.
27 * So we use a fake stack read as a hazard against barrier().
28 * The 'U' constraint is like 'm' but disallows postincrement.
29 */
30static inline unsigned long __my_cpu_offset(void)
31{
32 unsigned long tp;
33 register unsigned long *sp asm("sp");
34 asm("move %0, tp" : "=r" (tp) : "U" (*sp));
35 return tp;
36}
37#define __my_cpu_offset __my_cpu_offset()
38#else
39/*
40 * We don't need to hazard against barrier() since "tp" doesn't ever
41 * change with PREEMPT_NONE, and with PREEMPT_VOLUNTARY it only
42 * changes at function call points, at which we are already re-reading
43 * the value of "tp" due to "my_cpu_offset_reg" being a global variable.
44 */
45#define __my_cpu_offset my_cpu_offset_reg
46#endif
47
48#define set_my_cpu_offset(tp) (my_cpu_offset_reg = (tp))
21 49
22#include <asm-generic/percpu.h> 50#include <asm-generic/percpu.h>
23 51
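
The comment in the new __my_cpu_offset() is the heart of this change: barrier() is only a memory clobber, so an asm with no memory operands can be cached across it. The dummy "U" (*sp) input ties the tp read to memory, which the clobber then invalidates. A sketch of the pattern this protects, under CONFIG_PREEMPT (use() is a hypothetical consumer, not from the patch):

        extern void use(unsigned long a, unsigned long b);  /* hypothetical */

        static void percpu_offset_reload_demo(void)
        {
                unsigned long off1, off2;

                off1 = __my_cpu_offset;  /* inline asm runs, reads tp */
                barrier();               /* memory clobber: we may have been
                                          * preempted and migrated here */
                off2 = __my_cpu_offset;  /* asm runs again, because its fake
                                          * memory input was just clobbered */
                use(off1, off2);
        }
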
diff --git a/arch/tile/kernel/hardwall.c b/arch/tile/kernel/hardwall.c
index df27a1fd94a3..531f4c365351 100644
--- a/arch/tile/kernel/hardwall.c
+++ b/arch/tile/kernel/hardwall.c
@@ -66,7 +66,7 @@ static struct hardwall_type hardwall_types[] = {
66 0, 66 0,
67 "udn", 67 "udn",
68 LIST_HEAD_INIT(hardwall_types[HARDWALL_UDN].list), 68 LIST_HEAD_INIT(hardwall_types[HARDWALL_UDN].list),
69 __SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_UDN].lock), 69 __SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_UDN].lock),
70 NULL 70 NULL
71 }, 71 },
72#ifndef __tilepro__ 72#ifndef __tilepro__
@@ -77,7 +77,7 @@ static struct hardwall_type hardwall_types[] = {
77 1, /* disabled pending hypervisor support */ 77 1, /* disabled pending hypervisor support */
78 "idn", 78 "idn",
79 LIST_HEAD_INIT(hardwall_types[HARDWALL_IDN].list), 79 LIST_HEAD_INIT(hardwall_types[HARDWALL_IDN].list),
80 __SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_IDN].lock), 80 __SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_IDN].lock),
81 NULL 81 NULL
82 }, 82 },
83 { /* access to user-space IPI */ 83 { /* access to user-space IPI */
@@ -87,7 +87,7 @@ static struct hardwall_type hardwall_types[] = {
87 0, 87 0,
88 "ipi", 88 "ipi",
89 LIST_HEAD_INIT(hardwall_types[HARDWALL_IPI].list), 89 LIST_HEAD_INIT(hardwall_types[HARDWALL_IPI].list),
90 __SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_IPI].lock), 90 __SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_IPI].lock),
91 NULL 91 NULL
92 }, 92 },
93#endif 93#endif
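
__SPIN_LOCK_INITIALIZER() is an internal building block in <linux/spinlock_types.h>; __SPIN_LOCK_UNLOCKED() is the supported static initializer for a spinlock_t and is what DEFINE_SPINLOCK() itself expands to. For reference, the preferred forms ("demo_lock" is hypothetical):

        #include <linux/spinlock.h>

        static spinlock_t demo_lock = __SPIN_LOCK_UNLOCKED(demo_lock);
        static DEFINE_SPINLOCK(demo_lock2);     /* equivalent shorthand */
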
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index 088d5c141e68..2cbe6d5dd6b0 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -815,6 +815,9 @@ STD_ENTRY(interrupt_return)
815 } 815 }
816 bzt r28, 1f 816 bzt r28, 1f
817 bnz r29, 1f 817 bnz r29, 1f
818 /* Disable interrupts explicitly for preemption. */
819 IRQ_DISABLE(r20,r21)
820 TRACE_IRQS_OFF
818 jal preempt_schedule_irq 821 jal preempt_schedule_irq
819 FEEDBACK_REENTER(interrupt_return) 822 FEEDBACK_REENTER(interrupt_return)
8201: 8231:
diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S
index ec755d3f3734..b8fc497f2437 100644
--- a/arch/tile/kernel/intvec_64.S
+++ b/arch/tile/kernel/intvec_64.S
@@ -841,6 +841,9 @@ STD_ENTRY(interrupt_return)
841 } 841 }
842 beqzt r28, 1f 842 beqzt r28, 1f
843 bnez r29, 1f 843 bnez r29, 1f
844 /* Disable interrupts explicitly for preemption. */
845 IRQ_DISABLE(r20,r21)
846 TRACE_IRQS_OFF
844 jal preempt_schedule_irq 847 jal preempt_schedule_irq
845 FEEDBACK_REENTER(interrupt_return) 848 FEEDBACK_REENTER(interrupt_return)
8461: 8491:
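
Both interrupt-return paths gain an explicit IRQ_DISABLE before calling preempt_schedule_irq(), which must be entered with interrupts off: it re-enables them itself around __schedule() and BUG()s on callers that don't. Condensed from kernel/sched/core.c of this era (context-tracking calls omitted):

        asmlinkage void __sched preempt_schedule_irq(void)
        {
                /* Catch callers which need to be fixed */
                BUG_ON(current_thread_info()->preempt_count || !irqs_disabled());

                do {
                        add_preempt_count(PREEMPT_ACTIVE);
                        local_irq_enable();     /* IRQs back on for the switch */
                        __schedule();
                        local_irq_disable();
                        sub_preempt_count(PREEMPT_ACTIVE);
                        barrier();              /* re-check need_resched() */
                } while (need_resched());
        }
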
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
index 362284af3afd..c93977a62116 100644
--- a/arch/tile/kernel/stack.c
+++ b/arch/tile/kernel/stack.c
@@ -23,6 +23,7 @@
23#include <linux/mmzone.h> 23#include <linux/mmzone.h>
24#include <linux/dcache.h> 24#include <linux/dcache.h>
25#include <linux/fs.h> 25#include <linux/fs.h>
26#include <linux/string.h>
26#include <asm/backtrace.h> 27#include <asm/backtrace.h>
27#include <asm/page.h> 28#include <asm/page.h>
28#include <asm/ucontext.h> 29#include <asm/ucontext.h>
@@ -332,21 +333,18 @@ static void describe_addr(struct KBacktraceIterator *kbt,
332 } 333 }
333 334
334 if (vma->vm_file) { 335 if (vma->vm_file) {
335 char *s;
336 p = d_path(&vma->vm_file->f_path, buf, bufsize); 336 p = d_path(&vma->vm_file->f_path, buf, bufsize);
337 if (IS_ERR(p)) 337 if (IS_ERR(p))
338 p = "?"; 338 p = "?";
339 s = strrchr(p, '/'); 339 name = kbasename(p);
340 if (s)
341 p = s+1;
342 } else { 340 } else {
343 p = "anon"; 341 name = "anon";
344 } 342 }
345 343
346 /* Generate a string description of the vma info. */ 344 /* Generate a string description of the vma info. */
347 namelen = strlen(p); 345 namelen = strlen(name);
348 remaining = (bufsize - 1) - namelen; 346 remaining = (bufsize - 1) - namelen;
349 memmove(buf, p, namelen); 347 memmove(buf, name, namelen);
350 snprintf(buf + namelen, remaining, "[%lx+%lx] ", 348 snprintf(buf + namelen, remaining, "[%lx+%lx] ",
351 vma->vm_start, vma->vm_end - vma->vm_start); 349 vma->vm_start, vma->vm_end - vma->vm_start);
352} 350}
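
kbasename() replaces the open-coded strrchr()-plus-one idiom (the new <linux/string.h> include provides it). Its definition is essentially:

        static inline const char *kbasename(const char *path)
        {
                const char *tail = strrchr(path, '/');
                return tail ? tail + 1 : path;
        }

So kbasename("/bin/ls") yields "ls" and a path with no '/' comes back unchanged, matching the old behavior while letting the result stay const (hence the switch from the 'p'/'s' pointer juggling to 'name').
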
diff --git a/arch/tile/lib/atomic_32.c b/arch/tile/lib/atomic_32.c
index 759efa337be8..c89b211fd9e7 100644
--- a/arch/tile/lib/atomic_32.c
+++ b/arch/tile/lib/atomic_32.c
@@ -107,19 +107,19 @@ unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask)
107EXPORT_SYMBOL(_atomic_xor); 107EXPORT_SYMBOL(_atomic_xor);
108 108
109 109
110u64 _atomic64_xchg(u64 *v, u64 n) 110long long _atomic64_xchg(long long *v, long long n)
111{ 111{
112 return __atomic64_xchg(v, __atomic_setup(v), n); 112 return __atomic64_xchg(v, __atomic_setup(v), n);
113} 113}
114EXPORT_SYMBOL(_atomic64_xchg); 114EXPORT_SYMBOL(_atomic64_xchg);
115 115
116u64 _atomic64_xchg_add(u64 *v, u64 i) 116long long _atomic64_xchg_add(long long *v, long long i)
117{ 117{
118 return __atomic64_xchg_add(v, __atomic_setup(v), i); 118 return __atomic64_xchg_add(v, __atomic_setup(v), i);
119} 119}
120EXPORT_SYMBOL(_atomic64_xchg_add); 120EXPORT_SYMBOL(_atomic64_xchg_add);
121 121
122u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u) 122long long _atomic64_xchg_add_unless(long long *v, long long a, long long u)
123{ 123{
124 /* 124 /*
125 * Note: argument order is switched here since it is easier 125 * Note: argument order is switched here since it is easier
@@ -130,7 +130,7 @@ u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u)
130} 130}
131EXPORT_SYMBOL(_atomic64_xchg_add_unless); 131EXPORT_SYMBOL(_atomic64_xchg_add_unless);
132 132
133u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n) 133long long _atomic64_cmpxchg(long long *v, long long o, long long n)
134{ 134{
135 return __atomic64_cmpxchg(v, __atomic_setup(v), o, n); 135 return __atomic64_cmpxchg(v, __atomic_setup(v), o, n);
136} 136}
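
These prototypes now match the declarations in the cmpxchg.h hunk above and the generic atomic64_t API, whose 64-bit counter is long long on 32-bit architectures. A hedged sketch of a caller ("stamp" is hypothetical): on 32-bit tile, atomic64_cmpxchg() bottoms out in _atomic64_cmpxchg() above, serialized on a lock chosen by __atomic_setup() since the ISA has no native 64-bit atomics:

        #include <linux/atomic.h>

        static atomic64_t stamp = ATOMIC64_INIT(0);

        static long long publish_stamp(long long new)
        {
                long long old = atomic64_read(&stamp);

                return atomic64_cmpxchg(&stamp, old, new);  /* prior value */
        }
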
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index ee2fb9d37745..f67e839f06c8 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -860,7 +860,7 @@ source "kernel/Kconfig.preempt"
860 860
861config X86_UP_APIC 861config X86_UP_APIC
862 bool "Local APIC support on uniprocessors" 862 bool "Local APIC support on uniprocessors"
863 depends on X86_32 && !SMP && !X86_32_NON_STANDARD 863 depends on X86_32 && !SMP && !X86_32_NON_STANDARD && !PCI_MSI
864 ---help--- 864 ---help---
865 A local APIC (Advanced Programmable Interrupt Controller) is an 865 A local APIC (Advanced Programmable Interrupt Controller) is an
866 integrated interrupt controller in the CPU. If you have a single-CPU 866 integrated interrupt controller in the CPU. If you have a single-CPU
@@ -885,11 +885,11 @@ config X86_UP_IOAPIC
885 885
886config X86_LOCAL_APIC 886config X86_LOCAL_APIC
887 def_bool y 887 def_bool y
888 depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC 888 depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC || PCI_MSI
889 889
890config X86_IO_APIC 890config X86_IO_APIC
891 def_bool y 891 def_bool y
892 depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_IOAPIC 892 depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_IOAPIC || PCI_MSI
893 893
894config X86_VISWS_APIC 894config X86_VISWS_APIC
895 def_bool y 895 def_bool y
@@ -1033,6 +1033,7 @@ config X86_REBOOTFIXUPS
1033 1033
1034config MICROCODE 1034config MICROCODE
1035 tristate "CPU microcode loading support" 1035 tristate "CPU microcode loading support"
1036 depends on CPU_SUP_AMD || CPU_SUP_INTEL
1036 select FW_LOADER 1037 select FW_LOADER
1037 ---help--- 1038 ---help---
1038 1039
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index d3f5c63078d8..89270b4318db 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -374,7 +374,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
374 * Catch too early usage of this before alternatives 374 * Catch too early usage of this before alternatives
375 * have run. 375 * have run.
376 */ 376 */
377 asm goto("1: jmp %l[t_warn]\n" 377 asm_volatile_goto("1: jmp %l[t_warn]\n"
378 "2:\n" 378 "2:\n"
379 ".section .altinstructions,\"a\"\n" 379 ".section .altinstructions,\"a\"\n"
380 " .long 1b - .\n" 380 " .long 1b - .\n"
@@ -388,7 +388,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
388 388
389#endif 389#endif
390 390
391 asm goto("1: jmp %l[t_no]\n" 391 asm_volatile_goto("1: jmp %l[t_no]\n"
392 "2:\n" 392 "2:\n"
393 ".section .altinstructions,\"a\"\n" 393 ".section .altinstructions,\"a\"\n"
394 " .long 1b - .\n" 394 " .long 1b - .\n"
@@ -453,7 +453,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
453 * have. Thus, we force the jump to the widest, 4-byte, signed relative 453 * have. Thus, we force the jump to the widest, 4-byte, signed relative
454 * offset even though the last would often fit in less bytes. 454 * offset even though the last would often fit in less bytes.
455 */ 455 */
456 asm goto("1: .byte 0xe9\n .long %l[t_dynamic] - 2f\n" 456 asm_volatile_goto("1: .byte 0xe9\n .long %l[t_dynamic] - 2f\n"
457 "2:\n" 457 "2:\n"
458 ".section .altinstructions,\"a\"\n" 458 ".section .altinstructions,\"a\"\n"
459 " .long 1b - .\n" /* src offset */ 459 " .long 1b - .\n" /* src offset */
diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h
index 64507f35800c..6a2cefb4395a 100644
--- a/arch/x86/include/asm/jump_label.h
+++ b/arch/x86/include/asm/jump_label.h
@@ -18,7 +18,7 @@
18 18
19static __always_inline bool arch_static_branch(struct static_key *key) 19static __always_inline bool arch_static_branch(struct static_key *key)
20{ 20{
21 asm goto("1:" 21 asm_volatile_goto("1:"
22 ".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t" 22 ".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t"
23 ".pushsection __jump_table, \"aw\" \n\t" 23 ".pushsection __jump_table, \"aw\" \n\t"
24 _ASM_ALIGN "\n\t" 24 _ASM_ALIGN "\n\t"
diff --git a/arch/x86/include/asm/mutex_64.h b/arch/x86/include/asm/mutex_64.h
index e7e6751648ed..07537a44216e 100644
--- a/arch/x86/include/asm/mutex_64.h
+++ b/arch/x86/include/asm/mutex_64.h
@@ -20,7 +20,7 @@
20static inline void __mutex_fastpath_lock(atomic_t *v, 20static inline void __mutex_fastpath_lock(atomic_t *v,
21 void (*fail_fn)(atomic_t *)) 21 void (*fail_fn)(atomic_t *))
22{ 22{
23 asm volatile goto(LOCK_PREFIX " decl %0\n" 23 asm_volatile_goto(LOCK_PREFIX " decl %0\n"
24 " jns %l[exit]\n" 24 " jns %l[exit]\n"
25 : : "m" (v->counter) 25 : : "m" (v->counter)
26 : "memory", "cc" 26 : "memory", "cc"
@@ -75,7 +75,7 @@ static inline int __mutex_fastpath_lock_retval(atomic_t *count)
75static inline void __mutex_fastpath_unlock(atomic_t *v, 75static inline void __mutex_fastpath_unlock(atomic_t *v,
76 void (*fail_fn)(atomic_t *)) 76 void (*fail_fn)(atomic_t *))
77{ 77{
78 asm volatile goto(LOCK_PREFIX " incl %0\n" 78 asm_volatile_goto(LOCK_PREFIX " incl %0\n"
79 " jg %l[exit]\n" 79 " jg %l[exit]\n"
80 : : "m" (v->counter) 80 : : "m" (v->counter)
81 : "memory", "cc" 81 : "memory", "cc"
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 1191ac1c9d25..a419814cea57 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -113,7 +113,7 @@ static int __init early_get_pnodeid(void)
113 break; 113 break;
114 case UV3_HUB_PART_NUMBER: 114 case UV3_HUB_PART_NUMBER:
115 case UV3_HUB_PART_NUMBER_X: 115 case UV3_HUB_PART_NUMBER_X:
116 uv_min_hub_revision_id += UV3_HUB_REVISION_BASE - 1; 116 uv_min_hub_revision_id += UV3_HUB_REVISION_BASE;
117 break; 117 break;
118 } 118 }
119 119
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 897783b3302a..9d8449158cf9 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1888,10 +1888,7 @@ void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
1888 userpg->cap_user_rdpmc = x86_pmu.attr_rdpmc; 1888 userpg->cap_user_rdpmc = x86_pmu.attr_rdpmc;
1889 userpg->pmc_width = x86_pmu.cntval_bits; 1889 userpg->pmc_width = x86_pmu.cntval_bits;
1890 1890
1891 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) 1891 if (!sched_clock_stable)
1892 return;
1893
1894 if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
1895 return; 1892 return;
1896 1893
1897 userpg->cap_user_time = 1; 1894 userpg->cap_user_time = 1;
@@ -1899,10 +1896,8 @@ void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
1899 userpg->time_shift = CYC2NS_SCALE_FACTOR; 1896 userpg->time_shift = CYC2NS_SCALE_FACTOR;
1900 userpg->time_offset = this_cpu_read(cyc2ns_offset) - now; 1897 userpg->time_offset = this_cpu_read(cyc2ns_offset) - now;
1901 1898
1902 if (sched_clock_stable && !check_tsc_disabled()) { 1899 userpg->cap_user_time_zero = 1;
1903 userpg->cap_user_time_zero = 1; 1900 userpg->time_zero = this_cpu_read(cyc2ns_offset);
1904 userpg->time_zero = this_cpu_read(cyc2ns_offset);
1905 }
1906} 1901}
1907 1902
1908/* 1903/*
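
The rewrite gates both cap_user_time and cap_user_time_zero on sched_clock_stable instead of the two TSC feature bits. With the capabilities set, userspace can convert a raw TSC reading to time without a syscall; paraphrasing the recipe documented in the perf_event_mmap_page comments ("pc" is a mapped page, "cyc" a TSC sample):

        #include <linux/perf_event.h>

        static __u64 tsc_to_ns(const struct perf_event_mmap_page *pc, __u64 cyc)
        {
                __u64 quot = cyc >> pc->time_shift;
                __u64 rem  = cyc & (((__u64)1 << pc->time_shift) - 1);

                /* delta since time_offset, in nanoseconds */
                return pc->time_offset + quot * pc->time_mult +
                       ((rem * pc->time_mult) >> pc->time_shift);
        }

A real reader also loops on the pc->lock seqcount to get a consistent snapshot of the time fields.
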
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 697b93af02dd..a0e2a8a80c94 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -775,11 +775,22 @@ void __init kvm_spinlock_init(void)
775 if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT)) 775 if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
776 return; 776 return;
777 777
778 printk(KERN_INFO "KVM setup paravirtual spinlock\n"); 778 pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning);
779 pv_lock_ops.unlock_kick = kvm_unlock_kick;
780}
781
782static __init int kvm_spinlock_init_jump(void)
783{
784 if (!kvm_para_available())
785 return 0;
786 if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
787 return 0;
779 788
780 static_key_slow_inc(&paravirt_ticketlocks_enabled); 789 static_key_slow_inc(&paravirt_ticketlocks_enabled);
790 printk(KERN_INFO "KVM setup paravirtual spinlock\n");
781 791
782 pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning); 792 return 0;
783 pv_lock_ops.unlock_kick = kvm_unlock_kick;
784} 793}
794early_initcall(kvm_spinlock_init_jump);
795
785#endif /* CONFIG_PARAVIRT_SPINLOCKS */ 796#endif /* CONFIG_PARAVIRT_SPINLOCKS */
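
The reshuffle leaves kvm_spinlock_init() installing only the pv_lock_ops pointers and defers the static_key_slow_inc() to an early_initcall(); the usual reason for this pattern is that kvm_spinlock_init() runs during early platform bring-up, before the jump-label machinery is ready to patch code. The general shape (hypothetical "my_feature" key, not from the patch):

        #include <linux/jump_label.h>
        #include <linux/init.h>

        static struct static_key my_feature = STATIC_KEY_INIT_FALSE;
        static bool my_feature_wanted;

        void __init my_feature_detect(void)         /* early platform setup */
        {
                my_feature_wanted = true;           /* only record the decision */
        }

        static __init int my_feature_enable(void)   /* initcall: safe to patch */
        {
                if (my_feature_wanted)
                        static_key_slow_inc(&my_feature);
                return 0;
        }
        early_initcall(my_feature_enable);
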
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index e643e744e4d8..7e920bff99a3 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -326,6 +326,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
326 DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6320"), 326 DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6320"),
327 }, 327 },
328 }, 328 },
329 { /* Handle problems with rebooting on the Latitude E5410. */
330 .callback = set_pci_reboot,
331 .ident = "Dell Latitude E5410",
332 .matches = {
333 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
334 DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E5410"),
335 },
336 },
329 { /* Handle problems with rebooting on the Latitude E5420. */ 337 { /* Handle problems with rebooting on the Latitude E5420. */
330 .callback = set_pci_reboot, 338 .callback = set_pci_reboot,
331 .ident = "Dell Latitude E5420", 339 .ident = "Dell Latitude E5420",
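
The new E5410 entry follows the table's existing pattern: dmi_check_system() walks reboot_dmi_table at boot and runs the callback of each entry whose DMI_MATCH strings all match, with set_pci_reboot() switching the machine to rebooting via the PCI 0xcf9 reset register. Roughly how the table is consumed (paraphrasing this file's init path):

        static int __init reboot_init(void)
        {
                /* Runs every matching quirk's callback, e.g. set_pci_reboot()
                 * for the Latitude models above. */
                dmi_check_system(reboot_dmi_table);
                return 0;
        }
        core_initcall(reboot_init);
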
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 3b8e7459dd4d..2b2fce1b2009 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3255,25 +3255,29 @@ static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
3255 3255
3256static void ept_load_pdptrs(struct kvm_vcpu *vcpu) 3256static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
3257{ 3257{
3258 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
3259
3258 if (!test_bit(VCPU_EXREG_PDPTR, 3260 if (!test_bit(VCPU_EXREG_PDPTR,
3259 (unsigned long *)&vcpu->arch.regs_dirty)) 3261 (unsigned long *)&vcpu->arch.regs_dirty))
3260 return; 3262 return;
3261 3263
3262 if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) { 3264 if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
3263 vmcs_write64(GUEST_PDPTR0, vcpu->arch.mmu.pdptrs[0]); 3265 vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]);
3264 vmcs_write64(GUEST_PDPTR1, vcpu->arch.mmu.pdptrs[1]); 3266 vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]);
3265 vmcs_write64(GUEST_PDPTR2, vcpu->arch.mmu.pdptrs[2]); 3267 vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]);
3266 vmcs_write64(GUEST_PDPTR3, vcpu->arch.mmu.pdptrs[3]); 3268 vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]);
3267 } 3269 }
3268} 3270}
3269 3271
3270static void ept_save_pdptrs(struct kvm_vcpu *vcpu) 3272static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
3271{ 3273{
3274 struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
3275
3272 if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) { 3276 if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
3273 vcpu->arch.mmu.pdptrs[0] = vmcs_read64(GUEST_PDPTR0); 3277 mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
3274 vcpu->arch.mmu.pdptrs[1] = vmcs_read64(GUEST_PDPTR1); 3278 mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
3275 vcpu->arch.mmu.pdptrs[2] = vmcs_read64(GUEST_PDPTR2); 3279 mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
3276 vcpu->arch.mmu.pdptrs[3] = vmcs_read64(GUEST_PDPTR3); 3280 mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
3277 } 3281 }
3278 3282
3279 __set_bit(VCPU_EXREG_PDPTR, 3283 __set_bit(VCPU_EXREG_PDPTR,
@@ -7777,10 +7781,6 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
7777 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1); 7781 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
7778 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2); 7782 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
7779 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3); 7783 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
7780 __clear_bit(VCPU_EXREG_PDPTR,
7781 (unsigned long *)&vcpu->arch.regs_avail);
7782 __clear_bit(VCPU_EXREG_PDPTR,
7783 (unsigned long *)&vcpu->arch.regs_dirty);
7784 } 7784 }
7785 7785
7786 kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp); 7786 kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp);
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index d1e4777b4e75..31d04758b76f 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -278,6 +278,15 @@ static void __init xen_smp_prepare_boot_cpu(void)
278 old memory can be recycled */ 278 old memory can be recycled */
279 make_lowmem_page_readwrite(xen_initial_gdt); 279 make_lowmem_page_readwrite(xen_initial_gdt);
280 280
281#ifdef CONFIG_X86_32
282 /*
283 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
284 * expects __USER_DS
285 */
286 loadsegment(ds, __USER_DS);
287 loadsegment(es, __USER_DS);
288#endif
289
281 xen_filter_cpu_maps(); 290 xen_filter_cpu_maps();
282 xen_setup_vcpu_info_placement(); 291 xen_setup_vcpu_info_placement();
283 } 292 }