Diffstat (limited to 'arch')
-rw-r--r--	arch/arc/include/asm/barrier.h	37
-rw-r--r--	arch/arm/boot/dts/spear320-hmi.dts	2
-rw-r--r--	arch/arm/configs/bcm_defconfig	2
-rw-r--r--	arch/arm/mach-spear/headsmp.S	2
-rw-r--r--	arch/arm/mach-spear/platsmp.c	2
-rw-r--r--	arch/arm/mach-spear/time.c	2
-rw-r--r--	arch/arm/mach-vexpress/dcscb.c	7
-rw-r--r--	arch/arm/mach-vexpress/spc.c	4
-rw-r--r--	arch/ia64/kernel/head.S	2
-rw-r--r--	arch/ia64/kernel/ivt.S	2
-rw-r--r--	arch/ia64/kvm/vmm_ivt.S	2
-rw-r--r--	arch/mips/mm/cache.c	4
-rw-r--r--	arch/parisc/include/asm/shmparam.h	5
-rw-r--r--	arch/parisc/kernel/cache.c	3
-rw-r--r--	arch/parisc/kernel/sys_parisc.c	14
-rw-r--r--	arch/parisc/kernel/syscall_table.S	2
-rw-r--r--	arch/parisc/lib/memcpy.c	2
-rw-r--r--	arch/parisc/mm/fault.c	2
-rw-r--r--	arch/powerpc/kernel/pci_64.c	10
-rw-r--r--	arch/powerpc/mm/numa.c	1
-rw-r--r--	arch/s390/include/asm/sigp.h	19
-rw-r--r--	arch/s390/include/asm/smp.h	13
-rw-r--r--	arch/s390/include/uapi/asm/unistd.h	3
-rw-r--r--	arch/s390/kernel/compat_wrapper.c	3
-rw-r--r--	arch/s390/kernel/dumpstack.c	8
-rw-r--r--	arch/s390/kernel/ptrace.c	2
-rw-r--r--	arch/s390/kernel/setup.c	32
-rw-r--r--	arch/s390/kernel/smp.c	15
-rw-r--r--	arch/s390/kernel/syscalls.S	1
-rw-r--r--	arch/s390/lib/uaccess.c	5
-rw-r--r--	arch/s390/mm/fault.c	140
-rw-r--r--	arch/x86/Makefile	4
-rw-r--r--	arch/x86/include/asm/kvm_host.h	2
-rw-r--r--	arch/x86/kernel/cpu/mcheck/mce.c	4
-rw-r--r--	arch/x86/kernel/cpu/mcheck/mce_intel.c	18
-rw-r--r--	arch/x86/kernel/cpu/perf_event_intel_rapl.c	45
-rw-r--r--	arch/x86/kernel/early-quirks.c	2
-rw-r--r--	arch/x86/kernel/kprobes/core.c	16
-rw-r--r--	arch/x86/kernel/reboot.c	72
-rw-r--r--	arch/x86/kvm/cpuid.c	2
-rw-r--r--	arch/x86/kvm/cpuid.h	8
-rw-r--r--	arch/x86/kvm/mmu.c	38
-rw-r--r--	arch/x86/kvm/mmu.h	44
-rw-r--r--	arch/x86/kvm/paging_tmpl.h	2
-rw-r--r--	arch/x86/kvm/vmx.c	11
-rw-r--r--	arch/x86/kvm/x86.c	10
-rw-r--r--	arch/x86/syscalls/Makefile	2
-rw-r--r--	arch/x86/syscalls/syscall_32.tbl	1
-rw-r--r--	arch/x86/tools/Makefile	2
-rw-r--r--	arch/x86/xen/smp.c	3
-rw-r--r--	arch/x86/xen/spinlock.c	5
-rw-r--r--	arch/x86/xen/xen-asm_32.S	25
52 files changed, 466 insertions, 198 deletions
diff --git a/arch/arc/include/asm/barrier.h b/arch/arc/include/asm/barrier.h
deleted file mode 100644
index c32245c3d1e9..000000000000
--- a/arch/arc/include/asm/barrier.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __ASM_BARRIER_H
-#define __ASM_BARRIER_H
-
-#ifndef __ASSEMBLY__
-
-/* TODO-vineetg: Need to see what this does, don't we need sync anywhere */
-#define mb() __asm__ __volatile__ ("" : : : "memory")
-#define rmb() mb()
-#define wmb() mb()
-#define set_mb(var, value) do { var = value; mb(); } while (0)
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
-#define read_barrier_depends() mb()
-
-/* TODO-vineetg verify the correctness of macros here */
-#ifdef CONFIG_SMP
-#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() wmb()
-#else
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#endif
-
-#define smp_read_barrier_depends() do { } while (0)
-
-#endif
-
-#endif
diff --git a/arch/arm/boot/dts/spear320-hmi.dts b/arch/arm/boot/dts/spear320-hmi.dts
index 3075d2d3a8be..0aa6fef5ce22 100644
--- a/arch/arm/boot/dts/spear320-hmi.dts
+++ b/arch/arm/boot/dts/spear320-hmi.dts
@@ -1,7 +1,7 @@
 /*
  * DTS file for SPEAr320 Evaluation Baord
  *
- * Copyright 2012 Shiraz Hashim <shiraz.hashim@st.com>
+ * Copyright 2012 Shiraz Hashim <shiraz.linux.kernel@gmail.com>
  *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/configs/bcm_defconfig b/arch/arm/configs/bcm_defconfig
index 01004640ee4d..3df3f3a79ef4 100644
--- a/arch/arm/configs/bcm_defconfig
+++ b/arch/arm/configs/bcm_defconfig
@@ -132,7 +132,7 @@ CONFIG_CRC_ITU_T=y
 CONFIG_CRC7=y
 CONFIG_XZ_DEC=y
 CONFIG_AVERAGE=y
-CONFIG_PINCTRL_CAPRI=y
+CONFIG_PINCTRL_BCM281XX=y
 CONFIG_WATCHDOG=y
 CONFIG_BCM_KONA_WDT=y
 CONFIG_BCM_KONA_WDT_DEBUG=y
diff --git a/arch/arm/mach-spear/headsmp.S b/arch/arm/mach-spear/headsmp.S
index ed85473a047f..c52192dc3d9f 100644
--- a/arch/arm/mach-spear/headsmp.S
+++ b/arch/arm/mach-spear/headsmp.S
@@ -3,7 +3,7 @@
  *
  * Picked from realview
  * Copyright (c) 2012 ST Microelectronics Limited
- * Shiraz Hashim <shiraz.hashim@st.com>
+ * Shiraz Hashim <shiraz.linux.kernel@gmail.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
diff --git a/arch/arm/mach-spear/platsmp.c b/arch/arm/mach-spear/platsmp.c
index 5c4a19887b2b..c19751fff2c6 100644
--- a/arch/arm/mach-spear/platsmp.c
+++ b/arch/arm/mach-spear/platsmp.c
@@ -4,7 +4,7 @@
  * based upon linux/arch/arm/mach-realview/platsmp.c
  *
  * Copyright (C) 2012 ST Microelectronics Ltd.
- * Shiraz Hashim <shiraz.hashim@st.com>
+ * Shiraz Hashim <shiraz.linux.kernel@gmail.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
diff --git a/arch/arm/mach-spear/time.c b/arch/arm/mach-spear/time.c
index c4d0931fc6ee..26fda4ed4d51 100644
--- a/arch/arm/mach-spear/time.c
+++ b/arch/arm/mach-spear/time.c
@@ -2,7 +2,7 @@
  * arch/arm/plat-spear/time.c
  *
  * Copyright (C) 2010 ST Microelectronics
- * Shiraz Hashim<shiraz.hashim@st.com>
+ * Shiraz Hashim<shiraz.linux.kernel@gmail.com>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-vexpress/dcscb.c b/arch/arm/mach-vexpress/dcscb.c
index 788495d35cf9..30b993399ed7 100644
--- a/arch/arm/mach-vexpress/dcscb.c
+++ b/arch/arm/mach-vexpress/dcscb.c
@@ -51,12 +51,14 @@ static int dcscb_allcpus_mask[2];
 static int dcscb_power_up(unsigned int cpu, unsigned int cluster)
 {
 	unsigned int rst_hold, cpumask = (1 << cpu);
-	unsigned int all_mask = dcscb_allcpus_mask[cluster];
+	unsigned int all_mask;
 
 	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
 	if (cpu >= 4 || cluster >= 2)
 		return -EINVAL;
 
+	all_mask = dcscb_allcpus_mask[cluster];
+
 	/*
 	 * Since this is called with IRQs enabled, and no arch_spin_lock_irq
 	 * variant exists, we need to disable IRQs manually here.
@@ -101,11 +103,12 @@ static void dcscb_power_down(void)
 	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
 	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
 	cpumask = (1 << cpu);
-	all_mask = dcscb_allcpus_mask[cluster];
 
 	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
 	BUG_ON(cpu >= 4 || cluster >= 2);
 
+	all_mask = dcscb_allcpus_mask[cluster];
+
 	__mcpm_cpu_going_down(cpu, cluster);
 
 	arch_spin_lock(&dcscb_lock);
diff --git a/arch/arm/mach-vexpress/spc.c b/arch/arm/mach-vexpress/spc.c
index c26ef5b92ca7..2c2754e79cb3 100644
--- a/arch/arm/mach-vexpress/spc.c
+++ b/arch/arm/mach-vexpress/spc.c
@@ -392,7 +392,7 @@ static irqreturn_t ve_spc_irq_handler(int irq, void *data)
 	 * +--------------------------+
 	 * | 31     20 | 19         0 |
 	 * +--------------------------+
-	 * | u_volt    | freq(kHz)    |
+	 * | m_volt    | freq(kHz)    |
 	 * +--------------------------+
 	 */
 #define MULT_FACTOR	20
@@ -414,7 +414,7 @@ static int ve_spc_populate_opps(uint32_t cluster)
 		ret = ve_spc_read_sys_cfg(SYSCFG_SCC, off, &data);
 		if (!ret) {
 			opps->freq = (data & FREQ_MASK) * MULT_FACTOR;
-			opps->u_volt = data >> VOLT_SHIFT;
+			opps->u_volt = (data >> VOLT_SHIFT) * 1000;
 		} else {
 			break;
 		}
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index e6f80fcf013b..a4acddad0c78 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -259,7 +259,7 @@ start_ap:
 	 * Switch into virtual mode:
 	 */
 	movl r16=(IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH|IA64_PSR_BN \
-		  |IA64_PSR_DI|IA64_PSR_AC)
+		  |IA64_PSR_DI)
 	;;
 	mov cr.ipsr=r16
 	movl r17=1f
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index 689ffcaa284e..18e794a57248 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -58,7 +58,7 @@
 #include <asm/unistd.h>
 #include <asm/errno.h>
 
-#if 1
+#if 0
 # define PSR_DEFAULT_BITS	psr.ac
 #else
 # define PSR_DEFAULT_BITS	0
diff --git a/arch/ia64/kvm/vmm_ivt.S b/arch/ia64/kvm/vmm_ivt.S
index 24018484c6e9..397e34a63e18 100644
--- a/arch/ia64/kvm/vmm_ivt.S
+++ b/arch/ia64/kvm/vmm_ivt.S
@@ -64,7 +64,7 @@
 #include "kvm_minstate.h"
 #include "vti.h"
 
-#if 1
+#if 0
 # define PSR_DEFAULT_BITS	psr.ac
 #else
 # define PSR_DEFAULT_BITS	0
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index e422b38d3113..9e67cdea3c74 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -29,15 +29,15 @@ void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
 void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
 	unsigned long pfn);
 void (*flush_icache_range)(unsigned long start, unsigned long end);
+EXPORT_SYMBOL_GPL(flush_icache_range);
 void (*local_flush_icache_range)(unsigned long start, unsigned long end);
 
 void (*__flush_cache_vmap)(void);
 void (*__flush_cache_vunmap)(void);
 
 void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
-void (*__invalidate_kernel_vmap_range)(unsigned long vaddr, int size);
-
 EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range);
+void (*__invalidate_kernel_vmap_range)(unsigned long vaddr, int size);
 
 /* MIPS specific cache operations */
 void (*flush_cache_sigtramp)(unsigned long addr);
diff --git a/arch/parisc/include/asm/shmparam.h b/arch/parisc/include/asm/shmparam.h
index 628ddc22faa8..afe1300ab667 100644
--- a/arch/parisc/include/asm/shmparam.h
+++ b/arch/parisc/include/asm/shmparam.h
@@ -1,8 +1,7 @@
 #ifndef _ASMPARISC_SHMPARAM_H
 #define _ASMPARISC_SHMPARAM_H
 
-#define __ARCH_FORCE_SHMLBA 1
-
-#define SHMLBA 0x00400000	/* attach addr needs to be 4 Mb aligned */
+#define SHMLBA PAGE_SIZE	/* attach addr a multiple of this */
+#define SHM_COLOUR 0x00400000	/* shared mappings colouring */
 
 #endif /* _ASMPARISC_SHMPARAM_H */
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index a6ffc775a9f8..f6448c7c62b5 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -323,7 +323,8 @@ void flush_dcache_page(struct page *page)
 	 * specifically accesses it, of course) */
 
 	flush_tlb_page(mpnt, addr);
-	if (old_addr == 0 || (old_addr & (SHMLBA - 1)) != (addr & (SHMLBA - 1))) {
+	if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
+			!= (addr & (SHM_COLOUR - 1))) {
 		__flush_cache_page(mpnt, addr, page_to_phys(page));
 		if (old_addr)
 			printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %s\n", old_addr, addr, mpnt->vm_file ? (char *)mpnt->vm_file->f_path.dentry->d_name.name : "(null)");
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index b7cadc4a06cd..31ffa9b55322 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -45,7 +45,7 @@
 
 static int get_offset(unsigned int last_mmap)
 {
-	return (last_mmap & (SHMLBA-1)) >> PAGE_SHIFT;
+	return (last_mmap & (SHM_COLOUR-1)) >> PAGE_SHIFT;
 }
 
 static unsigned long shared_align_offset(unsigned int last_mmap,
@@ -57,8 +57,8 @@ static unsigned long shared_align_offset(unsigned int last_mmap,
 static inline unsigned long COLOR_ALIGN(unsigned long addr,
 			unsigned int last_mmap, unsigned long pgoff)
 {
-	unsigned long base = (addr+SHMLBA-1) & ~(SHMLBA-1);
-	unsigned long off = (SHMLBA-1) &
+	unsigned long base = (addr+SHM_COLOUR-1) & ~(SHM_COLOUR-1);
+	unsigned long off = (SHM_COLOUR-1) &
 		(shared_align_offset(last_mmap, pgoff) << PAGE_SHIFT);
 
 	return base + off;
@@ -101,7 +101,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	if (flags & MAP_FIXED) {
 		if ((flags & MAP_SHARED) && last_mmap &&
 		    (addr - shared_align_offset(last_mmap, pgoff))
-				& (SHMLBA - 1))
+				& (SHM_COLOUR - 1))
 			return -EINVAL;
 		goto found_addr;
 	}
@@ -122,7 +122,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	info.length = len;
 	info.low_limit = mm->mmap_legacy_base;
 	info.high_limit = mmap_upper_limit();
-	info.align_mask = last_mmap ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+	info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
 	info.align_offset = shared_align_offset(last_mmap, pgoff);
 	addr = vm_unmapped_area(&info);
 
@@ -161,7 +161,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	if (flags & MAP_FIXED) {
 		if ((flags & MAP_SHARED) && last_mmap &&
 		    (addr - shared_align_offset(last_mmap, pgoff))
-				& (SHMLBA - 1))
+				& (SHM_COLOUR - 1))
 			return -EINVAL;
 		goto found_addr;
 	}
@@ -182,7 +182,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	info.length = len;
 	info.low_limit = PAGE_SIZE;
 	info.high_limit = mm->mmap_base;
-	info.align_mask = last_mmap ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+	info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
 	info.align_offset = shared_align_offset(last_mmap, pgoff);
 	addr = vm_unmapped_area(&info);
 	if (!(addr & ~PAGE_MASK))
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 80e5dd248934..83ead0ea127d 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -392,7 +392,7 @@
 	ENTRY_COMP(vmsplice)
 	ENTRY_COMP(move_pages)		/* 295 */
 	ENTRY_SAME(getcpu)
-	ENTRY_SAME(epoll_pwait)
+	ENTRY_COMP(epoll_pwait)
 	ENTRY_COMP(statfs64)
 	ENTRY_COMP(fstatfs64)
 	ENTRY_COMP(kexec_load)		/* 300 */
diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c
index 413dc1769299..b2b441b32341 100644
--- a/arch/parisc/lib/memcpy.c
+++ b/arch/parisc/lib/memcpy.c
@@ -470,7 +470,7 @@ static unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
 		return 0;
 
 	/* if a load or store fault occured we can get the faulty addr */
-	d = &__get_cpu_var(exception_data);
+	d = this_cpu_ptr(&exception_data);
 	fault_addr = d->fault_addr;
 
 	/* error in load or store? */
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 9d08c71a967e..747550762f3c 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -151,7 +151,7 @@ int fixup_exception(struct pt_regs *regs)
 	fix = search_exception_tables(regs->iaoq[0]);
 	if (fix) {
 		struct exception_data *d;
-		d = &__get_cpu_var(exception_data);
+		d = this_cpu_ptr(&exception_data);
 		d->fault_ip = regs->iaoq[0];
 		d->fault_space = regs->isr;
 		d->fault_addr = regs->ior;
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 2a4779091a58..155013da27e0 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -208,7 +208,7 @@ long sys_pciconfig_iobase(long which, unsigned long in_bus,
 			  unsigned long in_devfn)
 {
 	struct pci_controller* hose;
-	struct pci_bus *bus = NULL;
+	struct pci_bus *tmp_bus, *bus = NULL;
 	struct device_node *hose_node;
 
 	/* Argh ! Please forgive me for that hack, but that's the
@@ -229,10 +229,12 @@ long sys_pciconfig_iobase(long which, unsigned long in_bus,
 	 * used on pre-domains setup. We return the first match
 	 */
 
-	list_for_each_entry(bus, &pci_root_buses, node) {
-		if (in_bus >= bus->number && in_bus <= bus->busn_res.end)
+	list_for_each_entry(tmp_bus, &pci_root_buses, node) {
+		if (in_bus >= tmp_bus->number &&
+		    in_bus <= tmp_bus->busn_res.end) {
+			bus = tmp_bus;
 			break;
-		bus = NULL;
+		}
 	}
 	if (bus == NULL || bus->dev.of_node == NULL)
 		return -ENODEV;
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 4ebbb9e99286..3b181b22cd46 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -232,6 +232,7 @@ int __node_distance(int a, int b)
 
 	return distance;
 }
+EXPORT_SYMBOL(__node_distance);
 
 static void initialize_distance_lookup_table(int nid,
 				const __be32 *associativity)
diff --git a/arch/s390/include/asm/sigp.h b/arch/s390/include/asm/sigp.h
index d091aa1aaf11..bf9c823d4020 100644
--- a/arch/s390/include/asm/sigp.h
+++ b/arch/s390/include/asm/sigp.h
@@ -31,4 +31,23 @@
 #define SIGP_STATUS_INCORRECT_STATE	0x00000200UL
 #define SIGP_STATUS_NOT_RUNNING	0x00000400UL
 
+#ifndef __ASSEMBLY__
+
+static inline int __pcpu_sigp(u16 addr, u8 order, u32 parm, u32 *status)
+{
+	register unsigned int reg1 asm ("1") = parm;
+	int cc;
+
+	asm volatile(
+		"	sigp	%1,%2,0(%3)\n"
+		"	ipm	%0\n"
+		"	srl	%0,28\n"
+		: "=d" (cc), "+d" (reg1) : "d" (addr), "a" (order) : "cc");
+	if (status && cc == 1)
+		*status = reg1;
+	return cc;
+}
+
+#endif /* __ASSEMBLY__ */
+
 #endif /* __S390_ASM_SIGP_H */
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index 160779394096..21703f85b48d 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -7,6 +7,8 @@
 #ifndef __ASM_SMP_H
 #define __ASM_SMP_H
 
+#include <asm/sigp.h>
+
 #ifdef CONFIG_SMP
 
 #include <asm/lowcore.h>
@@ -50,9 +52,18 @@ static inline int smp_store_status(int cpu) { return 0; }
 static inline int smp_vcpu_scheduled(int cpu) { return 1; }
 static inline void smp_yield_cpu(int cpu) { }
 static inline void smp_yield(void) { }
-static inline void smp_stop_cpu(void) { }
 static inline void smp_fill_possible_mask(void) { }
 
+static inline void smp_stop_cpu(void)
+{
+	u16 pcpu = stap();
+
+	for (;;) {
+		__pcpu_sigp(pcpu, SIGP_STOP, 0, NULL);
+		cpu_relax();
+	}
+}
+
 #endif /* CONFIG_SMP */
 
 #ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h
index 5eb5c9ddb120..3802d2d3a18d 100644
--- a/arch/s390/include/uapi/asm/unistd.h
+++ b/arch/s390/include/uapi/asm/unistd.h
@@ -282,7 +282,8 @@
 #define __NR_finit_module	344
 #define __NR_sched_setattr	345
 #define __NR_sched_getattr	346
-#define NR_syscalls 345
+#define __NR_renameat2	347
+#define NR_syscalls 348
 
 /*
  * There are some system calls that are not present on 64 bit, some
diff --git a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c
index 824c39dfddfc..45cdb37aa6f8 100644
--- a/arch/s390/kernel/compat_wrapper.c
+++ b/arch/s390/kernel/compat_wrapper.c
@@ -1,5 +1,5 @@
 /*
- * Compat sytem call wrappers.
+ * Compat system call wrappers.
  *
  * Copyright IBM Corp. 2014
  */
@@ -213,3 +213,4 @@ COMPAT_SYSCALL_WRAP5(kcmp, pid_t, pid1, pid_t, pid2, int, type, unsigned long, i
 COMPAT_SYSCALL_WRAP3(finit_module, int, fd, const char __user *, uargs, int, flags);
 COMPAT_SYSCALL_WRAP3(sched_setattr, pid_t, pid, struct sched_attr __user *, attr, unsigned int, flags);
 COMPAT_SYSCALL_WRAP4(sched_getattr, pid_t, pid, struct sched_attr __user *, attr, unsigned int, size, unsigned int, flags);
+COMPAT_SYSCALL_WRAP5(renameat2, int, olddfd, const char __user *, oldname, int, newdfd, const char __user *, newname, unsigned int, flags);
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index e6af9406987c..acb412442e5e 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -144,10 +144,10 @@ void show_registers(struct pt_regs *regs)
 	char *mode;
 
 	mode = user_mode(regs) ? "User" : "Krnl";
-	printk("%s PSW : %p %p (%pSR)\n",
-	       mode, (void *) regs->psw.mask,
-	       (void *) regs->psw.addr,
-	       (void *) regs->psw.addr);
+	printk("%s PSW : %p %p", mode, (void *)regs->psw.mask, (void *)regs->psw.addr);
+	if (!user_mode(regs))
+		printk(" (%pSR)", (void *)regs->psw.addr);
+	printk("\n");
 	printk("           R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
 	       "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
 	       mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 4ac8fafec95f..1c82619eb4f7 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -64,7 +64,7 @@ void update_cr_regs(struct task_struct *task)
 	if (task->thread.per_flags & PER_FLAG_NO_TE)
 		cr_new &= ~(1UL << 55);
 	if (cr_new != cr)
-		__ctl_load(cr, 0, 0);
+		__ctl_load(cr_new, 0, 0);
 	/* Set or clear transaction execution TDC bits 62 and 63. */
 	__ctl_store(cr, 2, 2);
 	cr_new = cr & ~3UL;
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index f70f2489fa5f..88d1ca81e2dd 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -1027,3 +1027,35 @@ void __init setup_arch(char **cmdline_p)
 	/* Setup zfcpdump support */
 	setup_zfcpdump();
 }
+
+#ifdef CONFIG_32BIT
+static int no_removal_warning __initdata;
+
+static int __init parse_no_removal_warning(char *str)
+{
+	no_removal_warning = 1;
+	return 0;
+}
+__setup("no_removal_warning", parse_no_removal_warning);
+
+static int __init removal_warning(void)
+{
+	if (no_removal_warning)
+		return 0;
+	printk(KERN_ALERT "\n\n");
+	printk(KERN_CONT "Warning - you are using a 31 bit kernel!\n\n");
+	printk(KERN_CONT "We plan to remove 31 bit kernel support from the kernel sources in March 2015.\n");
+	printk(KERN_CONT "Currently we assume that nobody is using the 31 bit kernel on old 31 bit\n");
+	printk(KERN_CONT "hardware anymore. If you think that the code should not be removed and also\n");
+	printk(KERN_CONT "future versions of the Linux kernel should be able to run in 31 bit mode\n");
+	printk(KERN_CONT "please let us know. Please write to:\n");
+	printk(KERN_CONT "linux390@de.ibm.com (mail address) and/or\n");
+	printk(KERN_CONT "linux-s390@vger.kernel.org (mailing list).\n\n");
+	printk(KERN_CONT "Thank you!\n\n");
+	printk(KERN_CONT "If this kernel runs on a 64 bit machine you may consider using a 64 bit kernel.\n");
+	printk(KERN_CONT "This message can be disabled with the \"no_removal_warning\" kernel parameter.\n");
+	schedule_timeout_uninterruptible(300 * HZ);
+	return 0;
+}
+early_initcall(removal_warning);
+#endif
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 512ce1cde2a4..86e65ec3422b 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -82,21 +82,6 @@ DEFINE_MUTEX(smp_cpu_state_mutex);
 /*
  * Signal processor helper functions.
  */
-static inline int __pcpu_sigp(u16 addr, u8 order, u32 parm, u32 *status)
-{
-	register unsigned int reg1 asm ("1") = parm;
-	int cc;
-
-	asm volatile(
-		"	sigp	%1,%2,0(%3)\n"
-		"	ipm	%0\n"
-		"	srl	%0,28\n"
-		: "=d" (cc), "+d" (reg1) : "d" (addr), "a" (order) : "cc");
-	if (status && cc == 1)
-		*status = reg1;
-	return cc;
-}
-
 static inline int __pcpu_sigp_relax(u16 addr, u8 order, u32 parm, u32 *status)
 {
 	int cc;
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 542ef488bac1..fe5cdf29a001 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -355,3 +355,4 @@ SYSCALL(sys_kcmp,sys_kcmp,compat_sys_kcmp)
 SYSCALL(sys_finit_module,sys_finit_module,compat_sys_finit_module)
 SYSCALL(sys_sched_setattr,sys_sched_setattr,compat_sys_sched_setattr)	/* 345 */
 SYSCALL(sys_sched_getattr,sys_sched_getattr,compat_sys_sched_getattr)
+SYSCALL(sys_renameat2,sys_renameat2,compat_sys_renameat2)
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index 23f866b4c7f1..7416efe8eae4 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -338,9 +338,6 @@ static inline unsigned long strnlen_user_srst(const char __user *src,
 	register unsigned long reg0 asm("0") = 0;
 	unsigned long tmp1, tmp2;
 
-	if (unlikely(!size))
-		return 0;
-	update_primary_asce(current);
 	asm volatile(
 		"   la    %2,0(%1)\n"
 		"   la    %3,0(%0,%1)\n"
@@ -359,6 +356,8 @@ static inline unsigned long strnlen_user_srst(const char __user *src,
 
 unsigned long __strnlen_user(const char __user *src, unsigned long size)
 {
+	if (unlikely(!size))
+		return 0;
 	update_primary_asce(current);
 	return strnlen_user_srst(src, size);
 }
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 19f623f1f21c..2f51a998a67e 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -126,6 +126,133 @@ static inline int user_space_fault(struct pt_regs *regs)
 	return 0;
 }
 
+static int bad_address(void *p)
+{
+	unsigned long dummy;
+
+	return probe_kernel_address((unsigned long *)p, dummy);
+}
+
+#ifdef CONFIG_64BIT
+static void dump_pagetable(unsigned long asce, unsigned long address)
+{
+	unsigned long *table = __va(asce & PAGE_MASK);
+
+	pr_alert("AS:%016lx ", asce);
+	switch (asce & _ASCE_TYPE_MASK) {
+	case _ASCE_TYPE_REGION1:
+		table = table + ((address >> 53) & 0x7ff);
+		if (bad_address(table))
+			goto bad;
+		pr_cont("R1:%016lx ", *table);
+		if (*table & _REGION_ENTRY_INVALID)
+			goto out;
+		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+		/* fallthrough */
+	case _ASCE_TYPE_REGION2:
+		table = table + ((address >> 42) & 0x7ff);
+		if (bad_address(table))
+			goto bad;
+		pr_cont("R2:%016lx ", *table);
+		if (*table & _REGION_ENTRY_INVALID)
+			goto out;
+		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+		/* fallthrough */
+	case _ASCE_TYPE_REGION3:
+		table = table + ((address >> 31) & 0x7ff);
+		if (bad_address(table))
+			goto bad;
+		pr_cont("R3:%016lx ", *table);
+		if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE))
+			goto out;
+		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+		/* fallthrough */
+	case _ASCE_TYPE_SEGMENT:
+		table = table + ((address >> 20) & 0x7ff);
+		if (bad_address(table))
+			goto bad;
+		pr_cont(KERN_CONT "S:%016lx ", *table);
+		if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE))
+			goto out;
+		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
+	}
+	table = table + ((address >> 12) & 0xff);
+	if (bad_address(table))
+		goto bad;
+	pr_cont("P:%016lx ", *table);
+out:
+	pr_cont("\n");
+	return;
+bad:
+	pr_cont("BAD\n");
+}
+
+#else /* CONFIG_64BIT */
+
+static void dump_pagetable(unsigned long asce, unsigned long address)
+{
+	unsigned long *table = __va(asce & PAGE_MASK);
+
+	pr_alert("AS:%08lx ", asce);
+	table = table + ((address >> 20) & 0x7ff);
+	if (bad_address(table))
+		goto bad;
+	pr_cont("S:%08lx ", *table);
+	if (*table & _SEGMENT_ENTRY_INVALID)
+		goto out;
+	table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
+	table = table + ((address >> 12) & 0xff);
+	if (bad_address(table))
+		goto bad;
+	pr_cont("P:%08lx ", *table);
+out:
+	pr_cont("\n");
+	return;
+bad:
+	pr_cont("BAD\n");
+}
+
+#endif /* CONFIG_64BIT */
+
+static void dump_fault_info(struct pt_regs *regs)
+{
+	unsigned long asce;
+
+	pr_alert("Fault in ");
+	switch (regs->int_parm_long & 3) {
+	case 3:
+		pr_cont("home space ");
+		break;
+	case 2:
+		pr_cont("secondary space ");
+		break;
+	case 1:
+		pr_cont("access register ");
+		break;
+	case 0:
+		pr_cont("primary space ");
+		break;
+	}
+	pr_cont("mode while using ");
+	if (!user_space_fault(regs)) {
+		asce = S390_lowcore.kernel_asce;
+		pr_cont("kernel ");
+	}
+#ifdef CONFIG_PGSTE
+	else if ((current->flags & PF_VCPU) && S390_lowcore.gmap) {
+		struct gmap *gmap = (struct gmap *)S390_lowcore.gmap;
+		asce = gmap->asce;
+		pr_cont("gmap ");
+	}
+#endif
+	else {
+		asce = S390_lowcore.user_asce;
+		pr_cont("user ");
+	}
+	pr_cont("ASCE.\n");
+	dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK);
+}
+
 static inline void report_user_fault(struct pt_regs *regs, long signr)
 {
 	if ((task_pid_nr(current) > 1) && !show_unhandled_signals)
@@ -138,8 +265,9 @@ static inline void report_user_fault(struct pt_regs *regs, long signr)
 		regs->int_code);
 	print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN);
 	printk(KERN_CONT "\n");
-	printk(KERN_ALERT "failing address: %lX\n",
-	       regs->int_parm_long & __FAIL_ADDR_MASK);
+	printk(KERN_ALERT "failing address: %016lx TEID: %016lx\n",
+	       regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
+	dump_fault_info(regs);
 	show_regs(regs);
 }
 
@@ -177,11 +305,13 @@ static noinline void do_no_context(struct pt_regs *regs)
 	address = regs->int_parm_long & __FAIL_ADDR_MASK;
 	if (!user_space_fault(regs))
 		printk(KERN_ALERT "Unable to handle kernel pointer dereference"
-		       " at virtual kernel address %p\n", (void *)address);
+		       " in virtual kernel address space\n");
 	else
 		printk(KERN_ALERT "Unable to handle kernel paging request"
-		       " at virtual user address %p\n", (void *)address);
-
+		       " in virtual user address space\n");
+	printk(KERN_ALERT "failing address: %016lx TEID: %016lx\n",
+	       regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
+	dump_fault_info(regs);
 	die(regs, "Oops");
 	do_exit(SIGKILL);
 }
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 602f57e590b5..d1b7c377a234 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -250,8 +250,8 @@ archclean:
 PHONY += kvmconfig
 kvmconfig:
 	$(if $(wildcard $(objtree)/.config),, $(error You need an existing .config for this target))
-	$(Q)$(CONFIG_SHELL) $(srctree)/scripts/kconfig/merge_config.sh -m -O $(objtree) $(objtree)/.config arch/x86/configs/kvm_guest.config
-	$(Q)yes "" | $(MAKE) oldconfig
+	$(Q)$(CONFIG_SHELL) $(srctree)/scripts/kconfig/merge_config.sh -m -O $(objtree) $(objtree)/.config $(srctree)/arch/x86/configs/kvm_guest.config
+	$(Q)yes "" | $(MAKE) -f $(srctree)/Makefile oldconfig
 
 define archhelp
 	echo  '* bzImage	- Compressed kernel image (arch/x86/boot/bzImage)'
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index fcaf9c961265..7de069afb382 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -60,7 +60,7 @@
 	| X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
 	| X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \
 	| X86_CR4_OSXSAVE | X86_CR4_SMEP | X86_CR4_FSGSBASE \
-	| X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))
+	| X86_CR4_OSXMMEXCPT | X86_CR4_VMXE | X86_CR4_SMAP))
 
 #define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
 
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index eeee23ff75ef..68317c80de7f 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -598,7 +598,6 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
 {
 	struct mce m;
 	int i;
-	unsigned long *v;
 
 	this_cpu_inc(mce_poll_count);
 
@@ -618,8 +617,7 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
 		if (!(m.status & MCI_STATUS_VAL))
 			continue;
 
-		v = &get_cpu_var(mce_polled_error);
-		set_bit(0, v);
+		this_cpu_write(mce_polled_error, 1);
 		/*
 		 * Uncorrected or signalled events are handled by the exception
 		 * handler when it is enabled, so don't process those here.
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
index 3bdb95ae8c43..9a316b21df8b 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -42,7 +42,7 @@ static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned);
  * cmci_discover_lock protects against parallel discovery attempts
  * which could race against each other.
  */
-static DEFINE_RAW_SPINLOCK(cmci_discover_lock);
+static DEFINE_SPINLOCK(cmci_discover_lock);
 
 #define CMCI_THRESHOLD		1
 #define CMCI_POLL_INTERVAL	(30 * HZ)
@@ -144,14 +144,14 @@ static void cmci_storm_disable_banks(void)
 	int bank;
 	u64 val;
 
-	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
+	spin_lock_irqsave(&cmci_discover_lock, flags);
 	owned = __get_cpu_var(mce_banks_owned);
 	for_each_set_bit(bank, owned, MAX_NR_BANKS) {
 		rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
 		val &= ~MCI_CTL2_CMCI_EN;
 		wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
 	}
-	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
+	spin_unlock_irqrestore(&cmci_discover_lock, flags);
 }
 
 static bool cmci_storm_detect(void)
@@ -211,7 +211,7 @@ static void cmci_discover(int banks)
 	int i;
 	int bios_wrong_thresh = 0;
 
-	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
+	spin_lock_irqsave(&cmci_discover_lock, flags);
 	for (i = 0; i < banks; i++) {
 		u64 val;
 		int bios_zero_thresh = 0;
@@ -266,7 +266,7 @@ static void cmci_discover(int banks)
 			WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks)));
 		}
 	}
-	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
+	spin_unlock_irqrestore(&cmci_discover_lock, flags);
 	if (mca_cfg.bios_cmci_threshold && bios_wrong_thresh) {
 		pr_info_once(
 			"bios_cmci_threshold: Some banks do not have valid thresholds set\n");
@@ -316,10 +316,10 @@ void cmci_clear(void)
 
 	if (!cmci_supported(&banks))
 		return;
-	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
+	spin_lock_irqsave(&cmci_discover_lock, flags);
 	for (i = 0; i < banks; i++)
 		__cmci_disable_bank(i);
-	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
+	spin_unlock_irqrestore(&cmci_discover_lock, flags);
 }
 
 static void cmci_rediscover_work_func(void *arg)
@@ -360,9 +360,9 @@ void cmci_disable_bank(int bank)
 	if (!cmci_supported(&banks))
 		return;
 
-	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
+	spin_lock_irqsave(&cmci_discover_lock, flags);
 	__cmci_disable_bank(bank);
-	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
+	spin_unlock_irqrestore(&cmci_discover_lock, flags);
 }
 
 static void intel_init_cmci(void)
diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
index 059218ed5208..7c87424d4140 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
@@ -59,7 +59,7 @@
 #define INTEL_RAPL_PKG		0x2	/* pseudo-encoding */
 #define RAPL_IDX_RAM_NRG_STAT	2	/* DRAM */
 #define INTEL_RAPL_RAM		0x3	/* pseudo-encoding */
-#define RAPL_IDX_PP1_NRG_STAT	3	/* DRAM */
+#define RAPL_IDX_PP1_NRG_STAT	3	/* gpu */
 #define INTEL_RAPL_PP1		0x4	/* pseudo-encoding */
 
 /* Clients have PP0, PKG */
@@ -72,6 +72,12 @@
 			 1<<RAPL_IDX_PKG_NRG_STAT|\
 			 1<<RAPL_IDX_RAM_NRG_STAT)
 
+/* Servers have PP0, PKG, RAM, PP1 */
+#define RAPL_IDX_HSW	(1<<RAPL_IDX_PP0_NRG_STAT|\
+			 1<<RAPL_IDX_PKG_NRG_STAT|\
+			 1<<RAPL_IDX_RAM_NRG_STAT|\
+			 1<<RAPL_IDX_PP1_NRG_STAT)
+
 /*
  * event code: LSB 8 bits, passed in attr->config
  * any other bit is reserved
@@ -425,6 +431,24 @@ static struct attribute *rapl_events_cln_attr[] = {
 	NULL,
 };
 
+static struct attribute *rapl_events_hsw_attr[] = {
+	EVENT_PTR(rapl_cores),
+	EVENT_PTR(rapl_pkg),
+	EVENT_PTR(rapl_gpu),
+	EVENT_PTR(rapl_ram),
+
+	EVENT_PTR(rapl_cores_unit),
+	EVENT_PTR(rapl_pkg_unit),
+	EVENT_PTR(rapl_gpu_unit),
+	EVENT_PTR(rapl_ram_unit),
+
+	EVENT_PTR(rapl_cores_scale),
+	EVENT_PTR(rapl_pkg_scale),
+	EVENT_PTR(rapl_gpu_scale),
+	EVENT_PTR(rapl_ram_scale),
+	NULL,
+};
+
 static struct attribute_group rapl_pmu_events_group = {
 	.name = "events",
 	.attrs = NULL, /* patched at runtime */
@@ -511,6 +535,7 @@ static int rapl_cpu_prepare(int cpu)
 	struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu);
 	int phys_id = topology_physical_package_id(cpu);
 	u64 ms;
+	u64 msr_rapl_power_unit_bits;
 
 	if (pmu)
 		return 0;
@@ -518,6 +543,9 @@ static int rapl_cpu_prepare(int cpu)
 	if (phys_id < 0)
 		return -1;
 
+	if (!rdmsrl_safe(MSR_RAPL_POWER_UNIT, &msr_rapl_power_unit_bits))
+		return -1;
+
 	pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
 	if (!pmu)
 		return -1;
@@ -531,8 +559,7 @@ static int rapl_cpu_prepare(int cpu)
 	 *
 	 * we cache in local PMU instance
 	 */
-	rdmsrl(MSR_RAPL_POWER_UNIT, pmu->hw_unit);
-	pmu->hw_unit = (pmu->hw_unit >> 8) & 0x1FULL;
+	pmu->hw_unit = (msr_rapl_power_unit_bits >> 8) & 0x1FULL;
 	pmu->pmu = &rapl_pmu_class;
 
 	/*
@@ -631,11 +658,14 @@ static int __init rapl_pmu_init(void)
 	switch (boot_cpu_data.x86_model) {
 	case 42: /* Sandy Bridge */
 	case 58: /* Ivy Bridge */
-	case 60: /* Haswell */
-	case 69: /* Haswell-Celeron */
 		rapl_cntr_mask = RAPL_IDX_CLN;
 		rapl_pmu_events_group.attrs = rapl_events_cln_attr;
 		break;
+	case 60: /* Haswell */
+	case 69: /* Haswell-Celeron */
+		rapl_cntr_mask = RAPL_IDX_HSW;
+		rapl_pmu_events_group.attrs = rapl_events_hsw_attr;
+		break;
 	case 45: /* Sandy Bridge-EP */
 	case 62: /* IvyTown */
 		rapl_cntr_mask = RAPL_IDX_SRV;
@@ -650,7 +680,9 @@ static int __init rapl_pmu_init(void)
 	cpu_notifier_register_begin();
 
 	for_each_online_cpu(cpu) {
-		rapl_cpu_prepare(cpu);
+		ret = rapl_cpu_prepare(cpu);
+		if (ret)
+			goto out;
 		rapl_cpu_init(cpu);
 	}
 
@@ -673,6 +705,7 @@ static int __init rapl_pmu_init(void)
 		hweight32(rapl_cntr_mask),
 		ktime_to_ms(pmu->timer_interval));
 
+out:
 	cpu_notifier_register_done();
 
 	return 0;
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index b0cc3809723d..6e2537c32190 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -240,7 +240,7 @@ static u32 __init intel_stolen_base(int num, int slot, int func, size_t stolen_s
 	return base;
 }
 
-#define KB(x)	((x) * 1024)
+#define KB(x)	((x) * 1024UL)
 #define MB(x)	(KB (KB (x)))
 #define GB(x)	(MB (KB (x)))
 
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 79a3f9682871..61b17dc2c277 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -897,9 +897,10 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 	struct kprobe *cur = kprobe_running();
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
 
-	switch (kcb->kprobe_status) {
-	case KPROBE_HIT_SS:
-	case KPROBE_REENTER:
+	if (unlikely(regs->ip == (unsigned long)cur->ainsn.insn)) {
+		/* This must happen on single-stepping */
+		WARN_ON(kcb->kprobe_status != KPROBE_HIT_SS &&
+			kcb->kprobe_status != KPROBE_REENTER);
 		/*
 		 * We are here because the instruction being single
 		 * stepped caused a page fault. We reset the current
@@ -914,9 +915,8 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 		else
 			reset_current_kprobe();
 		preempt_enable_no_resched();
-		break;
-	case KPROBE_HIT_ACTIVE:
-	case KPROBE_HIT_SSDONE:
+	} else if (kcb->kprobe_status == KPROBE_HIT_ACTIVE ||
+		   kcb->kprobe_status == KPROBE_HIT_SSDONE) {
 		/*
 		 * We increment the nmissed count for accounting,
 		 * we can also use npre/npostfault count for accounting
@@ -945,10 +945,8 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 		 * fixup routine could not handle it,
 		 * Let do_page_fault() fix it.
 		 */
-		break;
-	default:
-		break;
 	}
+
 	return 0;
 }
 
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 654b46574b91..3399d3a99730 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -114,8 +114,8 @@ EXPORT_SYMBOL(machine_real_restart);
  */
 static int __init set_pci_reboot(const struct dmi_system_id *d)
 {
-	if (reboot_type != BOOT_CF9) {
-		reboot_type = BOOT_CF9;
+	if (reboot_type != BOOT_CF9_FORCE) {
+		reboot_type = BOOT_CF9_FORCE;
 		pr_info("%s series board detected. Selecting %s-method for reboots.\n",
 			d->ident, "PCI");
 	}
@@ -458,20 +458,23 @@ void __attribute__((weak)) mach_reboot_fixups(void)
 }
 
 /*
- * Windows compatible x86 hardware expects the following on reboot:
+ * To the best of our knowledge Windows compatible x86 hardware expects
+ * the following on reboot:
  *
  * 1) If the FADT has the ACPI reboot register flag set, try it
  * 2) If still alive, write to the keyboard controller
  * 3) If still alive, write to the ACPI reboot register again
  * 4) If still alive, write to the keyboard controller again
  * 5) If still alive, call the EFI runtime service to reboot
- * 6) If still alive, write to the PCI IO port 0xCF9 to reboot
- * 7) If still alive, inform BIOS to do a proper reboot
+ * 6) If no EFI runtime service, call the BIOS to do a reboot
  *
- * If the machine is still alive at this stage, it gives up. We default to
- * following the same pattern, except that if we're still alive after (7) we'll
- * try to force a triple fault and then cycle between hitting the keyboard
- * controller and doing that
+ * We default to following the same pattern. We also have
+ * two other reboot methods: 'triple fault' and 'PCI', which
+ * can be triggered via the reboot= kernel boot option or
+ * via quirks.
+ *
+ * This means that this function can never return, it can misbehave
+ * by not rebooting properly and hanging.
  */
 static void native_machine_emergency_restart(void)
 {
@@ -492,6 +495,11 @@ static void native_machine_emergency_restart(void)
 	for (;;) {
 		/* Could also try the reset bit in the Hammer NB */
 		switch (reboot_type) {
+		case BOOT_ACPI:
+			acpi_reboot();
+			reboot_type = BOOT_KBD;
+			break;
+
 		case BOOT_KBD:
 			mach_reboot_fixups(); /* For board specific fixups */
 
@@ -509,43 +517,29 @@ static void native_machine_emergency_restart(void)
 			}
 			break;
 
-		case BOOT_TRIPLE:
-			load_idt(&no_idt);
-			__asm__ __volatile__("int3");
-
-			/* We're probably dead after this, but... */
-			reboot_type = BOOT_KBD;
-			break;
-
-		case BOOT_BIOS:
-			machine_real_restart(MRR_BIOS);
-
-			/* We're probably dead after this, but... */
-			reboot_type = BOOT_TRIPLE;
-			break;
-
-		case BOOT_ACPI:
-			acpi_reboot();
-			reboot_type = BOOT_KBD;
-			break;
-
 		case BOOT_EFI:
 			if (efi_enabled(EFI_RUNTIME_SERVICES))
 				efi.reset_system(reboot_mode == REBOOT_WARM ?
 						 EFI_RESET_WARM :
 						 EFI_RESET_COLD,
 						 EFI_SUCCESS, 0, NULL);
-			reboot_type = BOOT_CF9_COND;
+			reboot_type = BOOT_BIOS;
+			break;
+
+		case BOOT_BIOS:
+			machine_real_restart(MRR_BIOS);
+
+			/* We're probably dead after this, but... */
+			reboot_type = BOOT_CF9_SAFE;
 			break;
 
-		case BOOT_CF9:
+		case BOOT_CF9_FORCE:
 			port_cf9_safe = true;
 			/* Fall through */
 
-		case BOOT_CF9_COND:
+		case BOOT_CF9_SAFE:
 			if (port_cf9_safe) {
-				u8 reboot_code = reboot_mode == REBOOT_WARM ?
-					0x06 : 0x0E;
+				u8 reboot_code = reboot_mode == REBOOT_WARM ? 0x06 : 0x0E;
 				u8 cf9 = inb(0xcf9) & ~reboot_code;
 				outb(cf9|2, 0xcf9); /* Request hard reset */
 				udelay(50);
@@ -553,7 +547,15 @@ static void native_machine_emergency_restart(void)
553 outb(cf9|reboot_code, 0xcf9); 547 outb(cf9|reboot_code, 0xcf9);
554 udelay(50); 548 udelay(50);
555 } 549 }
556 reboot_type = BOOT_BIOS; 550 reboot_type = BOOT_TRIPLE;
551 break;
552
553 case BOOT_TRIPLE:
554 load_idt(&no_idt);
555 __asm__ __volatile__("int3");
556
557 /* We're probably dead after this, but... */
558 reboot_type = BOOT_KBD;
557 break; 559 break;
558 } 560 }
559 } 561 }
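The hunk above reorders the emergency-restart switch so that each case names the next method to try, giving the chain ACPI, then the keyboard controller, then EFI, then the BIOS, then port 0xCF9, then a triple fault, then the keyboard controller again. The minimal C sketch below (not part of the patch) models that chain in userspace with stubbed port I/O; the BOOT_KBD successor and the stub names are illustrative assumptions, since the KBD case's follow-up lies outside this excerpt.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum boot_type { BOOT_ACPI, BOOT_KBD, BOOT_EFI, BOOT_BIOS,
		 BOOT_CF9_FORCE, BOOT_CF9_SAFE, BOOT_TRIPLE };

static uint8_t fake_cf9;	/* stands in for the real 0xCF9 register */

static uint8_t inb(uint16_t port) { (void)port; return fake_cf9; }
static void outb(uint8_t v, uint16_t port)
{
	(void)port;
	fake_cf9 = v;
	printf("outb(0x%02x, 0xcf9)\n", (unsigned)v);
}

/* One step of the fallback chain: try a method, return the next one. */
static enum boot_type try_reboot(enum boot_type t, bool warm, bool *cf9_safe)
{
	switch (t) {
	case BOOT_ACPI:	 /* acpi_reboot() */		return BOOT_KBD;
	case BOOT_KBD:	 /* keyboard controller */	return BOOT_EFI; /* assumed */
	case BOOT_EFI:	 /* efi.reset_system() */	return BOOT_BIOS;
	case BOOT_BIOS:	 /* machine_real_restart() */	return BOOT_CF9_SAFE;
	case BOOT_CF9_FORCE:
		*cf9_safe = true;
		/* fall through */
	case BOOT_CF9_SAFE:
		if (*cf9_safe) {
			uint8_t code = warm ? 0x06 : 0x0E; /* warm vs. cold */
			uint8_t cf9 = inb(0xcf9) & (uint8_t)~code;
			outb(cf9 | 2, 0xcf9);	 /* request hard reset */
			outb(cf9 | code, 0xcf9); /* perform the reset */
		}
		return BOOT_TRIPLE;
	case BOOT_TRIPLE: /* load_idt(&no_idt); int3 */	return BOOT_KBD;
	}
	return BOOT_KBD;
}

int main(void)
{
	bool cf9_safe = false;
	enum boot_type t = BOOT_ACPI;
	for (int i = 0; i < 7; i++)	/* the real loop never exits */
		t = try_reboot(t, false, &cf9_safe);
	return 0;
}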
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index bea60671ef8a..f47a104a749c 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -308,7 +308,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 	const u32 kvm_supported_word9_x86_features =
 		F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
 		F(BMI2) | F(ERMS) | f_invpcid | F(RTM) | f_mpx | F(RDSEED) |
-		F(ADX);
+		F(ADX) | F(SMAP);
 
 	/* all calls to cpuid_count() should be made on the same cpu */
 	get_cpu();
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index a2a1bb7ed8c1..eeecbed26ac7 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -48,6 +48,14 @@ static inline bool guest_cpuid_has_smep(struct kvm_vcpu *vcpu)
 	return best && (best->ebx & bit(X86_FEATURE_SMEP));
 }
 
+static inline bool guest_cpuid_has_smap(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpuid_entry2 *best;
+
+	best = kvm_find_cpuid_entry(vcpu, 7, 0);
+	return best && (best->ebx & bit(X86_FEATURE_SMAP));
+}
+
 static inline bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *best;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index f5704d9e5ddc..813d31038b93 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3601,20 +3601,27 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
 	}
 }
 
-static void update_permission_bitmask(struct kvm_vcpu *vcpu,
+void update_permission_bitmask(struct kvm_vcpu *vcpu,
 				      struct kvm_mmu *mmu, bool ept)
 {
 	unsigned bit, byte, pfec;
 	u8 map;
-	bool fault, x, w, u, wf, uf, ff, smep;
+	bool fault, x, w, u, wf, uf, ff, smapf, cr4_smap, cr4_smep, smap = 0;
 
-	smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
+	cr4_smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
+	cr4_smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
 	for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) {
 		pfec = byte << 1;
 		map = 0;
 		wf = pfec & PFERR_WRITE_MASK;
 		uf = pfec & PFERR_USER_MASK;
 		ff = pfec & PFERR_FETCH_MASK;
+		/*
+		 * The PFERR_RSVD_MASK bit is set in PFEC if the access is
+		 * not subject to SMAP restrictions, and cleared otherwise.
+		 * The bit is only meaningful if the SMAP bit is set in CR4.
+		 */
+		smapf = !(pfec & PFERR_RSVD_MASK);
 		for (bit = 0; bit < 8; ++bit) {
 			x = bit & ACC_EXEC_MASK;
 			w = bit & ACC_WRITE_MASK;
@@ -3626,12 +3633,33 @@ static void update_permission_bitmask(struct kvm_vcpu *vcpu,
 				/* Allow supervisor writes if !cr0.wp */
 				w |= !is_write_protection(vcpu) && !uf;
 				/* Disallow supervisor fetches of user code if cr4.smep */
-				x &= !(smep && u && !uf);
+				x &= !(cr4_smep && u && !uf);
+
+				/*
+				 * SMAP: kernel-mode data accesses through
+				 * user-mode mappings should fault. A fault
+				 * is considered a SMAP violation if all of
+				 * the following conditions are true:
+				 *   - X86_CR4_SMAP is set in CR4
+				 *   - A user page is accessed
+				 *   - The page fault is in kernel mode
+				 *   - CPL = 3 or X86_EFLAGS_AC is clear
+				 *
+				 * Here, we cover the first three conditions.
+				 * The fourth is computed dynamically in
+				 * permission_fault() and is in smapf.
+				 *
+				 * Also, SMAP does not affect instruction
+				 * fetches; the !ff check is added here to
+				 * make that clearer.
+				 */
+				smap = cr4_smap && u && !uf && !ff;
 			} else
 				/* Not really needed: no U/S accesses on ept */
 				u = 1;
 
-			fault = (ff && !x) || (uf && !u) || (wf && !w);
+			fault = (ff && !x) || (uf && !u) || (wf && !w) ||
+				(smapf && smap);
 			map |= fault << bit;
 		}
 		mmu->permissions[byte] = map;
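For readers following the construction: update_permission_bitmask() precomputes, for every (pfec, pte-access) pair, whether that combination faults, so permission_fault() becomes a table lookup. Below is a minimal userspace model of the non-EPT branch (not from the kernel tree; the NX handling and the EPT path are omitted), with plain flag arguments standing in for kvm_read_cr4_bits().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ACC_EXEC_MASK  1
#define ACC_WRITE_MASK 2
#define ACC_USER_MASK  4

#define PFERR_WRITE_MASK (1u << 1)
#define PFERR_USER_MASK  (1u << 2)
#define PFERR_RSVD_MASK  (1u << 3)
#define PFERR_FETCH_MASK (1u << 4)

/* Model of one iteration of the byte loop above (non-EPT, NX omitted). */
static uint8_t permission_byte(unsigned pfec, bool cr0_wp,
			       bool cr4_smep, bool cr4_smap)
{
	bool wf = pfec & PFERR_WRITE_MASK;
	bool uf = pfec & PFERR_USER_MASK;
	bool ff = pfec & PFERR_FETCH_MASK;
	/* RSVD set in pfec means "SMAP checks overridden" (see above). */
	bool smapf = !(pfec & PFERR_RSVD_MASK);
	uint8_t map = 0;

	for (unsigned bit = 0; bit < 8; ++bit) {
		bool x = bit & ACC_EXEC_MASK;
		bool w = bit & ACC_WRITE_MASK;
		bool u = bit & ACC_USER_MASK;

		w = w || (!cr0_wp && !uf);	  /* supervisor writes if !cr0.wp */
		x = x && !(cr4_smep && u && !uf); /* SMEP */
		/* SMAP: supervisor data access to a user page */
		bool smap = cr4_smap && u && !uf && !ff;

		bool fault = (ff && !x) || (uf && !u) || (wf && !w) ||
			     (smapf && smap);
		map |= (uint8_t)fault << bit;
	}
	return map;
}

int main(void)
{
	/* Supervisor read (pfec = 0) with SMAP on: the four user-page
	 * access combinations (ACC_USER set) fault, so map == 0xf0. */
	printf("map = 0x%02x\n", permission_byte(0, true, true, true));
	return 0;
}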
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 292615274358..3842e70bdb7c 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -44,11 +44,17 @@
 #define PT_DIRECTORY_LEVEL 2
 #define PT_PAGE_TABLE_LEVEL 1
 
-#define PFERR_PRESENT_MASK (1U << 0)
-#define PFERR_WRITE_MASK (1U << 1)
-#define PFERR_USER_MASK (1U << 2)
-#define PFERR_RSVD_MASK (1U << 3)
-#define PFERR_FETCH_MASK (1U << 4)
+#define PFERR_PRESENT_BIT 0
+#define PFERR_WRITE_BIT 1
+#define PFERR_USER_BIT 2
+#define PFERR_RSVD_BIT 3
+#define PFERR_FETCH_BIT 4
+
+#define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT)
+#define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT)
+#define PFERR_USER_MASK (1U << PFERR_USER_BIT)
+#define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT)
+#define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT)
 
 int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
 void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask);
@@ -73,6 +79,8 @@ int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct);
 void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
 			     bool execonly);
+void update_permission_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+			       bool ept);
 
 static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
 {
@@ -110,10 +118,30 @@ static inline bool is_write_protection(struct kvm_vcpu *vcpu)
  * Will a fault with a given page-fault error code (pfec) cause a permission
  * fault with the given access (in ACC_* format)?
  */
-static inline bool permission_fault(struct kvm_mmu *mmu, unsigned pte_access,
-				    unsigned pfec)
+static inline bool permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+				    unsigned pte_access, unsigned pfec)
 {
-	return (mmu->permissions[pfec >> 1] >> pte_access) & 1;
+	int cpl = kvm_x86_ops->get_cpl(vcpu);
+	unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
+
+	/*
+	 * If CPL < 3, SMAP protections are disabled if EFLAGS.AC = 1.
+	 *
+	 * If CPL = 3, SMAP applies to all supervisor-mode data accesses
+	 * (these are implicit supervisor accesses) regardless of the value
+	 * of EFLAGS.AC.
+	 *
+	 * This computes (cpl < 3) && (rflags & X86_EFLAGS_AC), leaving
+	 * the result in X86_EFLAGS_AC. We then insert it in place of
+	 * the PFERR_RSVD_MASK bit; this bit will always be zero in pfec,
+	 * but it will be one in index if SMAP checks are being overridden.
+	 * It is important to keep this branchless.
+	 */
+	unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);
+	int index = (pfec >> 1) +
+		    (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
+
+	return (mmu->permissions[index] >> pte_access) & 1;
 }
 
 void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm);
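The arithmetic in the new permission_fault() is worth unpacking: X86_EFLAGS_AC is bit 18, PFERR_RSVD_BIT is 3, and the table index is pfec >> 1, so the shift by (18 - 3 + 1) = 16 moves a set AC bit exactly onto the RSVD position of the index (value 4). A small standalone check (a sketch outside the kernel tree, assuming only the standard x86 constants):

#include <assert.h>
#include <stdio.h>

#define X86_EFLAGS_AC_BIT 18
#define X86_EFLAGS_AC     (1UL << X86_EFLAGS_AC_BIT)
#define PFERR_RSVD_BIT    3

/* Reproduce the branchless index computation for a few (cpl, AC) pairs. */
static int smap_index_offset(int cpl, unsigned long rflags)
{
	/* cpl - 3 is negative (all ones in two's complement) iff cpl < 3,
	 * so the AND keeps EFLAGS.AC only when cpl < 3. */
	unsigned long smap = (unsigned long)(cpl - 3) & (rflags & X86_EFLAGS_AC);
	return (int)(smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
}

int main(void)
{
	/* CPL < 3 with EFLAGS.AC set: SMAP is overridden, and the offset
	 * is exactly PFERR_RSVD_MASK >> 1 == 4, i.e. the RSVD bit's place
	 * after the pfec >> 1 shift used to index mmu->permissions[]. */
	assert(smap_index_offset(0, X86_EFLAGS_AC) == 4);
	assert(smap_index_offset(2, X86_EFLAGS_AC) == 4);
	/* CPL 3: AC is ignored, SMAP always applies. */
	assert(smap_index_offset(3, X86_EFLAGS_AC) == 0);
	/* AC clear: no override at any CPL. */
	assert(smap_index_offset(0, 0) == 0);
	puts("branchless SMAP index checks pass");
	return 0;
}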
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index b1e6c1bf68d3..123efd3ec29f 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -353,7 +353,7 @@ retry_walk:
 		walker->ptes[walker->level - 1] = pte;
 	} while (!is_last_gpte(mmu, walker->level, pte));
 
-	if (unlikely(permission_fault(mmu, pte_access, access))) {
+	if (unlikely(permission_fault(vcpu, mmu, pte_access, access))) {
 		errcode |= PFERR_PRESENT_MASK;
 		goto error;
 	}
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 1320e0f8e611..1f68c5831924 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3484,13 +3484,14 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 			hw_cr4 &= ~X86_CR4_PAE;
 			hw_cr4 |= X86_CR4_PSE;
 			/*
-			 * SMEP is disabled if CPU is in non-paging mode in
-			 * hardware. However KVM always uses paging mode to
+			 * SMEP/SMAP is disabled if CPU is in non-paging mode
+			 * in hardware. However KVM always uses paging mode to
 			 * emulate guest non-paging mode with TDP.
-			 * To emulate this behavior, SMEP needs to be manually
-			 * disabled when guest switches to non-paging mode.
+			 * To emulate this behavior, SMEP/SMAP needs to be
+			 * manually disabled when guest switches to non-paging
+			 * mode.
 			 */
-			hw_cr4 &= ~X86_CR4_SMEP;
+			hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP);
 		} else if (!(cr4 & X86_CR4_PAE)) {
 			hw_cr4 &= ~X86_CR4_PAE;
 		}
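The effect of the hunk above is easy to see in isolation: with TDP the guest may run unpaged while the host keeps paging enabled, so any CR4 bit whose hardware semantics depend on paging has to be hidden from hardware. A minimal standalone sketch (assuming the standard CR4 bit positions, SMEP = bit 20, SMAP = bit 21):

#include <stdio.h>

#define X86_CR4_SMEP (1UL << 20)
#define X86_CR4_SMAP (1UL << 21)

/* Mirror of the masking above: when the guest runs unpaged (but the
 * host still uses paging for TDP), both bits are hidden from hardware. */
static unsigned long hw_cr4_for(unsigned long guest_cr4, int guest_paging)
{
	unsigned long hw_cr4 = guest_cr4;
	if (!guest_paging)
		hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP);
	return hw_cr4;
}

int main(void)
{
	unsigned long cr4 = X86_CR4_SMEP | X86_CR4_SMAP;
	printf("paged:   %#lx\n", hw_cr4_for(cr4, 1)); /* bits kept  */
	printf("unpaged: %#lx\n", hw_cr4_for(cr4, 0)); /* bits clear */
	return 0;
}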
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9d1b5cd4d34c..8b8fc0b792ba 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -652,6 +652,9 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	if (!guest_cpuid_has_smep(vcpu) && (cr4 & X86_CR4_SMEP))
 		return 1;
 
+	if (!guest_cpuid_has_smap(vcpu) && (cr4 & X86_CR4_SMAP))
+		return 1;
+
 	if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_FSGSBASE))
 		return 1;
 
@@ -680,6 +683,9 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 	    (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
 		kvm_mmu_reset_context(vcpu);
 
+	if ((cr4 ^ old_cr4) & X86_CR4_SMAP)
+		update_permission_bitmask(vcpu, vcpu->arch.walk_mmu, false);
+
 	if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
 		kvm_update_cpuid(vcpu);
 
@@ -1117,7 +1123,6 @@ static inline u64 get_kernel_ns(void)
 {
 	struct timespec ts;
 
-	WARN_ON(preemptible());
 	ktime_get_ts(&ts);
 	monotonic_to_bootbased(&ts);
 	return timespec_to_ns(&ts);
@@ -4164,7 +4169,8 @@ static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
 		  | (write ? PFERR_WRITE_MASK : 0);
 
 	if (vcpu_match_mmio_gva(vcpu, gva)
-	    && !permission_fault(vcpu->arch.walk_mmu, vcpu->arch.access, access)) {
+	    && !permission_fault(vcpu, vcpu->arch.walk_mmu,
+				 vcpu->arch.access, access)) {
 		*gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT |
 				(gva & (PAGE_SIZE - 1));
 		trace_vcpu_match_mmio(gva, *gpa, write, false);
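Taken together, the x86.c hunks gate CR4.SMAP on guest CPUID and rebuild the cached permission bitmap whenever the bit flips. A compact standalone model of that control flow (guest_has_* and refresh_permission_bitmap() are illustrative stand-ins, not kernel APIs):

#include <stdbool.h>
#include <stdio.h>

#define X86_CR4_SMEP (1UL << 20)
#define X86_CR4_SMAP (1UL << 21)

/* Illustrative stand-ins for guest CPUID checks and the bitmap rebuild. */
static bool guest_has_smep = true;
static bool guest_has_smap = false;	/* guest CPUID lacks SMAP */
static void refresh_permission_bitmap(void)
{
	puts("permission bitmap rebuilt");
}

/* Mirrors kvm_set_cr4(): reject unadvertised bits, refresh on toggle. */
static int set_cr4(unsigned long *cr4, unsigned long val)
{
	if (!guest_has_smep && (val & X86_CR4_SMEP))
		return 1;
	if (!guest_has_smap && (val & X86_CR4_SMAP))
		return 1;

	unsigned long old = *cr4;
	*cr4 = val;
	if ((val ^ old) & X86_CR4_SMAP)
		refresh_permission_bitmap();
	return 0;
}

int main(void)
{
	unsigned long cr4 = 0;
	/* Rejected: CPUID does not advertise SMAP. */
	printf("set SMAP -> %d\n", set_cr4(&cr4, X86_CR4_SMAP));
	/* Accepted once advertised; the toggle triggers a rebuild. */
	guest_has_smap = true;
	printf("set SMAP -> %d\n", set_cr4(&cr4, X86_CR4_SMAP));
	return 0;
}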
diff --git a/arch/x86/syscalls/Makefile b/arch/x86/syscalls/Makefile
index f325af26107c..3323c2745248 100644
--- a/arch/x86/syscalls/Makefile
+++ b/arch/x86/syscalls/Makefile
@@ -54,5 +54,7 @@ syshdr-$(CONFIG_X86_64) += syscalls_64.h
 
 targets += $(uapisyshdr-y) $(syshdr-y)
 
+PHONY += all
 all: $(addprefix $(uapi)/,$(uapisyshdr-y))
 all: $(addprefix $(out)/,$(syshdr-y))
+	@:
diff --git a/arch/x86/syscalls/syscall_32.tbl b/arch/x86/syscalls/syscall_32.tbl
index 96bc506ac6de..d6b867921612 100644
--- a/arch/x86/syscalls/syscall_32.tbl
+++ b/arch/x86/syscalls/syscall_32.tbl
@@ -359,3 +359,4 @@
 350	i386	finit_module		sys_finit_module
 351	i386	sched_setattr		sys_sched_setattr
 352	i386	sched_getattr		sys_sched_getattr
+353	i386	renameat2		sys_renameat2
diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
index e8120346903b..604a37efd4d5 100644
--- a/arch/x86/tools/Makefile
+++ b/arch/x86/tools/Makefile
@@ -40,4 +40,6 @@ $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/ina
 HOST_EXTRACFLAGS += -I$(srctree)/tools/include
 hostprogs-y	+= relocs
 relocs-objs	:= relocs_32.o relocs_64.o relocs_common.o
+PHONY += relocs
 relocs: $(obj)/relocs
+	@:
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index a18eadd8bb40..7005974c3ff3 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -441,10 +441,11 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
 	irq_ctx_init(cpu);
 #else
 	clear_tsk_thread_flag(idle, TIF_FORK);
+#endif
 	per_cpu(kernel_stack, cpu) =
 		(unsigned long)task_stack_page(idle) -
 		KERNEL_STACK_OFFSET + THREAD_SIZE;
-#endif
+
 	xen_setup_runstate_info(cpu);
 	xen_setup_timer(cpu);
 	xen_init_lock_cpu(cpu);
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 4d3acc34a998..0ba5f3b967f0 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -274,7 +274,7 @@ void __init xen_init_spinlocks(void)
 		printk(KERN_DEBUG "xen: PV spinlocks disabled\n");
 		return;
 	}
-
+	printk(KERN_DEBUG "xen: PV spinlocks enabled\n");
 	pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(xen_lock_spinning);
 	pv_lock_ops.unlock_kick = xen_unlock_kick;
 }
@@ -290,6 +290,9 @@ static __init int xen_init_spinlocks_jump(void)
 	if (!xen_pvspin)
 		return 0;
 
+	if (!xen_domain())
+		return 0;
+
 	static_key_slow_inc(&paravirt_ticketlocks_enabled);
 	return 0;
 }
diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
index 33ca6e42a4ca..fd92a64d748e 100644
--- a/arch/x86/xen/xen-asm_32.S
+++ b/arch/x86/xen/xen-asm_32.S
@@ -75,6 +75,17 @@ ENDPROC(xen_sysexit)
  * stack state in whatever form it's in, we keep things simple by only
  * using a single register which is pushed/popped on the stack.
  */
+
+.macro POP_FS
+1:
+	popw %fs
+.pushsection .fixup, "ax"
+2:	movw $0, (%esp)
+	jmp 1b
+.popsection
+	_ASM_EXTABLE(1b,2b)
+.endm
+
 ENTRY(xen_iret)
 	/* test eflags for special cases */
 	testl $(X86_EFLAGS_VM | XEN_EFLAGS_NMI), 8(%esp)
@@ -83,15 +94,13 @@ ENTRY(xen_iret)
 	push %eax
 	ESP_OFFSET=4	# bytes pushed onto stack
 
-	/*
-	 * Store vcpu_info pointer for easy access. Do it this way to
-	 * avoid having to reload %fs
-	 */
+	/* Store vcpu_info pointer for easy access */
 #ifdef CONFIG_SMP
-	GET_THREAD_INFO(%eax)
-	movl %ss:TI_cpu(%eax), %eax
-	movl %ss:__per_cpu_offset(,%eax,4), %eax
-	mov %ss:xen_vcpu(%eax), %eax
+	pushw %fs
+	movl $(__KERNEL_PERCPU), %eax
+	movl %eax, %fs
+	movl %fs:xen_vcpu, %eax
+	POP_FS
 #else
 	movl %ss:xen_vcpu, %eax
 #endif