author     Linus Torvalds <torvalds@linux-foundation.org>  2015-06-22 18:52:04 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-06-22 18:52:04 -0400
commit     23b7776290b10297fe2cae0fb5f166a4f2c68121 (patch)
tree       73d1e76644a20bc7bff80fbfdb08e8b9a9f28420 /arch
parent     6bc4c3ad3619e1bcb4a6330e030007ace8ca465e (diff)
parent     6fab54101923044712baee429ff573f03b99fc47 (diff)
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Ingo Molnar:
 "The main changes are:

  - lockless wakeup support for futexes and IPC message queues
    (Davidlohr Bueso, Peter Zijlstra)

  - Replace spinlocks with atomics in thread_group_cputimer(), to
    improve scalability (Jason Low)

  - NUMA balancing improvements (Rik van Riel)

  - SCHED_DEADLINE improvements (Wanpeng Li)

  - clean up and reorganize preemption helpers (Frederic Weisbecker)

  - decouple page fault disabling machinery from the preemption
    counter, to improve debuggability and robustness (David Hildenbrand)

  - SCHED_DEADLINE documentation updates (Luca Abeni)

  - topology CPU masks cleanups (Bartosz Golaszewski)

  - /proc/sched_debug improvements (Srikar Dronamraju)"

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (79 commits)
  sched/deadline: Remove needless parameter in dl_runtime_exceeded()
  sched: Remove superfluous resetting of the p->dl_throttled flag
  sched/deadline: Drop duplicate init_sched_dl_class() declaration
  sched/deadline: Reduce rq lock contention by eliminating locking of non-feasible target
  sched/deadline: Make init_sched_dl_class() __init
  sched/deadline: Optimize pull_dl_task()
  sched/preempt: Add static_key() to preempt_notifiers
  sched/preempt: Fix preempt notifiers documentation about hlist_del() within unsafe iteration
  sched/stop_machine: Fix deadlock between multiple stop_two_cpus()
  sched/debug: Add sum_sleep_runtime to /proc/<pid>/sched
  sched/debug: Replace vruntime with wait_sum in /proc/sched_debug
  sched/debug: Properly format runnable tasks in /proc/sched_debug
  sched/numa: Only consider less busy nodes as numa balancing destinations
  Revert 095bebf61a46 ("sched/numa: Do not move past the balance point if unbalanced")
  sched/fair: Prevent throttling in early pick_next_task_fair()
  preempt: Reorganize the notrace definitions a bit
  preempt: Use preempt_schedule_context() as the official tracing preemption point
  sched: Make preempt_schedule_context() function-tracing safe
  x86: Remove cpu_sibling_mask() and cpu_core_mask()
  x86: Replace cpu_**_mask() with topology_**_cpumask()
  ...
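The page-fault item above accounts for most of the arch/ churn in this diff: pagefault_disable() no longer implies preempt_disable(), fault handlers check the new faulthandler_disabled() helper instead of in_atomic(), and kmap_atomic()-style code disables preemption explicitly. A condensed sketch of the resulting pattern, paraphrased from the hunks below rather than quoted verbatim from any single file:

    /* fault handlers: was "if (in_atomic() || !mm)" */
    if (faulthandler_disabled() || !mm)
            goto no_context;        /* must not sleep; take the fixup path */

    /* atomic kmaps: preemption must now be disabled explicitly */
    void *kmap_atomic(struct page *page)
    {
            preempt_disable();
            pagefault_disable();
            if (!PageHighMem(page))
                    return page_address(page);
            /* ... map the highmem page into a fixmap slot ... */
    }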
Diffstat (limited to 'arch')
-rw-r--r--  arch/alpha/mm/fault.c | 5
-rw-r--r--  arch/arc/include/asm/futex.h | 10
-rw-r--r--  arch/arc/mm/fault.c | 2
-rw-r--r--  arch/arm/include/asm/futex.h | 13
-rw-r--r--  arch/arm/include/asm/topology.h | 2
-rw-r--r--  arch/arm/mm/fault.c | 2
-rw-r--r--  arch/arm/mm/highmem.c | 3
-rw-r--r--  arch/arm64/include/asm/futex.h | 4
-rw-r--r--  arch/arm64/include/asm/topology.h | 2
-rw-r--r--  arch/arm64/mm/fault.c | 2
-rw-r--r--  arch/avr32/include/asm/uaccess.h | 12
-rw-r--r--  arch/avr32/mm/fault.c | 4
-rw-r--r--  arch/cris/mm/fault.c | 6
-rw-r--r--  arch/frv/mm/fault.c | 4
-rw-r--r--  arch/frv/mm/highmem.c | 2
-rw-r--r--  arch/hexagon/include/asm/uaccess.h | 3
-rw-r--r--  arch/ia64/include/asm/topology.h | 2
-rw-r--r--  arch/ia64/mm/fault.c | 4
-rw-r--r--  arch/m32r/include/asm/uaccess.h | 30
-rw-r--r--  arch/m32r/mm/fault.c | 8
-rw-r--r--  arch/m68k/include/asm/irqflags.h | 3
-rw-r--r--  arch/m68k/mm/fault.c | 4
-rw-r--r--  arch/metag/mm/fault.c | 2
-rw-r--r--  arch/metag/mm/highmem.c | 4
-rw-r--r--  arch/microblaze/include/asm/uaccess.h | 6
-rw-r--r--  arch/microblaze/mm/fault.c | 8
-rw-r--r--  arch/microblaze/mm/highmem.c | 4
-rw-r--r--  arch/mips/include/asm/topology.h | 2
-rw-r--r--  arch/mips/include/asm/uaccess.h | 45
-rw-r--r--  arch/mips/kernel/signal-common.h | 9
-rw-r--r--  arch/mips/mm/fault.c | 4
-rw-r--r--  arch/mips/mm/highmem.c | 5
-rw-r--r--  arch/mips/mm/init.c | 2
-rw-r--r--  arch/mn10300/include/asm/highmem.h | 3
-rw-r--r--  arch/mn10300/mm/fault.c | 4
-rw-r--r--  arch/nios2/mm/fault.c | 2
-rw-r--r--  arch/parisc/include/asm/cacheflush.h | 2
-rw-r--r--  arch/parisc/kernel/traps.c | 4
-rw-r--r--  arch/parisc/mm/fault.c | 4
-rw-r--r--  arch/powerpc/include/asm/topology.h | 2
-rw-r--r--  arch/powerpc/lib/vmx-helper.c | 11
-rw-r--r--  arch/powerpc/mm/fault.c | 9
-rw-r--r--  arch/powerpc/mm/highmem.c | 4
-rw-r--r--  arch/powerpc/mm/tlb_nohash.c | 2
-rw-r--r--  arch/s390/include/asm/topology.h | 3
-rw-r--r--  arch/s390/include/asm/uaccess.h | 15
-rw-r--r--  arch/s390/mm/fault.c | 2
-rw-r--r--  arch/score/include/asm/uaccess.h | 15
-rw-r--r--  arch/score/mm/fault.c | 3
-rw-r--r--  arch/sh/mm/fault.c | 5
-rw-r--r--  arch/sparc/include/asm/topology_64.h | 2
-rw-r--r--  arch/sparc/mm/fault_32.c | 4
-rw-r--r--  arch/sparc/mm/fault_64.c | 4
-rw-r--r--  arch/sparc/mm/highmem.c | 4
-rw-r--r--  arch/sparc/mm/init_64.c | 2
-rw-r--r--  arch/tile/include/asm/topology.h | 2
-rw-r--r--  arch/tile/include/asm/uaccess.h | 18
-rw-r--r--  arch/tile/mm/fault.c | 4
-rw-r--r--  arch/tile/mm/highmem.c | 3
-rw-r--r--  arch/um/kernel/trap.c | 5
-rw-r--r--  arch/unicore32/mm/fault.c | 2
-rw-r--r--  arch/x86/include/asm/preempt.h | 8
-rw-r--r--  arch/x86/include/asm/smp.h | 10
-rw-r--r--  arch/x86/include/asm/topology.h | 2
-rw-r--r--  arch/x86/include/asm/uaccess.h | 15
-rw-r--r--  arch/x86/include/asm/uaccess_32.h | 6
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c | 6
-rw-r--r--  arch/x86/kernel/cpu/proc.c | 3
-rw-r--r--  arch/x86/kernel/i386_ksyms_32.c | 4
-rw-r--r--  arch/x86/kernel/process.c | 7
-rw-r--r--  arch/x86/kernel/smpboot.c | 42
-rw-r--r--  arch/x86/kernel/tsc_sync.c | 2
-rw-r--r--  arch/x86/kernel/x8664_ksyms_64.c | 4
-rw-r--r--  arch/x86/lib/thunk_32.S | 4
-rw-r--r--  arch/x86/lib/thunk_64.S | 4
-rw-r--r--  arch/x86/lib/usercopy_32.c | 6
-rw-r--r--  arch/x86/mm/fault.c | 5
-rw-r--r--  arch/x86/mm/highmem_32.c | 3
-rw-r--r--  arch/x86/mm/iomap_32.c | 2
-rw-r--r--  arch/xtensa/mm/fault.c | 4
-rw-r--r--  arch/xtensa/mm/highmem.c | 2
81 files changed, 286 insertions, 211 deletions
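Besides the pagefault rework, the other change repeated across the arch topology headers below is purely mechanical: topology_thread_cpumask() becomes topology_sibling_cpumask() (the mask of a CPU's SMT siblings), matching the "Replace cpu_**_mask() with topology_**_cpumask()" commit in the list above. A hypothetical caller updated for the new name, shown only as an illustration and not taken from this diff:

    int sibling;

    /* walk the SMT siblings of @cpu using the renamed accessor */
    for_each_cpu(sibling, topology_sibling_cpumask(cpu))
            pr_info("CPU%d: SMT sibling CPU%d\n", cpu, sibling);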
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index 9d0ac091a52a..4a905bd667e2 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -23,8 +23,7 @@
 #include <linux/smp.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
-
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 
 extern void die_if_kernel(char *,struct pt_regs *,long, unsigned long *);
 
@@ -107,7 +106,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
 
        /* If we're in an interrupt context, or have no user context,
           we must not take the fault.  */
-       if (!mm || in_atomic())
+       if (!mm || faulthandler_disabled())
                goto no_context;
 
 #ifdef CONFIG_ALPHA_LARGE_VMALLOC
diff --git a/arch/arc/include/asm/futex.h b/arch/arc/include/asm/futex.h
index 4dc64ddebece..05b5aaf5b0f9 100644
--- a/arch/arc/include/asm/futex.h
+++ b/arch/arc/include/asm/futex.h
@@ -53,7 +53,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
 		return -EFAULT;
 
-	pagefault_disable();	/* implies preempt_disable() */
+	pagefault_disable();
 
 	switch (op) {
 	case FUTEX_OP_SET:
@@ -75,7 +75,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 		ret = -ENOSYS;
 	}
 
-	pagefault_enable();	/* subsumes preempt_enable() */
+	pagefault_enable();
 
 	if (!ret) {
 		switch (cmp) {
@@ -104,7 +104,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 	return ret;
 }
 
-/* Compare-xchg with preemption disabled.
+/* Compare-xchg with pagefaults disabled.
  *  Notes:
  *      -Best-Effort: Exchg happens only if compare succeeds.
  *          If compare fails, returns; leaving retry/looping to upper layers
@@ -121,7 +121,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
 		return -EFAULT;
 
-	pagefault_disable();	/* implies preempt_disable() */
+	pagefault_disable();
 
 	/* TBD : can use llock/scond */
 	__asm__ __volatile__(
@@ -142,7 +142,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
 	: "r"(oldval), "r"(newval), "r"(uaddr), "ir"(-EFAULT)
 	: "cc", "memory");
 
-	pagefault_enable();	/* subsumes preempt_enable() */
+	pagefault_enable();
 
 	*uval = val;
 	return val;
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index 6a2e006cbcce..d948e4e9d89c 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -86,7 +86,7 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
 	 * If we're in an interrupt or have no user
 	 * context, we must not take the fault..
 	 */
-	if (in_atomic() || !mm)
+	if (faulthandler_disabled() || !mm)
 		goto no_context;
 
 	if (user_mode(regs))
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 4e78065a16aa..5eed82809d82 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -93,6 +93,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
+	preempt_disable();
 	__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
 	"1:	" TUSER(ldr) "	%1, [%4]\n"
 	"	teq	%1, %2\n"
@@ -104,6 +105,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	: "cc", "memory");
 
 	*uval = val;
+	preempt_enable();
+
 	return ret;
 }
 
@@ -124,7 +127,10 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
-	pagefault_disable();	/* implies preempt_disable() */
+#ifndef CONFIG_SMP
+	preempt_disable();
+#endif
+	pagefault_disable();
 
 	switch (op) {
 	case FUTEX_OP_SET:
@@ -146,7 +152,10 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 		ret = -ENOSYS;
 	}
 
-	pagefault_enable();	/* subsumes preempt_enable() */
+	pagefault_enable();
+#ifndef CONFIG_SMP
+	preempt_enable();
+#endif
 
 	if (!ret) {
 		switch (cmp) {
diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h
index 2fe85fff5cca..370f7a732900 100644
--- a/arch/arm/include/asm/topology.h
+++ b/arch/arm/include/asm/topology.h
@@ -18,7 +18,7 @@ extern struct cputopo_arm cpu_topology[NR_CPUS];
 #define topology_physical_package_id(cpu)	(cpu_topology[cpu].socket_id)
 #define topology_core_id(cpu)		(cpu_topology[cpu].core_id)
 #define topology_core_cpumask(cpu)	(&cpu_topology[cpu].core_sibling)
-#define topology_thread_cpumask(cpu)	(&cpu_topology[cpu].thread_sibling)
+#define topology_sibling_cpumask(cpu)	(&cpu_topology[cpu].thread_sibling)
 
 void init_cpu_topology(void);
 void store_cpu_topology(unsigned int cpuid);
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 6333d9c17875..0d629b8f973f 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -276,7 +276,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	 * If we're in an interrupt or have no user
 	 * context, we must not take the fault..
 	 */
-	if (in_atomic() || !mm)
+	if (faulthandler_disabled() || !mm)
 		goto no_context;
 
 	if (user_mode(regs))
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index b98895d9fe57..ee8dfa793989 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -59,6 +59,7 @@ void *kmap_atomic(struct page *page)
 	void *kmap;
 	int type;
 
+	preempt_disable();
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
@@ -121,6 +122,7 @@ void __kunmap_atomic(void *kvaddr)
 		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
 	}
 	pagefault_enable();
+	preempt_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
 
@@ -130,6 +132,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
 	int idx, type;
 	struct page *page = pfn_to_page(pfn);
 
+	preempt_disable();
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index 5f750dc96e0f..74069b3bd919 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -58,7 +58,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
-	pagefault_disable();	/* implies preempt_disable() */
+	pagefault_disable();
 
 	switch (op) {
 	case FUTEX_OP_SET:
@@ -85,7 +85,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 		ret = -ENOSYS;
 	}
 
-	pagefault_enable();	/* subsumes preempt_enable() */
+	pagefault_enable();
 
 	if (!ret) {
 		switch (cmp) {
diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h
index 7ebcd31ce51c..225ec3524fbf 100644
--- a/arch/arm64/include/asm/topology.h
+++ b/arch/arm64/include/asm/topology.h
@@ -18,7 +18,7 @@ extern struct cpu_topology cpu_topology[NR_CPUS];
 #define topology_physical_package_id(cpu)	(cpu_topology[cpu].cluster_id)
 #define topology_core_id(cpu)		(cpu_topology[cpu].core_id)
 #define topology_core_cpumask(cpu)	(&cpu_topology[cpu].core_sibling)
-#define topology_thread_cpumask(cpu)	(&cpu_topology[cpu].thread_sibling)
+#define topology_sibling_cpumask(cpu)	(&cpu_topology[cpu].thread_sibling)
 
 void init_cpu_topology(void);
 void store_cpu_topology(unsigned int cpuid);
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 96da13167d4a..0948d327d013 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -211,7 +211,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 	 * If we're in an interrupt or have no user context, we must not take
 	 * the fault.
 	 */
-	if (in_atomic() || !mm)
+	if (faulthandler_disabled() || !mm)
 		goto no_context;
 
 	if (user_mode(regs))
diff --git a/arch/avr32/include/asm/uaccess.h b/arch/avr32/include/asm/uaccess.h
index a46f7cf3e1ea..68cf638faf48 100644
--- a/arch/avr32/include/asm/uaccess.h
+++ b/arch/avr32/include/asm/uaccess.h
@@ -97,7 +97,8 @@ static inline __kernel_size_t __copy_from_user(void *to,
  * @x:   Value to copy to user space.
  * @ptr: Destination address, in user space.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
  *
  * This macro copies a single simple value from kernel space to user
  * space.  It supports simple types like char and int, but not larger
@@ -116,7 +117,8 @@ static inline __kernel_size_t __copy_from_user(void *to,
  * @x:   Variable to store result.
  * @ptr: Source address, in user space.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
  *
  * This macro copies a single simple variable from user space to kernel
  * space.  It supports simple types like char and int, but not larger
@@ -136,7 +138,8 @@ static inline __kernel_size_t __copy_from_user(void *to,
  * @x:   Value to copy to user space.
  * @ptr: Destination address, in user space.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
  *
  * This macro copies a single simple value from kernel space to user
  * space.  It supports simple types like char and int, but not larger
@@ -158,7 +161,8 @@ static inline __kernel_size_t __copy_from_user(void *to,
  * @x:   Variable to store result.
  * @ptr: Source address, in user space.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
  *
  * This macro copies a single simple variable from user space to kernel
  * space.  It supports simple types like char and int, but not larger
diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
index d223a8b57c1e..c03533937a9f 100644
--- a/arch/avr32/mm/fault.c
+++ b/arch/avr32/mm/fault.c
@@ -14,11 +14,11 @@
 #include <linux/pagemap.h>
 #include <linux/kdebug.h>
 #include <linux/kprobes.h>
+#include <linux/uaccess.h>
 
 #include <asm/mmu_context.h>
 #include <asm/sysreg.h>
 #include <asm/tlb.h>
-#include <asm/uaccess.h>
 
 #ifdef CONFIG_KPROBES
 static inline int notify_page_fault(struct pt_regs *regs, int trap)
@@ -81,7 +81,7 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
 	 * If we're in an interrupt or have no user context, we must
 	 * not take the fault...
 	 */
-	if (in_atomic() || !mm || regs->sr & SYSREG_BIT(GM))
+	if (faulthandler_disabled() || !mm || regs->sr & SYSREG_BIT(GM))
 		goto no_context;
 
 	local_irq_enable();
diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c
index 83f12f2ed9e3..3066d40a6db1 100644
--- a/arch/cris/mm/fault.c
+++ b/arch/cris/mm/fault.c
@@ -8,7 +8,7 @@
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/wait.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 #include <arch/system.h>
 
 extern int find_fixup_code(struct pt_regs *);
@@ -109,11 +109,11 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
 	info.si_code = SEGV_MAPERR;
 
 	/*
-	 * If we're in an interrupt or "atomic" operation or have no
+	 * If we're in an interrupt, have pagefaults disabled or have no
 	 * user context, we must not take the fault.
 	 */
 
-	if (in_atomic() || !mm)
+	if (faulthandler_disabled() || !mm)
 		goto no_context;
 
 	if (user_mode(regs))
diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c
index ec4917ddf678..61d99767fe16 100644
--- a/arch/frv/mm/fault.c
+++ b/arch/frv/mm/fault.c
@@ -19,9 +19,9 @@
 #include <linux/kernel.h>
 #include <linux/ptrace.h>
 #include <linux/hardirq.h>
+#include <linux/uaccess.h>
 
 #include <asm/pgtable.h>
-#include <asm/uaccess.h>
 #include <asm/gdb-stub.h>
 
 /*****************************************************************************/
@@ -78,7 +78,7 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
 	 * If we're in an interrupt or have no user
 	 * context, we must not take the fault..
 	 */
-	if (in_atomic() || !mm)
+	if (faulthandler_disabled() || !mm)
 		goto no_context;
 
 	if (user_mode(__frame))
diff --git a/arch/frv/mm/highmem.c b/arch/frv/mm/highmem.c
index bed9a9bd3c10..785344bbdc07 100644
--- a/arch/frv/mm/highmem.c
+++ b/arch/frv/mm/highmem.c
@@ -42,6 +42,7 @@ void *kmap_atomic(struct page *page)
 	unsigned long paddr;
 	int type;
 
+	preempt_disable();
 	pagefault_disable();
 	type = kmap_atomic_idx_push();
 	paddr = page_to_phys(page);
@@ -85,5 +86,6 @@ void __kunmap_atomic(void *kvaddr)
 	}
 	kmap_atomic_idx_pop();
 	pagefault_enable();
+	preempt_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
diff --git a/arch/hexagon/include/asm/uaccess.h b/arch/hexagon/include/asm/uaccess.h
index e4127e4d6a5b..f000a382bc7f 100644
--- a/arch/hexagon/include/asm/uaccess.h
+++ b/arch/hexagon/include/asm/uaccess.h
@@ -36,7 +36,8 @@
  * @addr: User space pointer to start of block to check
  * @size: Size of block to check
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
  *
  * Checks if a pointer to a block of memory in user space is valid.
  *
diff --git a/arch/ia64/include/asm/topology.h b/arch/ia64/include/asm/topology.h
index 6437ca21f61b..3ad8f6988363 100644
--- a/arch/ia64/include/asm/topology.h
+++ b/arch/ia64/include/asm/topology.h
@@ -53,7 +53,7 @@ void build_cpu_to_node_map(void);
 #define topology_physical_package_id(cpu)	(cpu_data(cpu)->socket_id)
 #define topology_core_id(cpu)			(cpu_data(cpu)->core_id)
 #define topology_core_cpumask(cpu)		(&cpu_core_map[cpu])
-#define topology_thread_cpumask(cpu)		(&per_cpu(cpu_sibling_map, cpu))
+#define topology_sibling_cpumask(cpu)		(&per_cpu(cpu_sibling_map, cpu))
 #endif
 
 extern void arch_fix_phys_package_id(int num, u32 slot);
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index ba5ba7accd0d..70b40d1205a6 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -11,10 +11,10 @@
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
 #include <linux/prefetch.h>
+#include <linux/uaccess.h>
 
 #include <asm/pgtable.h>
 #include <asm/processor.h>
-#include <asm/uaccess.h>
 
 extern int die(char *, struct pt_regs *, long);
 
@@ -96,7 +96,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	/*
 	 * If we're in an interrupt or have no user context, we must not take the fault..
 	 */
-	if (in_atomic() || !mm)
+	if (faulthandler_disabled() || !mm)
 		goto no_context;
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
diff --git a/arch/m32r/include/asm/uaccess.h b/arch/m32r/include/asm/uaccess.h
index 71adff209405..cac7014daef3 100644
--- a/arch/m32r/include/asm/uaccess.h
+++ b/arch/m32r/include/asm/uaccess.h
@@ -91,7 +91,8 @@ static inline void set_fs(mm_segment_t s)
  * @addr: User space pointer to start of block to check
  * @size: Size of block to check
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
  *
  * Checks if a pointer to a block of memory in user space is valid.
  *
@@ -155,7 +156,8 @@ extern int fixup_exception(struct pt_regs *regs);
  * @x:   Variable to store result.
  * @ptr: Source address, in user space.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
  *
  * This macro copies a single simple variable from user space to kernel
  * space.  It supports simple types like char and int, but not larger
@@ -175,7 +177,8 @@ extern int fixup_exception(struct pt_regs *regs);
  * @x:   Value to copy to user space.
  * @ptr: Destination address, in user space.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
  *
  * This macro copies a single simple value from kernel space to user
  * space.  It supports simple types like char and int, but not larger
@@ -194,7 +197,8 @@ extern int fixup_exception(struct pt_regs *regs);
  * @x:   Variable to store result.
  * @ptr: Source address, in user space.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
  *
  * This macro copies a single simple variable from user space to kernel
  * space.  It supports simple types like char and int, but not larger
@@ -274,7 +278,8 @@ do { \
  * @x:   Value to copy to user space.
  * @ptr: Destination address, in user space.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
  *
  * This macro copies a single simple value from kernel space to user
  * space.  It supports simple types like char and int, but not larger
@@ -568,7 +573,8 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned lon
  * @from: Source address, in kernel space.
  * @n:    Number of bytes to copy.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
  *
  * Copy data from kernel space to user space.  Caller must check
  * the specified block with access_ok() before calling this function.
@@ -588,7 +594,8 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned lon
  * @from: Source address, in kernel space.
  * @n:    Number of bytes to copy.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
  *
  * Copy data from kernel space to user space.
  *
@@ -606,7 +613,8 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned lon
  * @from: Source address, in user space.
  * @n:    Number of bytes to copy.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
  *
  * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
@@ -626,7 +634,8 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned lon
  * @from: Source address, in user space.
  * @n:    Number of bytes to copy.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
  *
  * Copy data from user space to kernel space.
 *
@@ -677,7 +686,8 @@ unsigned long clear_user(void __user *mem, unsigned long len);
  * strlen_user: - Get the size of a string in user space.
  * @str: The string to measure.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
  *
  * Get the size of a NUL-terminated string in user space.
 *
diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c
index e3d4d4890104..8f9875b7933d 100644
--- a/arch/m32r/mm/fault.c
+++ b/arch/m32r/mm/fault.c
@@ -24,9 +24,9 @@
 #include <linux/vt_kern.h>		/* For unblank_screen() */
 #include <linux/highmem.h>
 #include <linux/module.h>
+#include <linux/uaccess.h>
 
 #include <asm/m32r.h>
-#include <asm/uaccess.h>
 #include <asm/hardirq.h>
 #include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
@@ -111,10 +111,10 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
 	mm = tsk->mm;
 
 	/*
-	 * If we're in an interrupt or have no user context or are running in an
-	 * atomic region then we must not take the fault..
+	 * If we're in an interrupt or have no user context or have pagefaults
+	 * disabled then we must not take the fault.
 	 */
-	if (in_atomic() || !mm)
+	if (faulthandler_disabled() || !mm)
 		goto bad_area_nosemaphore;
 
 	if (error_code & ACE_USERMODE)
diff --git a/arch/m68k/include/asm/irqflags.h b/arch/m68k/include/asm/irqflags.h
index a823cd73dc09..b5941818346f 100644
--- a/arch/m68k/include/asm/irqflags.h
+++ b/arch/m68k/include/asm/irqflags.h
@@ -2,9 +2,6 @@
 #define _M68K_IRQFLAGS_H
 
 #include <linux/types.h>
-#ifdef CONFIG_MMU
-#include <linux/preempt_mask.h>
-#endif
 #include <linux/preempt.h>
 #include <asm/thread_info.h>
 #include <asm/entry.h>
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index b2f04aee46ec..6a94cdd0c830 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -10,10 +10,10 @@
 #include <linux/ptrace.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
+#include <linux/uaccess.h>
 
 #include <asm/setup.h>
 #include <asm/traps.h>
-#include <asm/uaccess.h>
 #include <asm/pgalloc.h>
 
 extern void die_if_kernel(char *, struct pt_regs *, long);
@@ -81,7 +81,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
 	 * If we're in an interrupt or have no user
 	 * context, we must not take the fault..
 	 */
-	if (in_atomic() || !mm)
+	if (faulthandler_disabled() || !mm)
 		goto no_context;
 
 	if (user_mode(regs))
diff --git a/arch/metag/mm/fault.c b/arch/metag/mm/fault.c
index 2de5dc695a87..f57edca63609 100644
--- a/arch/metag/mm/fault.c
+++ b/arch/metag/mm/fault.c
@@ -105,7 +105,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
 
 	mm = tsk->mm;
 
-	if (in_atomic() || !mm)
+	if (faulthandler_disabled() || !mm)
 		goto no_context;
 
 	if (user_mode(regs))
diff --git a/arch/metag/mm/highmem.c b/arch/metag/mm/highmem.c
index d71f621a2c0b..807f1b1c4e65 100644
--- a/arch/metag/mm/highmem.c
+++ b/arch/metag/mm/highmem.c
@@ -43,7 +43,7 @@ void *kmap_atomic(struct page *page)
 	unsigned long vaddr;
 	int type;
 
-	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+	preempt_disable();
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
@@ -82,6 +82,7 @@ void __kunmap_atomic(void *kvaddr)
 	}
 
 	pagefault_enable();
+	preempt_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
 
@@ -95,6 +96,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
 	unsigned long vaddr;
 	int type;
 
+	preempt_disable();
 	pagefault_disable();
 
 	type = kmap_atomic_idx_push();
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
index 62942fd12672..331b0d35f89c 100644
--- a/arch/microblaze/include/asm/uaccess.h
+++ b/arch/microblaze/include/asm/uaccess.h
@@ -178,7 +178,8 @@ extern long __user_bad(void);
  * @x:   Variable to store result.
  * @ptr: Source address, in user space.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
  *
  * This macro copies a single simple variable from user space to kernel
  * space.  It supports simple types like char and int, but not larger
@@ -290,7 +291,8 @@ extern long __user_bad(void);
  * @x:   Value to copy to user space.
  * @ptr: Destination address, in user space.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
  *
  * This macro copies a single simple value from kernel space to user
  * space.  It supports simple types like char and int, but not larger
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index d46a5ebb7570..177dfc003643 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -107,14 +107,14 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 	if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11)
 		is_write = 0;
 
-	if (unlikely(in_atomic() || !mm)) {
+	if (unlikely(faulthandler_disabled() || !mm)) {
 		if (kernel_mode(regs))
 			goto bad_area_nosemaphore;
 
-		/* in_atomic() in user mode is really bad,
+		/* faulthandler_disabled() in user mode is really bad,
 		   as is current->mm == NULL. */
-		pr_emerg("Page fault in user mode with in_atomic(), mm = %p\n",
+		pr_emerg("Page fault in user mode with faulthandler_disabled(), mm = %p\n",
 			 mm);
 		pr_emerg("r15 = %lx MSR = %lx\n",
 			 regs->r15, regs->msr);
 		die("Weird page fault", regs, SIGSEGV);
diff --git a/arch/microblaze/mm/highmem.c b/arch/microblaze/mm/highmem.c
index 5a92576fad92..2fcc5a52d84d 100644
--- a/arch/microblaze/mm/highmem.c
+++ b/arch/microblaze/mm/highmem.c
@@ -37,7 +37,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 	unsigned long vaddr;
 	int idx, type;
 
-	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+	preempt_disable();
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
@@ -63,6 +63,7 @@ void __kunmap_atomic(void *kvaddr)
 
 	if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
 		pagefault_enable();
+		preempt_enable();
 		return;
 	}
 
@@ -84,5 +85,6 @@ void __kunmap_atomic(void *kvaddr)
 #endif
 	kmap_atomic_idx_pop();
 	pagefault_enable();
+	preempt_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
diff --git a/arch/mips/include/asm/topology.h b/arch/mips/include/asm/topology.h
index 3e307ec2afba..7afda4150a59 100644
--- a/arch/mips/include/asm/topology.h
+++ b/arch/mips/include/asm/topology.h
@@ -15,7 +15,7 @@
 #define topology_physical_package_id(cpu)	(cpu_data[cpu].package)
 #define topology_core_id(cpu)			(cpu_data[cpu].core)
 #define topology_core_cpumask(cpu)		(&cpu_core_map[cpu])
-#define topology_thread_cpumask(cpu)		(&cpu_sibling_map[cpu])
+#define topology_sibling_cpumask(cpu)		(&cpu_sibling_map[cpu])
 #endif
 
 #endif /* __ASM_TOPOLOGY_H */
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index bf8b32450ef6..9722357d2854 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -103,7 +103,8 @@ extern u64 __ua_limit;
  * @addr: User space pointer to start of block to check
  * @size: Size of block to check
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
  *
  * Checks if a pointer to a block of memory in user space is valid.
  *
@@ -138,7 +139,8 @@ extern u64 __ua_limit;
  * @x:   Value to copy to user space.
  * @ptr: Destination address, in user space.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
  *
  * This macro copies a single simple value from kernel space to user
  * space.  It supports simple types like char and int, but not larger
@@ -157,7 +159,8 @@ extern u64 __ua_limit;
  * @x:   Variable to store result.
  * @ptr: Source address, in user space.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
  *
  * This macro copies a single simple variable from user space to kernel
  * space.  It supports simple types like char and int, but not larger
@@ -177,7 +180,8 @@ extern u64 __ua_limit;
  * @x:   Value to copy to user space.
  * @ptr: Destination address, in user space.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
  *
  * This macro copies a single simple value from kernel space to user
  * space.  It supports simple types like char and int, but not larger
@@ -199,7 +203,8 @@ extern u64 __ua_limit;
  * @x:   Variable to store result.
  * @ptr: Source address, in user space.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
  *
  * This macro copies a single simple variable from user space to kernel
  * space.  It supports simple types like char and int, but not larger
@@ -498,7 +503,8 @@ extern void __put_user_unknown(void);
  * @x:   Value to copy to user space.
  * @ptr: Destination address, in user space.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
  *
  * This macro copies a single simple value from kernel space to user
  * space.  It supports simple types like char and int, but not larger
@@ -517,7 +523,8 @@ extern void __put_user_unknown(void);
  * @x:   Variable to store result.
  * @ptr: Source address, in user space.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
  *
  * This macro copies a single simple variable from user space to kernel
  * space.  It supports simple types like char and int, but not larger
@@ -537,7 +544,8 @@ extern void __put_user_unknown(void);
  * @x:   Value to copy to user space.
  * @ptr: Destination address, in user space.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
  *
  * This macro copies a single simple value from kernel space to user
  * space.  It supports simple types like char and int, but not larger
@@ -559,7 +567,8 @@ extern void __put_user_unknown(void);
  * @x:   Variable to store result.
  * @ptr: Source address, in user space.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
  *
  * This macro copies a single simple variable from user space to kernel
  * space.  It supports simple types like char and int, but not larger
@@ -815,7 +824,8 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
  * @from: Source address, in kernel space.
  * @n:    Number of bytes to copy.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
  *
  * Copy data from kernel space to user space.  Caller must check
  * the specified block with access_ok() before calling this function.
@@ -888,7 +898,8 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
  * @from: Source address, in kernel space.
  * @n:    Number of bytes to copy.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
  *
  * Copy data from kernel space to user space.
  *
@@ -1075,7 +1086,8 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
  * @from: Source address, in user space.
  * @n:    Number of bytes to copy.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
  *
  * Copy data from user space to kernel space.  Caller must check
  * the specified block with access_ok() before calling this function.
@@ -1107,7 +1119,8 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
  * @from: Source address, in user space.
  * @n:    Number of bytes to copy.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
  *
  * Copy data from user space to kernel space.
 *
@@ -1329,7 +1342,8 @@ strncpy_from_user(char *__to, const char __user *__from, long __len)
  * strlen_user: - Get the size of a string in user space.
  * @str: The string to measure.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
  *
  * Get the size of a NUL-terminated string in user space.
  *
@@ -1398,7 +1412,8 @@ static inline long __strnlen_user(const char __user *s, long n)
  * strnlen_user: - Get the size of a string in user space.
  * @str: The string to measure.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ * enabled.
  *
  * Get the size of a NUL-terminated string in user space.
 *
diff --git a/arch/mips/kernel/signal-common.h b/arch/mips/kernel/signal-common.h
index 06805e09bcd3..0b85f827cd18 100644
--- a/arch/mips/kernel/signal-common.h
+++ b/arch/mips/kernel/signal-common.h
@@ -28,12 +28,7 @@ extern void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
 extern int fpcsr_pending(unsigned int __user *fpcsr);
 
 /* Make sure we will not lose FPU ownership */
-#ifdef CONFIG_PREEMPT
-#define lock_fpu_owner()	preempt_disable()
-#define unlock_fpu_owner()	preempt_enable()
-#else
-#define lock_fpu_owner()	pagefault_disable()
-#define unlock_fpu_owner()	pagefault_enable()
-#endif
+#define lock_fpu_owner()	({ preempt_disable(); pagefault_disable(); })
+#define unlock_fpu_owner()	({ pagefault_enable(); preempt_enable(); })
 
 #endif /* __SIGNAL_COMMON_H */
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 7ff8637e530d..36c0f26fac6b 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -21,10 +21,10 @@
 #include <linux/module.h>
 #include <linux/kprobes.h>
 #include <linux/perf_event.h>
+#include <linux/uaccess.h>
 
 #include <asm/branch.h>
 #include <asm/mmu_context.h>
-#include <asm/uaccess.h>
 #include <asm/ptrace.h>
 #include <asm/highmem.h>		/* For VMALLOC_END */
 #include <linux/kdebug.h>
@@ -94,7 +94,7 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
 	 * If we're in an interrupt or have no user
 	 * context, we must not take the fault..
 	 */
-	if (in_atomic() || !mm)
+	if (faulthandler_disabled() || !mm)
 		goto bad_area_nosemaphore;
 
 	if (user_mode(regs))
diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c
index da815d295239..11661cbc11a8 100644
--- a/arch/mips/mm/highmem.c
+++ b/arch/mips/mm/highmem.c
@@ -47,7 +47,7 @@ void *kmap_atomic(struct page *page)
 	unsigned long vaddr;
 	int idx, type;
 
-	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+	preempt_disable();
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
@@ -72,6 +72,7 @@ void __kunmap_atomic(void *kvaddr)
 
 	if (vaddr < FIXADDR_START) { // FIXME
 		pagefault_enable();
+		preempt_enable();
 		return;
 	}
 
@@ -92,6 +93,7 @@ void __kunmap_atomic(void *kvaddr)
 #endif
 	kmap_atomic_idx_pop();
 	pagefault_enable();
+	preempt_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
 
@@ -104,6 +106,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
 	unsigned long vaddr;
 	int idx, type;
 
+	preempt_disable();
 	pagefault_disable();
 
 	type = kmap_atomic_idx_push();
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index faa5c9822ecc..198a3147dd7d 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -90,6 +90,7 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
 
 	BUG_ON(Page_dcache_dirty(page));
 
+	preempt_disable();
 	pagefault_disable();
 	idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
 	idx += in_interrupt() ? FIX_N_COLOURS : 0;
@@ -152,6 +153,7 @@ void kunmap_coherent(void)
 	write_c0_entryhi(old_ctx);
 	local_irq_restore(flags);
 	pagefault_enable();
+	preempt_enable();
 }
 
 void copy_user_highpage(struct page *to, struct page *from,
diff --git a/arch/mn10300/include/asm/highmem.h b/arch/mn10300/include/asm/highmem.h
index 2fbbe4d920aa..1ddea5afba09 100644
--- a/arch/mn10300/include/asm/highmem.h
+++ b/arch/mn10300/include/asm/highmem.h
@@ -75,6 +75,7 @@ static inline void *kmap_atomic(struct page *page)
75 unsigned long vaddr; 75 unsigned long vaddr;
76 int idx, type; 76 int idx, type;
77 77
78 preempt_disable();
78 pagefault_disable(); 79 pagefault_disable();
79 if (page < highmem_start_page) 80 if (page < highmem_start_page)
80 return page_address(page); 81 return page_address(page);
@@ -98,6 +99,7 @@ static inline void __kunmap_atomic(unsigned long vaddr)
98 99
99 if (vaddr < FIXADDR_START) { /* FIXME */ 100 if (vaddr < FIXADDR_START) { /* FIXME */
100 pagefault_enable(); 101 pagefault_enable();
102 preempt_enable();
101 return; 103 return;
102 } 104 }
103 105
@@ -122,6 +124,7 @@ static inline void __kunmap_atomic(unsigned long vaddr)
122 124
123 kmap_atomic_idx_pop(); 125 kmap_atomic_idx_pop();
124 pagefault_enable(); 126 pagefault_enable();
127 preempt_enable();
125} 128}
126#endif /* __KERNEL__ */ 129#endif /* __KERNEL__ */
127 130
diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c
index 0c2cc5d39c8e..4a1d181ed32f 100644
--- a/arch/mn10300/mm/fault.c
+++ b/arch/mn10300/mm/fault.c
@@ -23,8 +23,8 @@
23#include <linux/interrupt.h> 23#include <linux/interrupt.h>
24#include <linux/init.h> 24#include <linux/init.h>
25#include <linux/vt_kern.h> /* For unblank_screen() */ 25#include <linux/vt_kern.h> /* For unblank_screen() */
26#include <linux/uaccess.h>
26 27
27#include <asm/uaccess.h>
28#include <asm/pgalloc.h> 28#include <asm/pgalloc.h>
29#include <asm/hardirq.h> 29#include <asm/hardirq.h>
30#include <asm/cpu-regs.h> 30#include <asm/cpu-regs.h>
@@ -168,7 +168,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long fault_code,
168 * If we're in an interrupt or have no user 168 * If we're in an interrupt or have no user
169 * context, we must not take the fault.. 169 * context, we must not take the fault..
170 */ 170 */
171 if (in_atomic() || !mm) 171 if (faulthandler_disabled() || !mm)
172 goto no_context; 172 goto no_context;
173 173
174 if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR) 174 if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR)
diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c
index 0c9b6afe69e9..b51878b0c6b8 100644
--- a/arch/nios2/mm/fault.c
+++ b/arch/nios2/mm/fault.c
@@ -77,7 +77,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause,
77 * If we're in an interrupt or have no user 77 * If we're in an interrupt or have no user
78 * context, we must not take the fault.. 78 * context, we must not take the fault..
79 */ 79 */
80 if (in_atomic() || !mm) 80 if (faulthandler_disabled() || !mm)
81 goto bad_area_nosemaphore; 81 goto bad_area_nosemaphore;
82 82
83 if (user_mode(regs)) 83 if (user_mode(regs))
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index de65f66ea64e..ec2df4bab302 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -142,6 +142,7 @@ static inline void kunmap(struct page *page)
142 142
143static inline void *kmap_atomic(struct page *page) 143static inline void *kmap_atomic(struct page *page)
144{ 144{
145 preempt_disable();
145 pagefault_disable(); 146 pagefault_disable();
146 return page_address(page); 147 return page_address(page);
147} 148}
@@ -150,6 +151,7 @@ static inline void __kunmap_atomic(void *addr)
150{ 151{
151 flush_kernel_dcache_page_addr(addr); 152 flush_kernel_dcache_page_addr(addr);
152 pagefault_enable(); 153 pagefault_enable();
154 preempt_enable();
153} 155}
154 156
155#define kmap_atomic_prot(page, prot) kmap_atomic(page) 157#define kmap_atomic_prot(page, prot) kmap_atomic(page)
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 47ee620d15d2..6548fd1d2e62 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -26,9 +26,9 @@
26#include <linux/console.h> 26#include <linux/console.h>
27#include <linux/bug.h> 27#include <linux/bug.h>
28#include <linux/ratelimit.h> 28#include <linux/ratelimit.h>
29#include <linux/uaccess.h>
29 30
30#include <asm/assembly.h> 31#include <asm/assembly.h>
31#include <asm/uaccess.h>
32#include <asm/io.h> 32#include <asm/io.h>
33#include <asm/irq.h> 33#include <asm/irq.h>
34#include <asm/traps.h> 34#include <asm/traps.h>
@@ -800,7 +800,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
800 * unless pagefault_disable() was called before. 800 * unless pagefault_disable() was called before.
801 */ 801 */
802 802
803 if (fault_space == 0 && !in_atomic()) 803 if (fault_space == 0 && !faulthandler_disabled())
804 { 804 {
805 pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC); 805 pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
806 parisc_terminate("Kernel Fault", regs, code, fault_address); 806 parisc_terminate("Kernel Fault", regs, code, fault_address);
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index e5120e653240..15503adddf4f 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -15,8 +15,8 @@
15#include <linux/sched.h> 15#include <linux/sched.h>
16#include <linux/interrupt.h> 16#include <linux/interrupt.h>
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/uaccess.h>
18 19
19#include <asm/uaccess.h>
20#include <asm/traps.h> 20#include <asm/traps.h>
21 21
22/* Various important other fields */ 22/* Various important other fields */
@@ -207,7 +207,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
207 int fault; 207 int fault;
208 unsigned int flags; 208 unsigned int flags;
209 209
210 if (in_atomic()) 210 if (pagefault_disabled())
211 goto no_context; 211 goto no_context;
212 212
213 tsk = current; 213 tsk = current;
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 5f1048eaa5b6..8b3b46b7b0f2 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -87,7 +87,7 @@ static inline int prrn_is_enabled(void)
87#include <asm/smp.h> 87#include <asm/smp.h>
88 88
89#define topology_physical_package_id(cpu) (cpu_to_chip_id(cpu)) 89#define topology_physical_package_id(cpu) (cpu_to_chip_id(cpu))
90#define topology_thread_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu)) 90#define topology_sibling_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
91#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu)) 91#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
92#define topology_core_id(cpu) (cpu_to_core_id(cpu)) 92#define topology_core_id(cpu) (cpu_to_core_id(cpu))
93#endif 93#endif
diff --git a/arch/powerpc/lib/vmx-helper.c b/arch/powerpc/lib/vmx-helper.c
index 3cf529ceec5b..ac93a3bd2730 100644
--- a/arch/powerpc/lib/vmx-helper.c
+++ b/arch/powerpc/lib/vmx-helper.c
@@ -27,11 +27,11 @@ int enter_vmx_usercopy(void)
27 if (in_interrupt()) 27 if (in_interrupt())
28 return 0; 28 return 0;
29 29
30 /* This acts as preempt_disable() as well and will make 30 preempt_disable();
31 * enable_kernel_altivec(). We need to disable page faults 31 /*
32 * as they can call schedule and thus make us lose the VMX 32 * We need to disable page faults as they can call schedule and
33 * context. So on page faults, we just fail which will cause 33 * thus make us lose the VMX context. So on page faults, we just
34 * a fallback to the normal non-vmx copy. 34 * fail which will cause a fallback to the normal non-vmx copy.
35 */ 35 */
36 pagefault_disable(); 36 pagefault_disable();
37 37
@@ -47,6 +47,7 @@ int enter_vmx_usercopy(void)
47int exit_vmx_usercopy(void) 47int exit_vmx_usercopy(void)
48{ 48{
49 pagefault_enable(); 49 pagefault_enable();
50 preempt_enable();
50 return 0; 51 return 0;
51} 52}
52 53
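enter_vmx_usercopy() used to get preemption disabled for free from pagefault_disable(); with the two decoupled it now takes both explicitly, and exit_vmx_usercopy() drops them in reverse order. A hedged sketch of a caller (the copy helpers named here are hypothetical):

	unsigned long example_vmx_copy(void __user *to, const void *from, unsigned long n)
	{
		if (!enter_vmx_usercopy())
			/* 0 means VMX is unusable here: plain fallback copy */
			return fallback_copy_to_user(to, from, n);	/* hypothetical */

		/* VMX state is live; a page fault simply fails the copy fast. */
		n = vmx_copy_to_user(to, from, n);			/* hypothetical */

		exit_vmx_usercopy();	/* pagefault_enable(); preempt_enable(); */
		return n;
	}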
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index b396868d2aa7..6d535973b200 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -33,13 +33,13 @@
33#include <linux/ratelimit.h> 33#include <linux/ratelimit.h>
34#include <linux/context_tracking.h> 34#include <linux/context_tracking.h>
35#include <linux/hugetlb.h> 35#include <linux/hugetlb.h>
36#include <linux/uaccess.h>
36 37
37#include <asm/firmware.h> 38#include <asm/firmware.h>
38#include <asm/page.h> 39#include <asm/page.h>
39#include <asm/pgtable.h> 40#include <asm/pgtable.h>
40#include <asm/mmu.h> 41#include <asm/mmu.h>
41#include <asm/mmu_context.h> 42#include <asm/mmu_context.h>
42#include <asm/uaccess.h>
43#include <asm/tlbflush.h> 43#include <asm/tlbflush.h>
44#include <asm/siginfo.h> 44#include <asm/siginfo.h>
45#include <asm/debug.h> 45#include <asm/debug.h>
@@ -272,15 +272,16 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
272 if (!arch_irq_disabled_regs(regs)) 272 if (!arch_irq_disabled_regs(regs))
273 local_irq_enable(); 273 local_irq_enable();
274 274
275 if (in_atomic() || mm == NULL) { 275 if (faulthandler_disabled() || mm == NULL) {
276 if (!user_mode(regs)) { 276 if (!user_mode(regs)) {
277 rc = SIGSEGV; 277 rc = SIGSEGV;
278 goto bail; 278 goto bail;
279 } 279 }
280 /* in_atomic() in user mode is really bad, 280 /* faulthandler_disabled() in user mode is really bad,
281 as is current->mm == NULL. */ 281 as is current->mm == NULL. */
282 printk(KERN_EMERG "Page fault in user mode with " 282 printk(KERN_EMERG "Page fault in user mode with "
283 "in_atomic() = %d mm = %p\n", in_atomic(), mm); 283 "faulthandler_disabled() = %d mm = %p\n",
284 faulthandler_disabled(), mm);
284 printk(KERN_EMERG "NIP = %lx MSR = %lx\n", 285 printk(KERN_EMERG "NIP = %lx MSR = %lx\n",
285 regs->nip, regs->msr); 286 regs->nip, regs->msr);
286 die("Weird page fault", regs, SIGSEGV); 287 die("Weird page fault", regs, SIGSEGV);
diff --git a/arch/powerpc/mm/highmem.c b/arch/powerpc/mm/highmem.c
index e7450bdbe83a..e292c8a60952 100644
--- a/arch/powerpc/mm/highmem.c
+++ b/arch/powerpc/mm/highmem.c
@@ -34,7 +34,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
34 unsigned long vaddr; 34 unsigned long vaddr;
35 int idx, type; 35 int idx, type;
36 36
37 /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ 37 preempt_disable();
38 pagefault_disable(); 38 pagefault_disable();
39 if (!PageHighMem(page)) 39 if (!PageHighMem(page))
40 return page_address(page); 40 return page_address(page);
@@ -59,6 +59,7 @@ void __kunmap_atomic(void *kvaddr)
59 59
60 if (vaddr < __fix_to_virt(FIX_KMAP_END)) { 60 if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
61 pagefault_enable(); 61 pagefault_enable();
62 preempt_enable();
62 return; 63 return;
63 } 64 }
64 65
@@ -82,5 +83,6 @@ void __kunmap_atomic(void *kvaddr)
82 83
83 kmap_atomic_idx_pop(); 84 kmap_atomic_idx_pop();
84 pagefault_enable(); 85 pagefault_enable();
86 preempt_enable();
85} 87}
86EXPORT_SYMBOL(__kunmap_atomic); 88EXPORT_SYMBOL(__kunmap_atomic);
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index cbd3d069897f..723a099f6be3 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -217,7 +217,7 @@ static DEFINE_RAW_SPINLOCK(tlbivax_lock);
217static int mm_is_core_local(struct mm_struct *mm) 217static int mm_is_core_local(struct mm_struct *mm)
218{ 218{
219 return cpumask_subset(mm_cpumask(mm), 219 return cpumask_subset(mm_cpumask(mm),
220 topology_thread_cpumask(smp_processor_id())); 220 topology_sibling_cpumask(smp_processor_id()));
221} 221}
222 222
223struct tlb_flush_param { 223struct tlb_flush_param {
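topology_thread_cpumask() becomes topology_sibling_cpumask() throughout; mm_is_core_local() above is a typical caller. A small usage sketch with the renamed accessor (the helper name is made up):

	/* How many SMT siblings, including @cpu itself, share @cpu's core. */
	static unsigned int example_smt_width(int cpu)
	{
		return cpumask_weight(topology_sibling_cpumask(cpu));
	}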
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index b1453a2ae1ca..4990f6c66288 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -22,7 +22,8 @@ DECLARE_PER_CPU(struct cpu_topology_s390, cpu_topology);
22 22
23#define topology_physical_package_id(cpu) (per_cpu(cpu_topology, cpu).socket_id) 23#define topology_physical_package_id(cpu) (per_cpu(cpu_topology, cpu).socket_id)
24#define topology_thread_id(cpu) (per_cpu(cpu_topology, cpu).thread_id) 24#define topology_thread_id(cpu) (per_cpu(cpu_topology, cpu).thread_id)
25#define topology_thread_cpumask(cpu) (&per_cpu(cpu_topology, cpu).thread_mask) 25#define topology_sibling_cpumask(cpu) \
26 (&per_cpu(cpu_topology, cpu).thread_mask)
26#define topology_core_id(cpu) (per_cpu(cpu_topology, cpu).core_id) 27#define topology_core_id(cpu) (per_cpu(cpu_topology, cpu).core_id)
27#define topology_core_cpumask(cpu) (&per_cpu(cpu_topology, cpu).core_mask) 28#define topology_core_cpumask(cpu) (&per_cpu(cpu_topology, cpu).core_mask)
28#define topology_book_id(cpu) (per_cpu(cpu_topology, cpu).book_id) 29#define topology_book_id(cpu) (per_cpu(cpu_topology, cpu).book_id)
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index d64a7a62164f..9dd4cc47ddc7 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -98,7 +98,8 @@ static inline unsigned long extable_fixup(const struct exception_table_entry *x)
98 * @from: Source address, in user space. 98 * @from: Source address, in user space.
99 * @n: Number of bytes to copy. 99 * @n: Number of bytes to copy.
100 * 100 *
101 * Context: User context only. This function may sleep. 101 * Context: User context only. This function may sleep if pagefaults are
102 * enabled.
102 * 103 *
103 * Copy data from user space to kernel space. Caller must check 104 * Copy data from user space to kernel space. Caller must check
104 * the specified block with access_ok() before calling this function. 105 * the specified block with access_ok() before calling this function.
@@ -118,7 +119,8 @@ unsigned long __must_check __copy_from_user(void *to, const void __user *from,
118 * @from: Source address, in kernel space. 119 * @from: Source address, in kernel space.
119 * @n: Number of bytes to copy. 120 * @n: Number of bytes to copy.
120 * 121 *
121 * Context: User context only. This function may sleep. 122 * Context: User context only. This function may sleep if pagefaults are
123 * enabled.
122 * 124 *
123 * Copy data from kernel space to user space. Caller must check 125 * Copy data from kernel space to user space. Caller must check
124 * the specified block with access_ok() before calling this function. 126 * the specified block with access_ok() before calling this function.
@@ -264,7 +266,8 @@ int __get_user_bad(void) __attribute__((noreturn));
264 * @from: Source address, in kernel space. 266 * @from: Source address, in kernel space.
265 * @n: Number of bytes to copy. 267 * @n: Number of bytes to copy.
266 * 268 *
267 * Context: User context only. This function may sleep. 269 * Context: User context only. This function may sleep if pagefaults are
270 * enabled.
268 * 271 *
269 * Copy data from kernel space to user space. 272 * Copy data from kernel space to user space.
270 * 273 *
@@ -290,7 +293,8 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
290 * @from: Source address, in user space. 293 * @from: Source address, in user space.
291 * @n: Number of bytes to copy. 294 * @n: Number of bytes to copy.
292 * 295 *
293 * Context: User context only. This function may sleep. 296 * Context: User context only. This function may sleep if pagefaults are
297 * enabled.
294 * 298 *
295 * Copy data from user space to kernel space. 299 * Copy data from user space to kernel space.
296 * 300 *
@@ -348,7 +352,8 @@ static inline unsigned long strnlen_user(const char __user *src, unsigned long n
348 * strlen_user: - Get the size of a string in user space. 352 * strlen_user: - Get the size of a string in user space.
349 * @str: The string to measure. 353 * @str: The string to measure.
350 * 354 *
351 * Context: User context only. This function may sleep. 355 * Context: User context only. This function may sleep if pagefaults are
356 * enabled.
352 * 357 *
353 * Get the size of a NUL-terminated string in user space. 358 * Get the size of a NUL-terminated string in user space.
354 * 359 *
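The kerneldoc tweak ("may sleep if pagefaults are enabled") captures the new contract: the uaccess primitives only sleep when pagefaults are enabled; inside a pagefault_disable() section they fail instead of faulting pages in. A hedged sketch of the two calling conventions side by side (the wrapper is illustrative, not an s390 helper):

	static long example_read_user(void *dst, const void __user *src,
				      size_t len, bool atomic)
	{
		unsigned long left;

		if (!atomic)
			/* Normal user context: may sleep to fault the page in. */
			return copy_from_user(dst, src, len) ? -EFAULT : 0;

		/* Pagefaults disabled: never sleeps, just reports failure. */
		pagefault_disable();
		left = __copy_from_user_inatomic(dst, src, len);
		pagefault_enable();

		return left ? -EFAULT : 0;
	}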
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 76515bcea2f1..4c8f5d7f9c23 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -399,7 +399,7 @@ static inline int do_exception(struct pt_regs *regs, int access)
399 * user context. 399 * user context.
400 */ 400 */
401 fault = VM_FAULT_BADCONTEXT; 401 fault = VM_FAULT_BADCONTEXT;
402 if (unlikely(!user_space_fault(regs) || in_atomic() || !mm)) 402 if (unlikely(!user_space_fault(regs) || faulthandler_disabled() || !mm))
403 goto out; 403 goto out;
404 404
405 address = trans_exc_code & __FAIL_ADDR_MASK; 405 address = trans_exc_code & __FAIL_ADDR_MASK;
diff --git a/arch/score/include/asm/uaccess.h b/arch/score/include/asm/uaccess.h
index ab66ddde777b..20a3591225cc 100644
--- a/arch/score/include/asm/uaccess.h
+++ b/arch/score/include/asm/uaccess.h
@@ -36,7 +36,8 @@
36 * @addr: User space pointer to start of block to check 36 * @addr: User space pointer to start of block to check
37 * @size: Size of block to check 37 * @size: Size of block to check
38 * 38 *
39 * Context: User context only. This function may sleep. 39 * Context: User context only. This function may sleep if pagefaults are
40 * enabled.
40 * 41 *
41 * Checks if a pointer to a block of memory in user space is valid. 42 * Checks if a pointer to a block of memory in user space is valid.
42 * 43 *
@@ -61,7 +62,8 @@
61 * @x: Value to copy to user space. 62 * @x: Value to copy to user space.
62 * @ptr: Destination address, in user space. 63 * @ptr: Destination address, in user space.
63 * 64 *
64 * Context: User context only. This function may sleep. 65 * Context: User context only. This function may sleep if pagefaults are
66 * enabled.
65 * 67 *
66 * This macro copies a single simple value from kernel space to user 68 * This macro copies a single simple value from kernel space to user
67 * space. It supports simple types like char and int, but not larger 69 * space. It supports simple types like char and int, but not larger
@@ -79,7 +81,8 @@
79 * @x: Variable to store result. 81 * @x: Variable to store result.
80 * @ptr: Source address, in user space. 82 * @ptr: Source address, in user space.
81 * 83 *
82 * Context: User context only. This function may sleep. 84 * Context: User context only. This function may sleep if pagefaults are
85 * enabled.
83 * 86 *
84 * This macro copies a single simple variable from user space to kernel 87 * This macro copies a single simple variable from user space to kernel
85 * space. It supports simple types like char and int, but not larger 88 * space. It supports simple types like char and int, but not larger
@@ -98,7 +101,8 @@
98 * @x: Value to copy to user space. 101 * @x: Value to copy to user space.
99 * @ptr: Destination address, in user space. 102 * @ptr: Destination address, in user space.
100 * 103 *
101 * Context: User context only. This function may sleep. 104 * Context: User context only. This function may sleep if pagefaults are
105 * enabled.
102 * 106 *
103 * This macro copies a single simple value from kernel space to user 107 * This macro copies a single simple value from kernel space to user
104 * space. It supports simple types like char and int, but not larger 108 * space. It supports simple types like char and int, but not larger
@@ -119,7 +123,8 @@
119 * @x: Variable to store result. 123 * @x: Variable to store result.
120 * @ptr: Source address, in user space. 124 * @ptr: Source address, in user space.
121 * 125 *
122 * Context: User context only. This function may sleep. 126 * Context: User context only. This function may sleep if pagefaults are
127 * enabled.
123 * 128 *
124 * This macro copies a single simple variable from user space to kernel 129 * This macro copies a single simple variable from user space to kernel
125 * space. It supports simple types like char and int, but not larger 130 * space. It supports simple types like char and int, but not larger
diff --git a/arch/score/mm/fault.c b/arch/score/mm/fault.c
index 6860beb2a280..37a6c2e0e969 100644
--- a/arch/score/mm/fault.c
+++ b/arch/score/mm/fault.c
@@ -34,6 +34,7 @@
34#include <linux/string.h> 34#include <linux/string.h>
35#include <linux/types.h> 35#include <linux/types.h>
36#include <linux/ptrace.h> 36#include <linux/ptrace.h>
37#include <linux/uaccess.h>
37 38
38/* 39/*
39 * This routine handles page faults. It determines the address, 40 * This routine handles page faults. It determines the address,
@@ -73,7 +74,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
73 * If we're in an interrupt or have no user 74 * If we're in an interrupt or have no user
74 * context, we must not take the fault.. 75 * context, we must not take the fault..
75 */ 76 */
76 if (in_atomic() || !mm) 77 if (pagefault_disabled() || !mm)
77 goto bad_area_nosemaphore; 78 goto bad_area_nosemaphore;
78 79
79 if (user_mode(regs)) 80 if (user_mode(regs))
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index a58fec9b55e0..79d8276377d1 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -17,6 +17,7 @@
17#include <linux/kprobes.h> 17#include <linux/kprobes.h>
18#include <linux/perf_event.h> 18#include <linux/perf_event.h>
19#include <linux/kdebug.h> 19#include <linux/kdebug.h>
20#include <linux/uaccess.h>
20#include <asm/io_trapped.h> 21#include <asm/io_trapped.h>
21#include <asm/mmu_context.h> 22#include <asm/mmu_context.h>
22#include <asm/tlbflush.h> 23#include <asm/tlbflush.h>
@@ -438,9 +439,9 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
438 439
439 /* 440 /*
440 * If we're in an interrupt, have no user context or are running 441 * If we're in an interrupt, have no user context or are running
441 * in an atomic region then we must not take the fault: 442 * with pagefaults disabled then we must not take the fault:
442 */ 443 */
443 if (unlikely(in_atomic() || !mm)) { 444 if (unlikely(faulthandler_disabled() || !mm)) {
444 bad_area_nosemaphore(regs, error_code, address); 445 bad_area_nosemaphore(regs, error_code, address);
445 return; 446 return;
446 } 447 }
diff --git a/arch/sparc/include/asm/topology_64.h b/arch/sparc/include/asm/topology_64.h
index d1761df5cca6..01d17046225a 100644
--- a/arch/sparc/include/asm/topology_64.h
+++ b/arch/sparc/include/asm/topology_64.h
@@ -41,7 +41,7 @@ static inline int pcibus_to_node(struct pci_bus *pbus)
41#define topology_physical_package_id(cpu) (cpu_data(cpu).proc_id) 41#define topology_physical_package_id(cpu) (cpu_data(cpu).proc_id)
42#define topology_core_id(cpu) (cpu_data(cpu).core_id) 42#define topology_core_id(cpu) (cpu_data(cpu).core_id)
43#define topology_core_cpumask(cpu) (&cpu_core_sib_map[cpu]) 43#define topology_core_cpumask(cpu) (&cpu_core_sib_map[cpu])
44#define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu)) 44#define topology_sibling_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
45#endif /* CONFIG_SMP */ 45#endif /* CONFIG_SMP */
46 46
47extern cpumask_t cpu_core_map[NR_CPUS]; 47extern cpumask_t cpu_core_map[NR_CPUS];
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index 70d817154fe8..c399e7b3b035 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -21,6 +21,7 @@
21#include <linux/perf_event.h> 21#include <linux/perf_event.h>
22#include <linux/interrupt.h> 22#include <linux/interrupt.h>
23#include <linux/kdebug.h> 23#include <linux/kdebug.h>
24#include <linux/uaccess.h>
24 25
25#include <asm/page.h> 26#include <asm/page.h>
26#include <asm/pgtable.h> 27#include <asm/pgtable.h>
@@ -29,7 +30,6 @@
29#include <asm/setup.h> 30#include <asm/setup.h>
30#include <asm/smp.h> 31#include <asm/smp.h>
31#include <asm/traps.h> 32#include <asm/traps.h>
32#include <asm/uaccess.h>
33 33
34#include "mm_32.h" 34#include "mm_32.h"
35 35
@@ -196,7 +196,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
196 * If we're in an interrupt or have no user 196 * If we're in an interrupt or have no user
197 * context, we must not take the fault.. 197 * context, we must not take the fault..
198 */ 198 */
199 if (in_atomic() || !mm) 199 if (pagefault_disabled() || !mm)
200 goto no_context; 200 goto no_context;
201 201
202 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); 202 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 479823249429..e9268ea1a68d 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -22,12 +22,12 @@
22#include <linux/kdebug.h> 22#include <linux/kdebug.h>
23#include <linux/percpu.h> 23#include <linux/percpu.h>
24#include <linux/context_tracking.h> 24#include <linux/context_tracking.h>
25#include <linux/uaccess.h>
25 26
26#include <asm/page.h> 27#include <asm/page.h>
27#include <asm/pgtable.h> 28#include <asm/pgtable.h>
28#include <asm/openprom.h> 29#include <asm/openprom.h>
29#include <asm/oplib.h> 30#include <asm/oplib.h>
30#include <asm/uaccess.h>
31#include <asm/asi.h> 31#include <asm/asi.h>
32#include <asm/lsu.h> 32#include <asm/lsu.h>
33#include <asm/sections.h> 33#include <asm/sections.h>
@@ -330,7 +330,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
330 * If we're in an interrupt or have no user 330 * If we're in an interrupt or have no user
331 * context, we must not take the fault.. 331 * context, we must not take the fault..
332 */ 332 */
333 if (in_atomic() || !mm) 333 if (faulthandler_disabled() || !mm)
334 goto intr_or_no_mm; 334 goto intr_or_no_mm;
335 335
336 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); 336 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
diff --git a/arch/sparc/mm/highmem.c b/arch/sparc/mm/highmem.c
index 449f864f0cef..a454ec5ff07a 100644
--- a/arch/sparc/mm/highmem.c
+++ b/arch/sparc/mm/highmem.c
@@ -53,7 +53,7 @@ void *kmap_atomic(struct page *page)
53 unsigned long vaddr; 53 unsigned long vaddr;
54 long idx, type; 54 long idx, type;
55 55
56 /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ 56 preempt_disable();
57 pagefault_disable(); 57 pagefault_disable();
58 if (!PageHighMem(page)) 58 if (!PageHighMem(page))
59 return page_address(page); 59 return page_address(page);
@@ -91,6 +91,7 @@ void __kunmap_atomic(void *kvaddr)
91 91
92 if (vaddr < FIXADDR_START) { // FIXME 92 if (vaddr < FIXADDR_START) { // FIXME
93 pagefault_enable(); 93 pagefault_enable();
94 preempt_enable();
94 return; 95 return;
95 } 96 }
96 97
@@ -126,5 +127,6 @@ void __kunmap_atomic(void *kvaddr)
126 127
127 kmap_atomic_idx_pop(); 128 kmap_atomic_idx_pop();
128 pagefault_enable(); 129 pagefault_enable();
130 preempt_enable();
129} 131}
130EXPORT_SYMBOL(__kunmap_atomic); 132EXPORT_SYMBOL(__kunmap_atomic);
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 559cb744112c..c5d08b89a96c 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2738,7 +2738,7 @@ void hugetlb_setup(struct pt_regs *regs)
2738 struct mm_struct *mm = current->mm; 2738 struct mm_struct *mm = current->mm;
2739 struct tsb_config *tp; 2739 struct tsb_config *tp;
2740 2740
2741 if (in_atomic() || !mm) { 2741 if (faulthandler_disabled() || !mm) {
2742 const struct exception_table_entry *entry; 2742 const struct exception_table_entry *entry;
2743 2743
2744 entry = search_exception_tables(regs->tpc); 2744 entry = search_exception_tables(regs->tpc);
diff --git a/arch/tile/include/asm/topology.h b/arch/tile/include/asm/topology.h
index 938311844233..76b0d0ebb244 100644
--- a/arch/tile/include/asm/topology.h
+++ b/arch/tile/include/asm/topology.h
@@ -55,7 +55,7 @@ static inline const struct cpumask *cpumask_of_node(int node)
55#define topology_physical_package_id(cpu) ((void)(cpu), 0) 55#define topology_physical_package_id(cpu) ((void)(cpu), 0)
56#define topology_core_id(cpu) (cpu) 56#define topology_core_id(cpu) (cpu)
57#define topology_core_cpumask(cpu) ((void)(cpu), cpu_online_mask) 57#define topology_core_cpumask(cpu) ((void)(cpu), cpu_online_mask)
58#define topology_thread_cpumask(cpu) cpumask_of(cpu) 58#define topology_sibling_cpumask(cpu) cpumask_of(cpu)
59#endif 59#endif
60 60
61#endif /* _ASM_TILE_TOPOLOGY_H */ 61#endif /* _ASM_TILE_TOPOLOGY_H */
diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
index f41cb53cf645..a33276bf5ca1 100644
--- a/arch/tile/include/asm/uaccess.h
+++ b/arch/tile/include/asm/uaccess.h
@@ -78,7 +78,8 @@ int __range_ok(unsigned long addr, unsigned long size);
78 * @addr: User space pointer to start of block to check 78 * @addr: User space pointer to start of block to check
79 * @size: Size of block to check 79 * @size: Size of block to check
80 * 80 *
81 * Context: User context only. This function may sleep. 81 * Context: User context only. This function may sleep if pagefaults are
82 * enabled.
82 * 83 *
83 * Checks if a pointer to a block of memory in user space is valid. 84 * Checks if a pointer to a block of memory in user space is valid.
84 * 85 *
@@ -192,7 +193,8 @@ extern int __get_user_bad(void)
192 * @x: Variable to store result. 193 * @x: Variable to store result.
193 * @ptr: Source address, in user space. 194 * @ptr: Source address, in user space.
194 * 195 *
195 * Context: User context only. This function may sleep. 196 * Context: User context only. This function may sleep if pagefaults are
197 * enabled.
196 * 198 *
197 * This macro copies a single simple variable from user space to kernel 199 * This macro copies a single simple variable from user space to kernel
198 * space. It supports simple types like char and int, but not larger 200 * space. It supports simple types like char and int, but not larger
@@ -274,7 +276,8 @@ extern int __put_user_bad(void)
274 * @x: Value to copy to user space. 276 * @x: Value to copy to user space.
275 * @ptr: Destination address, in user space. 277 * @ptr: Destination address, in user space.
276 * 278 *
277 * Context: User context only. This function may sleep. 279 * Context: User context only. This function may sleep if pagefaults are
280 * enabled.
278 * 281 *
279 * This macro copies a single simple value from kernel space to user 282 * This macro copies a single simple value from kernel space to user
280 * space. It supports simple types like char and int, but not larger 283 * space. It supports simple types like char and int, but not larger
@@ -330,7 +333,8 @@ extern int __put_user_bad(void)
330 * @from: Source address, in kernel space. 333 * @from: Source address, in kernel space.
331 * @n: Number of bytes to copy. 334 * @n: Number of bytes to copy.
332 * 335 *
333 * Context: User context only. This function may sleep. 336 * Context: User context only. This function may sleep if pagefaults are
337 * enabled.
334 * 338 *
335 * Copy data from kernel space to user space. Caller must check 339 * Copy data from kernel space to user space. Caller must check
336 * the specified block with access_ok() before calling this function. 340 * the specified block with access_ok() before calling this function.
@@ -366,7 +370,8 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
366 * @from: Source address, in user space. 370 * @from: Source address, in user space.
367 * @n: Number of bytes to copy. 371 * @n: Number of bytes to copy.
368 * 372 *
369 * Context: User context only. This function may sleep. 373 * Context: User context only. This function may sleep if pagefaults are
374 * enabled.
370 * 375 *
371 * Copy data from user space to kernel space. Caller must check 376 * Copy data from user space to kernel space. Caller must check
372 * the specified block with access_ok() before calling this function. 377 * the specified block with access_ok() before calling this function.
@@ -437,7 +442,8 @@ static inline unsigned long __must_check copy_from_user(void *to,
437 * @from: Source address, in user space. 442 * @from: Source address, in user space.
438 * @n: Number of bytes to copy. 443 * @n: Number of bytes to copy.
439 * 444 *
440 * Context: User context only. This function may sleep. 445 * Context: User context only. This function may sleep if pagefaults are
446 * enabled.
441 * 447 *
442 * Copy data from user space to user space. Caller must check 448 * Copy data from user space to user space. Caller must check
443 * the specified blocks with access_ok() before calling this function. 449 * the specified blocks with access_ok() before calling this function.
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index e83cc999da02..3f4f58d34a92 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -354,9 +354,9 @@ static int handle_page_fault(struct pt_regs *regs,
354 354
355 /* 355 /*
356 * If we're in an interrupt, have no user context or are running in an 356 * If we're in an interrupt, have no user context or are running in an
357 * atomic region then we must not take the fault. 357 * region with pagefaults disabled then we must not take the fault.
358 */ 358 */
359 if (in_atomic() || !mm) { 359 if (pagefault_disabled() || !mm) {
360 vma = NULL; /* happy compiler */ 360 vma = NULL; /* happy compiler */
361 goto bad_area_nosemaphore; 361 goto bad_area_nosemaphore;
362 } 362 }
diff --git a/arch/tile/mm/highmem.c b/arch/tile/mm/highmem.c
index 6aa2f2625447..fcd545014e79 100644
--- a/arch/tile/mm/highmem.c
+++ b/arch/tile/mm/highmem.c
@@ -201,7 +201,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
201 int idx, type; 201 int idx, type;
202 pte_t *pte; 202 pte_t *pte;
203 203
204 /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ 204 preempt_disable();
205 pagefault_disable(); 205 pagefault_disable();
206 206
207 /* Avoid icache flushes by disallowing atomic executable mappings. */ 207 /* Avoid icache flushes by disallowing atomic executable mappings. */
@@ -259,6 +259,7 @@ void __kunmap_atomic(void *kvaddr)
259 } 259 }
260 260
261 pagefault_enable(); 261 pagefault_enable();
262 preempt_enable();
262} 263}
263EXPORT_SYMBOL(__kunmap_atomic); 264EXPORT_SYMBOL(__kunmap_atomic);
264 265
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index 8e4daf44e980..47ff9b7f3e5d 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -7,6 +7,7 @@
7#include <linux/sched.h> 7#include <linux/sched.h>
8#include <linux/hardirq.h> 8#include <linux/hardirq.h>
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/uaccess.h>
10#include <asm/current.h> 11#include <asm/current.h>
11#include <asm/pgtable.h> 12#include <asm/pgtable.h>
12#include <asm/tlbflush.h> 13#include <asm/tlbflush.h>
@@ -35,10 +36,10 @@ int handle_page_fault(unsigned long address, unsigned long ip,
35 *code_out = SEGV_MAPERR; 36 *code_out = SEGV_MAPERR;
36 37
37 /* 38 /*
38 * If the fault was during atomic operation, don't take the fault, just 39 * If the fault was with pagefaults disabled, don't take the fault, just
39 * fail. 40 * fail.
40 */ 41 */
41 if (in_atomic()) 42 if (faulthandler_disabled())
42 goto out_nosemaphore; 43 goto out_nosemaphore;
43 44
44 if (is_user) 45 if (is_user)
diff --git a/arch/unicore32/mm/fault.c b/arch/unicore32/mm/fault.c
index 0dc922dba915..afccef5529cc 100644
--- a/arch/unicore32/mm/fault.c
+++ b/arch/unicore32/mm/fault.c
@@ -218,7 +218,7 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
218 * If we're in an interrupt or have no user 218 * If we're in an interrupt or have no user
219 * context, we must not take the fault.. 219 * context, we must not take the fault..
220 */ 220 */
221 if (in_atomic() || !mm) 221 if (faulthandler_disabled() || !mm)
222 goto no_context; 222 goto no_context;
223 223
224 if (user_mode(regs)) 224 if (user_mode(regs))
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index 8f3271842533..dca71714f860 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -99,11 +99,9 @@ static __always_inline bool should_resched(void)
99 extern asmlinkage void ___preempt_schedule(void); 99 extern asmlinkage void ___preempt_schedule(void);
100# define __preempt_schedule() asm ("call ___preempt_schedule") 100# define __preempt_schedule() asm ("call ___preempt_schedule")
101 extern asmlinkage void preempt_schedule(void); 101 extern asmlinkage void preempt_schedule(void);
102# ifdef CONFIG_CONTEXT_TRACKING 102 extern asmlinkage void ___preempt_schedule_notrace(void);
103 extern asmlinkage void ___preempt_schedule_context(void); 103# define __preempt_schedule_notrace() asm ("call ___preempt_schedule_notrace")
104# define __preempt_schedule_context() asm ("call ___preempt_schedule_context") 104 extern asmlinkage void preempt_schedule_notrace(void);
105 extern asmlinkage void preempt_schedule_context(void);
106# endif
107#endif 105#endif
108 106
109#endif /* __ASM_PREEMPT_H */ 107#endif /* __ASM_PREEMPT_H */
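The context-tracking-only ___preempt_schedule_context() thunk becomes ___preempt_schedule_notrace() and is built whenever CONFIG_PREEMPT is set, since it is now the official tracing-safe preemption point. For orientation, a sketch of how the notrace thunk is typically reached, assuming the generic preempt_enable_notrace() keeps its usual shape (reconstructed, not quoted from this patch):

	#define example_preempt_enable_notrace()			\
	do {								\
		barrier();						\
		if (unlikely(__preempt_count_dec_and_test()))		\
			__preempt_schedule_notrace();	/* x86: call ___preempt_schedule_notrace */ \
	} while (0)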
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 17a8dced12da..222a6a3ca2b5 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -37,16 +37,6 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
37DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id); 37DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
38DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number); 38DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
39 39
40static inline struct cpumask *cpu_sibling_mask(int cpu)
41{
42 return per_cpu(cpu_sibling_map, cpu);
43}
44
45static inline struct cpumask *cpu_core_mask(int cpu)
46{
47 return per_cpu(cpu_core_map, cpu);
48}
49
50static inline struct cpumask *cpu_llc_shared_mask(int cpu) 40static inline struct cpumask *cpu_llc_shared_mask(int cpu)
51{ 41{
52 return per_cpu(cpu_llc_shared_map, cpu); 42 return per_cpu(cpu_llc_shared_map, cpu);
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 0e8f04f2c26f..5a77593fdace 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -124,7 +124,7 @@ extern const struct cpumask *cpu_coregroup_mask(int cpu);
124 124
125#ifdef ENABLE_TOPO_DEFINES 125#ifdef ENABLE_TOPO_DEFINES
126#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu)) 126#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
127#define topology_thread_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu)) 127#define topology_sibling_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
128#endif 128#endif
129 129
130static inline void arch_fix_phys_package_id(int num, u32 slot) 130static inline void arch_fix_phys_package_id(int num, u32 slot)
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index ace9dec050b1..a8df874f3e88 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -74,7 +74,8 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
74 * @addr: User space pointer to start of block to check 74 * @addr: User space pointer to start of block to check
75 * @size: Size of block to check 75 * @size: Size of block to check
76 * 76 *
77 * Context: User context only. This function may sleep. 77 * Context: User context only. This function may sleep if pagefaults are
78 * enabled.
78 * 79 *
79 * Checks if a pointer to a block of memory in user space is valid. 80 * Checks if a pointer to a block of memory in user space is valid.
80 * 81 *
@@ -145,7 +146,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
145 * @x: Variable to store result. 146 * @x: Variable to store result.
146 * @ptr: Source address, in user space. 147 * @ptr: Source address, in user space.
147 * 148 *
148 * Context: User context only. This function may sleep. 149 * Context: User context only. This function may sleep if pagefaults are
150 * enabled.
149 * 151 *
150 * This macro copies a single simple variable from user space to kernel 152 * This macro copies a single simple variable from user space to kernel
151 * space. It supports simple types like char and int, but not larger 153 * space. It supports simple types like char and int, but not larger
@@ -240,7 +242,8 @@ extern void __put_user_8(void);
240 * @x: Value to copy to user space. 242 * @x: Value to copy to user space.
241 * @ptr: Destination address, in user space. 243 * @ptr: Destination address, in user space.
242 * 244 *
243 * Context: User context only. This function may sleep. 245 * Context: User context only. This function may sleep if pagefaults are
246 * enabled.
244 * 247 *
245 * This macro copies a single simple value from kernel space to user 248 * This macro copies a single simple value from kernel space to user
246 * space. It supports simple types like char and int, but not larger 249 * space. It supports simple types like char and int, but not larger
@@ -455,7 +458,8 @@ struct __large_struct { unsigned long buf[100]; };
455 * @x: Variable to store result. 458 * @x: Variable to store result.
456 * @ptr: Source address, in user space. 459 * @ptr: Source address, in user space.
457 * 460 *
458 * Context: User context only. This function may sleep. 461 * Context: User context only. This function may sleep if pagefaults are
462 * enabled.
459 * 463 *
460 * This macro copies a single simple variable from user space to kernel 464 * This macro copies a single simple variable from user space to kernel
461 * space. It supports simple types like char and int, but not larger 465 * space. It supports simple types like char and int, but not larger
@@ -479,7 +483,8 @@ struct __large_struct { unsigned long buf[100]; };
479 * @x: Value to copy to user space. 483 * @x: Value to copy to user space.
480 * @ptr: Destination address, in user space. 484 * @ptr: Destination address, in user space.
481 * 485 *
482 * Context: User context only. This function may sleep. 486 * Context: User context only. This function may sleep if pagefaults are
487 * enabled.
483 * 488 *
484 * This macro copies a single simple value from kernel space to user 489 * This macro copies a single simple value from kernel space to user
485 * space. It supports simple types like char and int, but not larger 490 * space. It supports simple types like char and int, but not larger
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index 3c03a5de64d3..7c8ad3451988 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -70,7 +70,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
70 * @from: Source address, in kernel space. 70 * @from: Source address, in kernel space.
71 * @n: Number of bytes to copy. 71 * @n: Number of bytes to copy.
72 * 72 *
73 * Context: User context only. This function may sleep. 73 * Context: User context only. This function may sleep if pagefaults are
74 * enabled.
74 * 75 *
75 * Copy data from kernel space to user space. Caller must check 76 * Copy data from kernel space to user space. Caller must check
76 * the specified block with access_ok() before calling this function. 77 * the specified block with access_ok() before calling this function.
@@ -117,7 +118,8 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
117 * @from: Source address, in user space. 118 * @from: Source address, in user space.
118 * @n: Number of bytes to copy. 119 * @n: Number of bytes to copy.
119 * 120 *
120 * Context: User context only. This function may sleep. 121 * Context: User context only. This function may sleep if pagefaults are
122 * enabled.
121 * 123 *
122 * Copy data from user space to kernel space. Caller must check 124 * Copy data from user space to kernel space. Caller must check
123 * the specified block with access_ok() before calling this function. 125 * the specified block with access_ok() before calling this function.
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 19980d9a6cc9..b9826a981fb2 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2576,7 +2576,7 @@ static void intel_pmu_cpu_starting(int cpu)
2576 if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) { 2576 if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) {
2577 void **onln = &cpuc->kfree_on_online[X86_PERF_KFREE_SHARED]; 2577 void **onln = &cpuc->kfree_on_online[X86_PERF_KFREE_SHARED];
2578 2578
2579 for_each_cpu(i, topology_thread_cpumask(cpu)) { 2579 for_each_cpu(i, topology_sibling_cpumask(cpu)) {
2580 struct intel_shared_regs *pc; 2580 struct intel_shared_regs *pc;
2581 2581
2582 pc = per_cpu(cpu_hw_events, i).shared_regs; 2582 pc = per_cpu(cpu_hw_events, i).shared_regs;
@@ -2594,7 +2594,7 @@ static void intel_pmu_cpu_starting(int cpu)
2594 cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR]; 2594 cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
2595 2595
2596 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) { 2596 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
2597 for_each_cpu(i, topology_thread_cpumask(cpu)) { 2597 for_each_cpu(i, topology_sibling_cpumask(cpu)) {
2598 struct intel_excl_cntrs *c; 2598 struct intel_excl_cntrs *c;
2599 2599
2600 c = per_cpu(cpu_hw_events, i).excl_cntrs; 2600 c = per_cpu(cpu_hw_events, i).excl_cntrs;
@@ -3362,7 +3362,7 @@ static __init int fixup_ht_bug(void)
3362 if (!(x86_pmu.flags & PMU_FL_EXCL_ENABLED)) 3362 if (!(x86_pmu.flags & PMU_FL_EXCL_ENABLED))
3363 return 0; 3363 return 0;
3364 3364
3365 w = cpumask_weight(topology_thread_cpumask(cpu)); 3365 w = cpumask_weight(topology_sibling_cpumask(cpu));
3366 if (w > 1) { 3366 if (w > 1) {
3367 pr_info("PMU erratum BJ122, BV98, HSD29 worked around, HT is on\n"); 3367 pr_info("PMU erratum BJ122, BV98, HSD29 worked around, HT is on\n");
3368 return 0; 3368 return 0;
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index e7d8c7608471..18ca99f2798b 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -12,7 +12,8 @@ static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c,
12{ 12{
13#ifdef CONFIG_SMP 13#ifdef CONFIG_SMP
14 seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); 14 seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
15 seq_printf(m, "siblings\t: %d\n", cpumask_weight(cpu_core_mask(cpu))); 15 seq_printf(m, "siblings\t: %d\n",
16 cpumask_weight(topology_core_cpumask(cpu)));
16 seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); 17 seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
17 seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); 18 seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
18 seq_printf(m, "apicid\t\t: %d\n", c->apicid); 19 seq_printf(m, "apicid\t\t: %d\n", c->apicid);
diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
index 05fd74f537d6..64341aa485ae 100644
--- a/arch/x86/kernel/i386_ksyms_32.c
+++ b/arch/x86/kernel/i386_ksyms_32.c
@@ -40,7 +40,5 @@ EXPORT_SYMBOL(empty_zero_page);
40 40
41#ifdef CONFIG_PREEMPT 41#ifdef CONFIG_PREEMPT
42EXPORT_SYMBOL(___preempt_schedule); 42EXPORT_SYMBOL(___preempt_schedule);
43#ifdef CONFIG_CONTEXT_TRACKING 43EXPORT_SYMBOL(___preempt_schedule_notrace);
44EXPORT_SYMBOL(___preempt_schedule_context);
45#endif
46#endif 44#endif
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 6e338e3b1dc0..c648139d68d7 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -445,11 +445,10 @@ static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
445} 445}
446 446
447/* 447/*
448 * MONITOR/MWAIT with no hints, used for default default C1 state. 448 * MONITOR/MWAIT with no hints, used for default C1 state. This invokes MWAIT
449 * This invokes MWAIT with interrutps enabled and no flags, 449 * with interrupts enabled and no flags, which is backwards compatible with the
450 * which is backwards compatible with the original MWAIT implementation. 450 * original MWAIT implementation.
451 */ 451 */
452
453static void mwait_idle(void) 452static void mwait_idle(void)
454{ 453{
455 if (!current_set_polling_and_test()) { 454 if (!current_set_polling_and_test()) {
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 50e547eac8cd..0e8209619455 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -314,10 +314,10 @@ topology_sane(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o, const char *name)
314 cpu1, name, cpu2, cpu_to_node(cpu1), cpu_to_node(cpu2)); 314 cpu1, name, cpu2, cpu_to_node(cpu1), cpu_to_node(cpu2));
315} 315}
316 316
317#define link_mask(_m, c1, c2) \ 317#define link_mask(mfunc, c1, c2) \
318do { \ 318do { \
319 cpumask_set_cpu((c1), cpu_##_m##_mask(c2)); \ 319 cpumask_set_cpu((c1), mfunc(c2)); \
320 cpumask_set_cpu((c2), cpu_##_m##_mask(c1)); \ 320 cpumask_set_cpu((c2), mfunc(c1)); \
321} while (0) 321} while (0)
322 322
323static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) 323static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
@@ -398,9 +398,9 @@ void set_cpu_sibling_map(int cpu)
398 cpumask_set_cpu(cpu, cpu_sibling_setup_mask); 398 cpumask_set_cpu(cpu, cpu_sibling_setup_mask);
399 399
400 if (!has_mp) { 400 if (!has_mp) {
401 cpumask_set_cpu(cpu, cpu_sibling_mask(cpu)); 401 cpumask_set_cpu(cpu, topology_sibling_cpumask(cpu));
402 cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu)); 402 cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
403 cpumask_set_cpu(cpu, cpu_core_mask(cpu)); 403 cpumask_set_cpu(cpu, topology_core_cpumask(cpu));
404 c->booted_cores = 1; 404 c->booted_cores = 1;
405 return; 405 return;
406 } 406 }
@@ -409,32 +409,34 @@ void set_cpu_sibling_map(int cpu)
409 o = &cpu_data(i); 409 o = &cpu_data(i);
410 410
411 if ((i == cpu) || (has_smt && match_smt(c, o))) 411 if ((i == cpu) || (has_smt && match_smt(c, o)))
412 link_mask(sibling, cpu, i); 412 link_mask(topology_sibling_cpumask, cpu, i);
413 413
414 if ((i == cpu) || (has_mp && match_llc(c, o))) 414 if ((i == cpu) || (has_mp && match_llc(c, o)))
415 link_mask(llc_shared, cpu, i); 415 link_mask(cpu_llc_shared_mask, cpu, i);
416 416
417 } 417 }
418 418
419 /* 419 /*
420 * This needs a separate iteration over the cpus because we rely on all 420 * This needs a separate iteration over the cpus because we rely on all
421 * cpu_sibling_mask links to be set-up. 421 * topology_sibling_cpumask links to be set-up.
422 */ 422 */
423 for_each_cpu(i, cpu_sibling_setup_mask) { 423 for_each_cpu(i, cpu_sibling_setup_mask) {
424 o = &cpu_data(i); 424 o = &cpu_data(i);
425 425
426 if ((i == cpu) || (has_mp && match_die(c, o))) { 426 if ((i == cpu) || (has_mp && match_die(c, o))) {
427 link_mask(core, cpu, i); 427 link_mask(topology_core_cpumask, cpu, i);
428 428
429 /* 429 /*
430 * Does this new cpu bringup a new core? 430 * Does this new cpu bringup a new core?
431 */ 431 */
432 if (cpumask_weight(cpu_sibling_mask(cpu)) == 1) { 432 if (cpumask_weight(
433 topology_sibling_cpumask(cpu)) == 1) {
433 /* 434 /*
434 * for each core in package, increment 435 * for each core in package, increment
435 * the booted_cores for this new cpu 436 * the booted_cores for this new cpu
436 */ 437 */
437 if (cpumask_first(cpu_sibling_mask(i)) == i) 438 if (cpumask_first(
439 topology_sibling_cpumask(i)) == i)
438 c->booted_cores++; 440 c->booted_cores++;
439 /* 441 /*
440 * increment the core count for all 442 * increment the core count for all
@@ -1009,8 +1011,8 @@ static __init void disable_smp(void)
1009 physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map); 1011 physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
1010 else 1012 else
1011 physid_set_mask_of_physid(0, &phys_cpu_present_map); 1013 physid_set_mask_of_physid(0, &phys_cpu_present_map);
1012 cpumask_set_cpu(0, cpu_sibling_mask(0)); 1014 cpumask_set_cpu(0, topology_sibling_cpumask(0));
1013 cpumask_set_cpu(0, cpu_core_mask(0)); 1015 cpumask_set_cpu(0, topology_core_cpumask(0));
1014} 1016}
1015 1017
1016enum { 1018enum {
@@ -1293,22 +1295,22 @@ static void remove_siblinginfo(int cpu)
1293 int sibling; 1295 int sibling;
1294 struct cpuinfo_x86 *c = &cpu_data(cpu); 1296 struct cpuinfo_x86 *c = &cpu_data(cpu);
1295 1297
1296 for_each_cpu(sibling, cpu_core_mask(cpu)) { 1298 for_each_cpu(sibling, topology_core_cpumask(cpu)) {
1297 cpumask_clear_cpu(cpu, cpu_core_mask(sibling)); 1299 cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
1298 /*/ 1300 /*/
1299 * last thread sibling in this cpu core going down 1301 * last thread sibling in this cpu core going down
1300 */ 1302 */
1301 if (cpumask_weight(cpu_sibling_mask(cpu)) == 1) 1303 if (cpumask_weight(topology_sibling_cpumask(cpu)) == 1)
1302 cpu_data(sibling).booted_cores--; 1304 cpu_data(sibling).booted_cores--;
1303 } 1305 }
1304 1306
1305 for_each_cpu(sibling, cpu_sibling_mask(cpu)) 1307 for_each_cpu(sibling, topology_sibling_cpumask(cpu))
1306 cpumask_clear_cpu(cpu, cpu_sibling_mask(sibling)); 1308 cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
1307 for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) 1309 for_each_cpu(sibling, cpu_llc_shared_mask(cpu))
1308 cpumask_clear_cpu(cpu, cpu_llc_shared_mask(sibling)); 1310 cpumask_clear_cpu(cpu, cpu_llc_shared_mask(sibling));
1309 cpumask_clear(cpu_llc_shared_mask(cpu)); 1311 cpumask_clear(cpu_llc_shared_mask(cpu));
1310 cpumask_clear(cpu_sibling_mask(cpu)); 1312 cpumask_clear(topology_sibling_cpumask(cpu));
1311 cpumask_clear(cpu_core_mask(cpu)); 1313 cpumask_clear(topology_core_cpumask(cpu));
1312 c->phys_proc_id = 0; 1314 c->phys_proc_id = 0;
1313 c->cpu_core_id = 0; 1315 c->cpu_core_id = 0;
1314 cpumask_clear_cpu(cpu, cpu_sibling_setup_mask); 1316 cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
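The reflowed lines in the set_cpu_sibling_map() hunk above are awkward to read in diff form; as a clarifying paraphrase (an elided sketch, not a verbatim excerpt), the core-accounting branch now reads roughly:

	if ((i == cpu) || (has_mp && match_die(c, o))) {
		link_mask(topology_core_cpumask, cpu, i);

		/* Does this new cpu bring up a new core? */
		if (cpumask_weight(topology_sibling_cpumask(cpu)) == 1) {
			/* the first thread of core i accounts for the core */
			if (cpumask_first(topology_sibling_cpumask(i)) == i)
				c->booted_cores++;
			/* ... remaining booted_cores bookkeeping elided ... */
		}
	}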
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index 26488487bc61..dd8d0791dfb5 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -113,7 +113,7 @@ static void check_tsc_warp(unsigned int timeout)
113 */ 113 */
114static inline unsigned int loop_timeout(int cpu) 114static inline unsigned int loop_timeout(int cpu)
115{ 115{
116 return (cpumask_weight(cpu_core_mask(cpu)) > 1) ? 2 : 20; 116 return (cpumask_weight(topology_core_cpumask(cpu)) > 1) ? 2 : 20;
117} 117}
118 118
119/* 119/*
diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
index 37d8fa4438f0..a0695be19864 100644
--- a/arch/x86/kernel/x8664_ksyms_64.c
+++ b/arch/x86/kernel/x8664_ksyms_64.c
@@ -75,7 +75,5 @@ EXPORT_SYMBOL(native_load_gs_index);
75 75
76#ifdef CONFIG_PREEMPT 76#ifdef CONFIG_PREEMPT
77EXPORT_SYMBOL(___preempt_schedule); 77EXPORT_SYMBOL(___preempt_schedule);
78#ifdef CONFIG_CONTEXT_TRACKING 78EXPORT_SYMBOL(___preempt_schedule_notrace);
79EXPORT_SYMBOL(___preempt_schedule_context);
80#endif
81#endif 79#endif
diff --git a/arch/x86/lib/thunk_32.S b/arch/x86/lib/thunk_32.S
index 5eb715087b80..e407941d0488 100644
--- a/arch/x86/lib/thunk_32.S
+++ b/arch/x86/lib/thunk_32.S
@@ -38,8 +38,6 @@
38 38
39#ifdef CONFIG_PREEMPT 39#ifdef CONFIG_PREEMPT
40 THUNK ___preempt_schedule, preempt_schedule 40 THUNK ___preempt_schedule, preempt_schedule
41#ifdef CONFIG_CONTEXT_TRACKING 41 THUNK ___preempt_schedule_notrace, preempt_schedule_notrace
42 THUNK ___preempt_schedule_context, preempt_schedule_context
43#endif
44#endif 42#endif
45 43
diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
index f89ba4e93025..2198902329b5 100644
--- a/arch/x86/lib/thunk_64.S
+++ b/arch/x86/lib/thunk_64.S
@@ -49,9 +49,7 @@
49 49
50#ifdef CONFIG_PREEMPT 50#ifdef CONFIG_PREEMPT
51 THUNK ___preempt_schedule, preempt_schedule 51 THUNK ___preempt_schedule, preempt_schedule
52#ifdef CONFIG_CONTEXT_TRACKING 52 THUNK ___preempt_schedule_notrace, preempt_schedule_notrace
53 THUNK ___preempt_schedule_context, preempt_schedule_context
54#endif
55#endif 53#endif
56 54
57#if defined(CONFIG_TRACE_IRQFLAGS) \ 55#if defined(CONFIG_TRACE_IRQFLAGS) \
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index e2f5e21c03b3..91d93b95bd86 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -647,7 +647,8 @@ EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
647 * @from: Source address, in kernel space. 647 * @from: Source address, in kernel space.
648 * @n: Number of bytes to copy. 648 * @n: Number of bytes to copy.
649 * 649 *
650 * Context: User context only. This function may sleep. 650 * Context: User context only. This function may sleep if pagefaults are
651 * enabled.
651 * 652 *
652 * Copy data from kernel space to user space. 653 * Copy data from kernel space to user space.
653 * 654 *
@@ -668,7 +669,8 @@ EXPORT_SYMBOL(_copy_to_user);
  * @from: Source address, in user space.
  * @n:    Number of bytes to copy.
  *
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * Copy data from user space to kernel space.
  *
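The reworded kerneldoc above encodes the new rule: these copy routines may only sleep while pagefaults are enabled. A hedged sketch of the non-sleeping pattern callers use when a fault must not be serviced, based on the generic uaccess API and not on this patch; the wrapper name is made up for illustration:

	/* probe_user_copy() is a hypothetical wrapper, shown only to illustrate the pattern. */
	static int probe_user_copy(void *dst, const void __user *src, size_t n)
	{
		unsigned long left;

		pagefault_disable();	/* faults now fail immediately instead of sleeping */
		left = __copy_from_user_inatomic(dst, src, n);
		pagefault_enable();

		return left ? -EFAULT : 0;	/* nonzero means bytes were not copied */
	}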
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 181c53bac3a7..9dc909841739 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -13,6 +13,7 @@
 #include <linux/hugetlb.h>		/* hstate_index_to_shift	*/
 #include <linux/prefetch.h>		/* prefetchw			*/
 #include <linux/context_tracking.h>	/* exception_enter(), ...	*/
+#include <linux/uaccess.h>		/* faulthandler_disabled()	*/
 
 #include <asm/traps.h>			/* dotraplinkage, ...		*/
 #include <asm/pgalloc.h>		/* pgd_*(), ...			*/
@@ -1126,9 +1127,9 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
 
 	/*
 	 * If we're in an interrupt, have no user context or are running
-	 * in an atomic region then we must not take the fault:
+	 * in a region with pagefaults disabled then we must not take the fault
 	 */
-	if (unlikely(in_atomic() || !mm)) {
+	if (unlikely(faulthandler_disabled() || !mm)) {
 		bad_area_nosemaphore(regs, error_code, address);
 		return;
 	}
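For reference, faulthandler_disabled(), added earlier in this series in <linux/uaccess.h>, folds the old in_atomic() test together with the new per-task pagefault-disable counter. Roughly, as a sketch from memory rather than from this hunk:

	/* Approximate definition; see include/linux/uaccess.h as introduced by this series. */
	#define faulthandler_disabled() (pagefault_disabled() || in_atomic())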
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index 4500142bc4aa..eecb207a2037 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -35,7 +35,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 	unsigned long vaddr;
 	int idx, type;
 
-	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+	preempt_disable();
 	pagefault_disable();
 
 	if (!PageHighMem(page))
@@ -100,6 +100,7 @@ void __kunmap_atomic(void *kvaddr)
 #endif
 
 	pagefault_enable();
+	preempt_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
 
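With pagefault_disable() no longer implying a preempt-count increment, kmap_atomic() and its teardown path disable and re-enable preemption explicitly, as the two hunks above show. A minimal usage sketch of the resulting pairing, illustrative rather than taken verbatim from the tree:

	void *vaddr = kmap_atomic(page);	/* preempt_disable() + pagefault_disable() */
	memcpy(vaddr, buf, len);		/* no sleeping and no faults in this window */
	kunmap_atomic(vaddr);			/* pagefault_enable() + preempt_enable() */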
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index 9ca35fc60cfe..2b7ece0e103a 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -59,6 +59,7 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
 	unsigned long vaddr;
 	int idx, type;
 
+	preempt_disable();
 	pagefault_disable();
 
 	type = kmap_atomic_idx_push();
@@ -117,5 +118,6 @@ iounmap_atomic(void __iomem *kvaddr)
 	}
 
 	pagefault_enable();
+	preempt_enable();
 }
 EXPORT_SYMBOL_GPL(iounmap_atomic);
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index 9e3571a6535c..83a44a33cfa1 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -15,10 +15,10 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/hardirq.h>
+#include <linux/uaccess.h>
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 #include <asm/hardirq.h>
-#include <asm/uaccess.h>
 #include <asm/pgalloc.h>
 
 DEFINE_PER_CPU(unsigned long, asid_cache) = ASID_USER_FIRST;
@@ -57,7 +57,7 @@ void do_page_fault(struct pt_regs *regs)
 	/* If we're in an interrupt or have no user
 	 * context, we must not take the fault..
 	 */
-	if (in_atomic() || !mm) {
+	if (faulthandler_disabled() || !mm) {
 		bad_page_fault(regs, address, SIGSEGV);
 		return;
 	}
diff --git a/arch/xtensa/mm/highmem.c b/arch/xtensa/mm/highmem.c
index 8cfb71ec0937..184ceadccc1a 100644
--- a/arch/xtensa/mm/highmem.c
+++ b/arch/xtensa/mm/highmem.c
@@ -42,6 +42,7 @@ void *kmap_atomic(struct page *page)
 	enum fixed_addresses idx;
 	unsigned long vaddr;
 
+	preempt_disable();
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
@@ -79,6 +80,7 @@ void __kunmap_atomic(void *kvaddr)
 	}
 
 	pagefault_enable();
+	preempt_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
 