author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2006-12-06 23:32:20 -0500
committer  Linus Torvalds <torvalds@woody.osdl.org>   2006-12-07 11:39:21 -0500
commit     a866374aecc90c7d90619727ccd851ac096b2fc7
tree       eabae0b36b5281dcef20563470c7f05549689b8c
parent     6edaf68a87d17570790fd55f0c451a29ec1d6703
[PATCH] mm: pagefault_{disable,enable}()
Introduce pagefault_{disable,enable}() and use these where previously we did
manual preempt increments/decrements to make the pagefault handler do the
atomic thing.

Currently they still rely on the increased preempt count, but do not rely on
the disabled preemption; this might go away in the future.

(NOTE: the extra barrier() in pagefault_disable might fix some holes on
machines which have too many registers for their own good)

[heiko.carstens@de.ibm.com: s390 fix]
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Nick Piggin <npiggin@suse.de>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
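The conversion below is mechanical: each open-coded inc_preempt_count()/dec_preempt_count()
pair around an in-atomic user access becomes pagefault_disable()/pagefault_enable(). As a
minimal sketch of the resulting pattern (illustration only, not part of the patch; the
helper name is made up, the body mirrors get_futex_value_locked() in kernel/futex.c after
this change):

    /* Sketch: fetch a user-space u32 without sleeping in the fault path. */
    static inline int get_user_u32_nofault(u32 *dest, u32 __user *from)
    {
            int ret;

            pagefault_disable();    /* faults now go straight to the fixup table */
            ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
            pagefault_enable();     /* re-enables; may reschedule via preempt_check_resched() */

            return ret ? -EFAULT : 0;
    }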
-rw-r--r--  arch/frv/kernel/futex.c      |  4
-rw-r--r--  arch/i386/mm/highmem.c       | 10
-rw-r--r--  arch/mips/mm/highmem.c       | 10
-rw-r--r--  arch/s390/lib/uaccess_std.c  |  6
-rw-r--r--  arch/sparc/mm/highmem.c      |  8
-rw-r--r--  include/asm-frv/highmem.h    |  5
-rw-r--r--  include/asm-generic/futex.h  |  4
-rw-r--r--  include/asm-i386/futex.h     |  4
-rw-r--r--  include/asm-ia64/futex.h     |  4
-rw-r--r--  include/asm-mips/futex.h     |  4
-rw-r--r--  include/asm-parisc/futex.h   |  4
-rw-r--r--  include/asm-powerpc/futex.h  |  4
-rw-r--r--  include/asm-ppc/highmem.h    |  8
-rw-r--r--  include/asm-sparc64/futex.h  |  4
-rw-r--r--  include/asm-x86_64/futex.h   |  4
-rw-r--r--  include/linux/uaccess.h      | 39
-rw-r--r--  kernel/futex.c               | 28
17 files changed, 88 insertions(+), 62 deletions(-)
diff --git a/arch/frv/kernel/futex.c b/arch/frv/kernel/futex.c
index eae874a970c6..53dc5ed1ebda 100644
--- a/arch/frv/kernel/futex.c
+++ b/arch/frv/kernel/futex.c
@@ -200,7 +200,7 @@ int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
         if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
                 return -EFAULT;
 
-        inc_preempt_count();
+        pagefault_disable();
 
         switch (op) {
         case FUTEX_OP_SET:
@@ -223,7 +223,7 @@ int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
                 break;
         }
 
-        dec_preempt_count();
+        pagefault_enable();
 
         if (!ret) {
                 switch (cmp) {
diff --git a/arch/i386/mm/highmem.c b/arch/i386/mm/highmem.c
index f9f647cdbc7b..178bbfe6cbac 100644
--- a/arch/i386/mm/highmem.c
+++ b/arch/i386/mm/highmem.c
@@ -32,7 +32,7 @@ void *kmap_atomic(struct page *page, enum km_type type)
         unsigned long vaddr;
 
         /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
-        inc_preempt_count();
+        pagefault_disable();
         if (!PageHighMem(page))
                 return page_address(page);
 
@@ -52,8 +52,7 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
 
 #ifdef CONFIG_DEBUG_HIGHMEM
         if (vaddr >= PAGE_OFFSET && vaddr < (unsigned long)high_memory) {
-                dec_preempt_count();
-                preempt_check_resched();
+                pagefault_enable();
                 return;
         }
 
@@ -68,8 +67,7 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
          */
         kpte_clear_flush(kmap_pte-idx, vaddr);
 
-        dec_preempt_count();
-        preempt_check_resched();
+        pagefault_enable();
 }
 
 /* This is the same as kmap_atomic() but can map memory that doesn't
@@ -80,7 +78,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
         enum fixed_addresses idx;
         unsigned long vaddr;
 
-        inc_preempt_count();
+        pagefault_disable();
 
         idx = type + KM_TYPE_NR*smp_processor_id();
         vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c
index 99ebf3ccc222..675502ada5a2 100644
--- a/arch/mips/mm/highmem.c
+++ b/arch/mips/mm/highmem.c
@@ -39,7 +39,7 @@ void *__kmap_atomic(struct page *page, enum km_type type)
         unsigned long vaddr;
 
         /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
-        inc_preempt_count();
+        pagefault_disable();
         if (!PageHighMem(page))
                 return page_address(page);
 
@@ -62,8 +62,7 @@ void __kunmap_atomic(void *kvaddr, enum km_type type)
         enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
 
         if (vaddr < FIXADDR_START) { // FIXME
-                dec_preempt_count();
-                preempt_check_resched();
+                pagefault_enable();
                 return;
         }
 
@@ -78,8 +77,7 @@ void __kunmap_atomic(void *kvaddr, enum km_type type)
         local_flush_tlb_one(vaddr);
 #endif
 
-        dec_preempt_count();
-        preempt_check_resched();
+        pagefault_enable();
 }
 
 #ifndef CONFIG_LIMITED_DMA
@@ -92,7 +90,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
         enum fixed_addresses idx;
         unsigned long vaddr;
 
-        inc_preempt_count();
+        pagefault_disable();
 
         idx = type + KM_TYPE_NR*smp_processor_id();
         vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
diff --git a/arch/s390/lib/uaccess_std.c b/arch/s390/lib/uaccess_std.c
index 2d549ed2e113..bbaca66fa293 100644
--- a/arch/s390/lib/uaccess_std.c
+++ b/arch/s390/lib/uaccess_std.c
@@ -11,7 +11,7 @@
 
 #include <linux/errno.h>
 #include <linux/mm.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 #include <asm/futex.h>
 
 #ifndef __s390x__
@@ -258,7 +258,7 @@ int futex_atomic_op(int op, int __user *uaddr, int oparg, int *old)
 {
         int oldval = 0, newval, ret;
 
-        inc_preempt_count();
+        pagefault_disable();
 
         switch (op) {
         case FUTEX_OP_SET:
@@ -284,7 +284,7 @@ int futex_atomic_op(int op, int __user *uaddr, int oparg, int *old)
         default:
                 ret = -ENOSYS;
         }
-        dec_preempt_count();
+        pagefault_enable();
         *old = oldval;
         return ret;
 }
diff --git a/arch/sparc/mm/highmem.c b/arch/sparc/mm/highmem.c
index 4d8ed9c65182..01fc6c254292 100644
--- a/arch/sparc/mm/highmem.c
+++ b/arch/sparc/mm/highmem.c
@@ -35,7 +35,7 @@ void *kmap_atomic(struct page *page, enum km_type type)
         unsigned long vaddr;
 
         /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
-        inc_preempt_count();
+        pagefault_disable();
         if (!PageHighMem(page))
                 return page_address(page);
 
@@ -70,8 +70,7 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
         unsigned long idx = type + KM_TYPE_NR*smp_processor_id();
 
         if (vaddr < FIXADDR_START) { // FIXME
-                dec_preempt_count();
-                preempt_check_resched();
+                pagefault_enable();
                 return;
         }
 
@@ -97,8 +96,7 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
 #endif
 #endif
 
-        dec_preempt_count();
-        preempt_check_resched();
+        pagefault_enable();
 }
 
 /* We may be fed a pagetable here by ptep_to_xxx and others. */
diff --git a/include/asm-frv/highmem.h b/include/asm-frv/highmem.h
index 0f390f41f816..ff4d6cdeb152 100644
--- a/include/asm-frv/highmem.h
+++ b/include/asm-frv/highmem.h
@@ -115,7 +115,7 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
 {
         unsigned long paddr;
 
-        inc_preempt_count();
+        pagefault_disable();
         paddr = page_to_phys(page);
 
         switch (type) {
@@ -170,8 +170,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
         default:
                 BUG();
         }
-        dec_preempt_count();
-        preempt_check_resched();
+        pagefault_enable();
 }
 
 #endif /* !__ASSEMBLY__ */
diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
index df893c160318..f422df0956a2 100644
--- a/include/asm-generic/futex.h
+++ b/include/asm-generic/futex.h
@@ -21,7 +21,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
         if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
                 return -EFAULT;
 
-        inc_preempt_count();
+        pagefault_disable();
 
         switch (op) {
         case FUTEX_OP_SET:
@@ -33,7 +33,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
                 ret = -ENOSYS;
         }
 
-        dec_preempt_count();
+        pagefault_enable();
 
         if (!ret) {
                 switch (cmp) {
diff --git a/include/asm-i386/futex.h b/include/asm-i386/futex.h
index 946d97cfea23..438ef0ec7101 100644
--- a/include/asm-i386/futex.h
+++ b/include/asm-i386/futex.h
@@ -56,7 +56,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
         if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
                 return -EFAULT;
 
-        inc_preempt_count();
+        pagefault_disable();
 
         if (op == FUTEX_OP_SET)
                 __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
@@ -88,7 +88,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
                 }
         }
 
-        dec_preempt_count();
+        pagefault_enable();
 
         if (!ret) {
                 switch (cmp) {
diff --git a/include/asm-ia64/futex.h b/include/asm-ia64/futex.h
index 07d77f3a8cbe..8a98a2654139 100644
--- a/include/asm-ia64/futex.h
+++ b/include/asm-ia64/futex.h
@@ -59,7 +59,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
         if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
                 return -EFAULT;
 
-        inc_preempt_count();
+        pagefault_disable();
 
         switch (op) {
         case FUTEX_OP_SET:
@@ -83,7 +83,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
                 ret = -ENOSYS;
         }
 
-        dec_preempt_count();
+        pagefault_enable();
 
         if (!ret) {
                 switch (cmp) {
diff --git a/include/asm-mips/futex.h b/include/asm-mips/futex.h
index 927a216bd530..47e5679c2353 100644
--- a/include/asm-mips/futex.h
+++ b/include/asm-mips/futex.h
@@ -88,7 +88,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
         if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
                 return -EFAULT;
 
-        inc_preempt_count();
+        pagefault_disable();
 
         switch (op) {
         case FUTEX_OP_SET:
@@ -115,7 +115,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
                 ret = -ENOSYS;
         }
 
-        dec_preempt_count();
+        pagefault_enable();
 
         if (!ret) {
                 switch (cmp) {
diff --git a/include/asm-parisc/futex.h b/include/asm-parisc/futex.h
index d84bbb283fd1..dbee6e60aa81 100644
--- a/include/asm-parisc/futex.h
+++ b/include/asm-parisc/futex.h
@@ -21,7 +21,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
         if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
                 return -EFAULT;
 
-        inc_preempt_count();
+        pagefault_disable();
 
         switch (op) {
         case FUTEX_OP_SET:
@@ -33,7 +33,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
                 ret = -ENOSYS;
         }
 
-        dec_preempt_count();
+        pagefault_enable();
 
         if (!ret) {
                 switch (cmp) {
diff --git a/include/asm-powerpc/futex.h b/include/asm-powerpc/futex.h
index 936422e54891..3f3673fd3ff3 100644
--- a/include/asm-powerpc/futex.h
+++ b/include/asm-powerpc/futex.h
@@ -43,7 +43,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
         if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
                 return -EFAULT;
 
-        inc_preempt_count();
+        pagefault_disable();
 
         switch (op) {
         case FUTEX_OP_SET:
@@ -65,7 +65,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
                 ret = -ENOSYS;
         }
 
-        dec_preempt_count();
+        pagefault_enable();
 
         if (!ret) {
                 switch (cmp) {
diff --git a/include/asm-ppc/highmem.h b/include/asm-ppc/highmem.h
index 1d2c4ef81c22..f7b21ee302b4 100644
--- a/include/asm-ppc/highmem.h
+++ b/include/asm-ppc/highmem.h
@@ -79,7 +79,7 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
         unsigned long vaddr;
 
         /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
-        inc_preempt_count();
+        pagefault_disable();
         if (!PageHighMem(page))
                 return page_address(page);
 
@@ -101,8 +101,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
         unsigned int idx = type + KM_TYPE_NR*smp_processor_id();
 
         if (vaddr < KMAP_FIX_BEGIN) { // FIXME
-                dec_preempt_count();
-                preempt_check_resched();
+                pagefault_enable();
                 return;
         }
 
@@ -115,8 +114,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
         pte_clear(&init_mm, vaddr, kmap_pte+idx);
         flush_tlb_page(NULL, vaddr);
 #endif
-        dec_preempt_count();
-        preempt_check_resched();
+        pagefault_enable();
 }
 
 static inline struct page *kmap_atomic_to_page(void *ptr)
diff --git a/include/asm-sparc64/futex.h b/include/asm-sparc64/futex.h
index 7392fc4a954e..876312fe82cc 100644
--- a/include/asm-sparc64/futex.h
+++ b/include/asm-sparc64/futex.h
@@ -45,7 +45,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
         if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
                 oparg = 1 << oparg;
 
-        inc_preempt_count();
+        pagefault_disable();
 
         switch (op) {
         case FUTEX_OP_SET:
@@ -67,7 +67,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
                 ret = -ENOSYS;
         }
 
-        dec_preempt_count();
+        pagefault_enable();
 
         if (!ret) {
                 switch (cmp) {
diff --git a/include/asm-x86_64/futex.h b/include/asm-x86_64/futex.h
index 9804bf07b092..5cdfb08013c3 100644
--- a/include/asm-x86_64/futex.h
+++ b/include/asm-x86_64/futex.h
@@ -55,7 +55,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
         if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
                 return -EFAULT;
 
-        inc_preempt_count();
+        pagefault_disable();
 
         switch (op) {
         case FUTEX_OP_SET:
@@ -78,7 +78,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
                 ret = -ENOSYS;
         }
 
-        dec_preempt_count();
+        pagefault_enable();
 
         if (!ret) {
                 switch (cmp) {
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index a48d7f11c7be..67918c22339c 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -1,8 +1,43 @@
 #ifndef __LINUX_UACCESS_H__
 #define __LINUX_UACCESS_H__
 
+#include <linux/preempt.h>
 #include <asm/uaccess.h>
 
+/*
+ * These routines enable/disable the pagefault handler in that
+ * it will not take any locks and go straight to the fixup table.
+ *
+ * They have great resemblance to the preempt_disable/enable calls
+ * and in fact they are identical; this is because currently there is
+ * no other way to make the pagefault handlers do this. So we do
+ * disable preemption but we don't necessarily care about that.
+ */
+static inline void pagefault_disable(void)
+{
+        inc_preempt_count();
+        /*
+         * make sure to have issued the store before a pagefault
+         * can hit.
+         */
+        barrier();
+}
+
+static inline void pagefault_enable(void)
+{
+        /*
+         * make sure to issue those last loads/stores before enabling
+         * the pagefault handler again.
+         */
+        barrier();
+        dec_preempt_count();
+        /*
+         * make sure we do..
+         */
+        barrier();
+        preempt_check_resched();
+}
+
 #ifndef ARCH_HAS_NOCACHE_UACCESS
 
 static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
@@ -35,9 +70,9 @@ static inline unsigned long __copy_from_user_nocache(void *to,
         ({                                              \
                 long ret;                               \
                                                         \
-                inc_preempt_count();                    \
+                pagefault_disable();                    \
                 ret = __get_user(retval, addr);         \
-                dec_preempt_count();                    \
+                pagefault_enable();                     \
                 ret;                                    \
         })
 
diff --git a/kernel/futex.c b/kernel/futex.c
index 93ef30ba209f..af7b81cbde30 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -282,9 +282,9 @@ static inline int get_futex_value_locked(u32 *dest, u32 __user *from)
 {
         int ret;
 
-        inc_preempt_count();
+        pagefault_disable();
         ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
-        dec_preempt_count();
+        pagefault_enable();
 
         return ret ? -EFAULT : 0;
 }
@@ -585,9 +585,9 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
         if (!(uval & FUTEX_OWNER_DIED)) {
                 newval = FUTEX_WAITERS | new_owner->pid;
 
-                inc_preempt_count();
+                pagefault_disable();
                 curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
-                dec_preempt_count();
+                pagefault_enable();
                 if (curval == -EFAULT)
                         return -EFAULT;
                 if (curval != uval)
@@ -618,9 +618,9 @@ static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
          * There is no waiter, so we unlock the futex. The owner died
          * bit has not to be preserved here. We are the owner:
          */
-        inc_preempt_count();
+        pagefault_disable();
         oldval = futex_atomic_cmpxchg_inatomic(uaddr, uval, 0);
-        dec_preempt_count();
+        pagefault_enable();
 
         if (oldval == -EFAULT)
                 return oldval;
@@ -1158,9 +1158,9 @@ static int futex_lock_pi(u32 __user *uaddr, int detect, unsigned long sec,
          */
         newval = current->pid;
 
-        inc_preempt_count();
+        pagefault_disable();
         curval = futex_atomic_cmpxchg_inatomic(uaddr, 0, newval);
-        dec_preempt_count();
+        pagefault_enable();
 
         if (unlikely(curval == -EFAULT))
                 goto uaddr_faulted;
@@ -1183,9 +1183,9 @@ static int futex_lock_pi(u32 __user *uaddr, int detect, unsigned long sec,
         uval = curval;
         newval = uval | FUTEX_WAITERS;
 
-        inc_preempt_count();
+        pagefault_disable();
         curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
-        dec_preempt_count();
+        pagefault_enable();
 
         if (unlikely(curval == -EFAULT))
                 goto uaddr_faulted;
@@ -1215,10 +1215,10 @@ static int futex_lock_pi(u32 __user *uaddr, int detect, unsigned long sec,
                                 newval = current->pid |
                                         FUTEX_OWNER_DIED | FUTEX_WAITERS;
 
-                                inc_preempt_count();
+                                pagefault_disable();
                                 curval = futex_atomic_cmpxchg_inatomic(uaddr,
                                                                        uval, newval);
-                                dec_preempt_count();
+                                pagefault_enable();
 
                                 if (unlikely(curval == -EFAULT))
                                         goto uaddr_faulted;
@@ -1390,9 +1390,9 @@ retry_locked:
          * anyone else up:
          */
         if (!(uval & FUTEX_OWNER_DIED)) {
-                inc_preempt_count();
+                pagefault_disable();
                 uval = futex_atomic_cmpxchg_inatomic(uaddr, current->pid, 0);
-                dec_preempt_count();
+                pagefault_enable();
         }
 
         if (unlikely(uval == -EFAULT))