Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/uaccess.h  39
1 file changed, 37 insertions(+), 2 deletions(-)
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index a48d7f11c7be..67918c22339c 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -1,8 +1,43 @@
 #ifndef __LINUX_UACCESS_H__
 #define __LINUX_UACCESS_H__
 
+#include <linux/preempt.h>
 #include <asm/uaccess.h>
 
+/*
+ * These routines enable/disable the pagefault handler in that
+ * it will not take any locks and go straight to the fixup table.
+ *
+ * They have great resemblance to the preempt_disable/enable calls
+ * and in fact they are identical; this is because currently there is
+ * no other way to make the pagefault handlers do this. So we do
+ * disable preemption but we don't necessarily care about that.
+ */
+static inline void pagefault_disable(void)
+{
+	inc_preempt_count();
+	/*
+	 * make sure to have issued the store before a pagefault
+	 * can hit.
+	 */
+	barrier();
+}
+
+static inline void pagefault_enable(void)
+{
+	/*
+	 * make sure to issue those last loads/stores before enabling
+	 * the pagefault handler again.
+	 */
+	barrier();
+	dec_preempt_count();
+	/*
+	 * make sure we do..
+	 */
+	barrier();
+	preempt_check_resched();
+}
+
 #ifndef ARCH_HAS_NOCACHE_UACCESS
 
 static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
@@ -35,9 +70,9 @@ static inline unsigned long __copy_from_user_nocache(void *to,
 ({							\
 	long ret;					\
 							\
-	inc_preempt_count();				\
+	pagefault_disable();				\
 	ret = __get_user(retval, addr);			\
-	dec_preempt_count();				\
+	pagefault_enable();				\
 	ret;						\
 })
 
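For context, a minimal sketch of how a caller might use the new helpers; the function peek_user_word below is illustrative and not part of this patch. With pagefault_disable() in effect, a fault on the user address is resolved through the exception fixup table, so __get_user() returns -EFAULT instead of sleeping in the fault handler.

/*
 * Illustrative only -- not part of the patch.  A non-sleeping read of a
 * user word: if the page is not resident, the access goes straight to
 * the fixup table and __get_user() fails rather than blocking.
 */
static int peek_user_word(u32 __user *uaddr, u32 *val)
{
	int ret;

	pagefault_disable();
	ret = __get_user(*val, uaddr);
	pagefault_enable();

	return ret;	/* 0 on success, -EFAULT on a faulting access */
}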