about summary refs log tree commit diff stats
path: root/include/linux/uaccess.h
diff options
context:
space:
mode:
Diffstat (limited to 'include/linux/uaccess.h')
-rw-r--r--  include/linux/uaccess.h  49
1 file changed, 46 insertions(+), 3 deletions(-)
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index a48d7f11c7b..975c963e578 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -1,8 +1,43 @@
 #ifndef __LINUX_UACCESS_H__
 #define __LINUX_UACCESS_H__
 
+#include <linux/preempt.h>
 #include <asm/uaccess.h>
 
+/*
+ * These routines enable/disable the pagefault handler in that
+ * it will not take any locks and go straight to the fixup table.
+ *
+ * They have great resemblance to the preempt_disable/enable calls
+ * and in fact they are identical; this is because currently there is
+ * no other way to make the pagefault handlers do this. So we do
+ * disable preemption but we don't necessarily care about that.
+ */
+static inline void pagefault_disable(void)
+{
+	inc_preempt_count();
+	/*
+	 * make sure to have issued the store before a pagefault
+	 * can hit.
+	 */
+	barrier();
+}
+
+static inline void pagefault_enable(void)
+{
+	/*
+	 * make sure to issue those last loads/stores before enabling
+	 * the pagefault handler again.
+	 */
+	barrier();
+	dec_preempt_count();
+	/*
+	 * make sure we do..
+	 */
+	barrier();
+	preempt_check_resched();
+}
+
 #ifndef ARCH_HAS_NOCACHE_UACCESS
 
 static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
@@ -30,14 +65,22 @@ static inline unsigned long __copy_from_user_nocache(void *to,
  * do_page_fault() doesn't attempt to take mmap_sem. This makes
  * probe_kernel_address() suitable for use within regions where the caller
  * already holds mmap_sem, or other locks which nest inside mmap_sem.
+ * This must be a macro because __get_user() needs to know the types of the
+ * args.
+ *
+ * We don't include enough header files to be able to do the set_fs(). We
+ * require that the probe_kernel_address() caller will do that.
  */
 #define probe_kernel_address(addr, retval)		\
 	({						\
 		long ret;				\
+		mm_segment_t old_fs = get_fs();		\
 							\
-		inc_preempt_count();			\
-		ret = __get_user(retval, addr);		\
-		dec_preempt_count();			\
+		set_fs(KERNEL_DS);			\
+		pagefault_disable();			\
+		ret = __get_user(retval, (__force typeof(retval) __user *)(addr)); \
+		pagefault_enable();			\
+		set_fs(old_fs);				\
 		ret;					\
 	})
 