author	Russell King <rmk+kernel@arm.linux.org.uk>	2015-09-11 14:18:28 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2015-09-11 14:18:28 -0400
commit	c2172ce2303051764829d4958bd50a11ada0590f (patch)
tree	c465594496072249d2ad8ee4642f7c5dcd57e139
parent	a4a5a7379e4ca03c192b732d61e446994eb67bbc (diff)
parent	0b61f2c0f37983c98ed4207f3f5e265938371b68 (diff)
Merge branch 'uaccess' into fixes
-rw-r--r--	arch/arm/Kconfig	15
-rw-r--r--	arch/arm/include/asm/assembler.h	42
-rw-r--r--	arch/arm/include/asm/domain.h	57
-rw-r--r--	arch/arm/include/asm/futex.h	19
-rw-r--r--	arch/arm/include/asm/pgtable-2level-hwdef.h	1
-rw-r--r--	arch/arm/include/asm/thread_info.h	4
-rw-r--r--	arch/arm/include/asm/uaccess.h	85
-rw-r--r--	arch/arm/kernel/armksyms.c	6
-rw-r--r--	arch/arm/kernel/entry-armv.S	32
-rw-r--r--	arch/arm/kernel/entry-common.S	2
-rw-r--r--	arch/arm/kernel/entry-header.S	112
-rw-r--r--	arch/arm/kernel/head.S	5
-rw-r--r--	arch/arm/kernel/process.c	51
-rw-r--r--	arch/arm/kernel/swp_emulate.c	3
-rw-r--r--	arch/arm/kernel/traps.c	1
-rw-r--r--	arch/arm/lib/clear_user.S	6
-rw-r--r--	arch/arm/lib/copy_from_user.S	6
-rw-r--r--	arch/arm/lib/copy_to_user.S	6
-rw-r--r--	arch/arm/lib/csumpartialcopyuser.S	14
-rw-r--r--	arch/arm/lib/uaccess_with_memcpy.c	4
-rw-r--r--	arch/arm/mm/abort-ev4.S	1
-rw-r--r--	arch/arm/mm/abort-ev5t.S	4
-rw-r--r--	arch/arm/mm/abort-ev5tj.S	4
-rw-r--r--	arch/arm/mm/abort-ev6.S	8
-rw-r--r--	arch/arm/mm/abort-ev7.S	1
-rw-r--r--	arch/arm/mm/abort-lv4t.S	2
-rw-r--r--	arch/arm/mm/abort-macro.S	14
-rw-r--r--	arch/arm/mm/mmu.c	4
-rw-r--r--	arch/arm/mm/pgd.c	10
-rw-r--r--	arch/arm/nwfpe/entry.S	3
-rw-r--r--	arch/arm/xen/hypercall.S	15
31 files changed, 398 insertions(+), 139 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 1c5021002fe4..a7a2e328edf9 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1700,6 +1700,21 @@ config HIGHPTE
 	  consumed by page tables.  Setting this option will allow
 	  user-space 2nd level page tables to reside in high memory.
 
+config CPU_SW_DOMAIN_PAN
+	bool "Enable use of CPU domains to implement privileged no-access"
+	depends on MMU && !ARM_LPAE
+	default y
+	help
+	  Increase kernel security by ensuring that normal kernel accesses
+	  are unable to access userspace addresses.  This can help prevent
+	  use-after-free bugs becoming an exploitable privilege escalation
+	  by ensuring that magic values (such as LIST_POISON) will always
+	  fault when dereferenced.
+
+	  CPUs with low-vector mappings use a best-efforts implementation.
+	  Their lower 1MB needs to remain accessible for the vectors, but
+	  the remainder of userspace will become appropriately inaccessible.
+
 config HW_PERF_EVENTS
 	bool "Enable hardware performance counter support for perf events"
 	depends on PERF_EVENTS
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 4abe57279c66..9007c518d1d8 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -445,6 +445,48 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
 #endif
 	.endm
 
+	.macro	uaccess_disable, tmp, isb=1
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	/*
+	 * Whenever we re-enter userspace, the domains should always be
+	 * set appropriately.
+	 */
+	mov	\tmp, #DACR_UACCESS_DISABLE
+	mcr	p15, 0, \tmp, c3, c0, 0		@ Set domain register
+	.if	\isb
+	instr_sync
+	.endif
+#endif
+	.endm
+
+	.macro	uaccess_enable, tmp, isb=1
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	/*
+	 * Whenever we re-enter userspace, the domains should always be
+	 * set appropriately.
+	 */
+	mov	\tmp, #DACR_UACCESS_ENABLE
+	mcr	p15, 0, \tmp, c3, c0, 0
+	.if	\isb
+	instr_sync
+	.endif
+#endif
+	.endm
+
+	.macro	uaccess_save, tmp
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	mrc	p15, 0, \tmp, c3, c0, 0
+	str	\tmp, [sp, #S_FRAME_SIZE]
+#endif
+	.endm
+
+	.macro	uaccess_restore
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	ldr	r0, [sp, #S_FRAME_SIZE]
+	mcr	p15, 0, r0, c3, c0, 0
+#endif
+	.endm
+
 	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
 	.macro	ret\c, reg
 #if __LINUX_ARM_ARCH__ < 6
diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index 6ddbe446425e..fc8ba1663601 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -12,6 +12,7 @@
 
 #ifndef __ASSEMBLY__
 #include <asm/barrier.h>
+#include <asm/thread_info.h>
 #endif
 
 /*
@@ -34,15 +35,14 @@
  */
 #ifndef CONFIG_IO_36
 #define DOMAIN_KERNEL	0
-#define DOMAIN_TABLE	0
 #define DOMAIN_USER	1
 #define DOMAIN_IO	2
 #else
 #define DOMAIN_KERNEL	2
-#define DOMAIN_TABLE	2
 #define DOMAIN_USER	1
 #define DOMAIN_IO	0
 #endif
+#define DOMAIN_VECTORS	3
 
 /*
  * Domain types
@@ -55,30 +55,65 @@
 #define DOMAIN_MANAGER	1
 #endif
 
-#define domain_val(dom,type)	((type) << (2*(dom)))
+#define domain_mask(dom)	((3) << (2 * (dom)))
+#define domain_val(dom,type)	((type) << (2 * (dom)))
+
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+#define DACR_INIT \
+	(domain_val(DOMAIN_USER, DOMAIN_NOACCESS) | \
+	 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+	 domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+	 domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
+#else
+#define DACR_INIT \
+	(domain_val(DOMAIN_USER, DOMAIN_CLIENT) | \
+	 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+	 domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+	 domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
+#endif
+
+#define __DACR_DEFAULT \
+	domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT) | \
+	domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+	domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT)
+
+#define DACR_UACCESS_DISABLE \
+	(__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
+#define DACR_UACCESS_ENABLE \
+	(__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_CLIENT))
 
 #ifndef __ASSEMBLY__
 
-#ifdef CONFIG_CPU_USE_DOMAINS
+static inline unsigned int get_domain(void)
+{
+	unsigned int domain;
+
+	asm(
+	"mrc	p15, 0, %0, c3, c0	@ get domain"
+	 : "=r" (domain)
+	 : "m" (current_thread_info()->cpu_domain));
+
+	return domain;
+}
+
 static inline void set_domain(unsigned val)
 {
 	asm volatile(
 	"mcr	p15, 0, %0, c3, c0	@ set domain"
-	  : : "r" (val));
+	  : : "r" (val) : "memory");
 	isb();
 }
 
+#ifdef CONFIG_CPU_USE_DOMAINS
 #define modify_domain(dom,type)					\
 	do {							\
-	struct thread_info *thread = current_thread_info();	\
-	unsigned int domain = thread->cpu_domain;		\
-	domain &= ~domain_val(dom, DOMAIN_MANAGER);		\
-	thread->cpu_domain = domain | domain_val(dom, type);	\
-	set_domain(thread->cpu_domain);				\
+	unsigned int domain = get_domain();			\
+	domain &= ~domain_mask(dom);				\
+	domain = domain | domain_val(dom, type);		\
+	set_domain(domain);					\
 	} while (0)
 
 #else
-static inline void set_domain(unsigned val) { }
 static inline void modify_domain(unsigned dom, unsigned type)	{ }
 #endif
 
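For reference, the DACR packing above is easy to check by hand: each domain owns a two-bit field at bit position 2*dom, so the two uaccess states differ only within the DOMAIN_USER field. A minimal standalone C sketch, not part of the patch, assuming the non-CONFIG_IO_36 numbering shown above:

/*
 * Standalone sketch (not kernel code): reproduces the DACR arithmetic
 * above, assuming KERNEL=0, USER=1, IO=2, VECTORS=3 and the usual
 * domain types (NOACCESS=0, CLIENT=1, MANAGER=3).  Each domain owns a
 * 2-bit field at bit position 2*dom in the 32-bit DACR.
 */
#include <assert.h>
#include <stdio.h>

#define DOMAIN_KERNEL	0
#define DOMAIN_USER	1
#define DOMAIN_IO	2
#define DOMAIN_VECTORS	3

#define DOMAIN_NOACCESS	0
#define DOMAIN_CLIENT	1
#define DOMAIN_MANAGER	3

#define domain_mask(dom)	((3) << (2 * (dom)))
#define domain_val(dom, type)	((type) << (2 * (dom)))

int main(void)
{
	unsigned int dflt = domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT) |
			    domain_val(DOMAIN_IO, DOMAIN_CLIENT) |
			    domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT);
	unsigned int disable = dflt | domain_val(DOMAIN_USER, DOMAIN_NOACCESS);
	unsigned int enable  = dflt | domain_val(DOMAIN_USER, DOMAIN_CLIENT);

	/* The two states agree everywhere outside the DOMAIN_USER field. */
	assert((disable & ~domain_mask(DOMAIN_USER)) ==
	       (enable  & ~domain_mask(DOMAIN_USER)));

	printf("DACR_UACCESS_DISABLE = 0x%02x\n", disable);	/* 0x51 */
	printf("DACR_UACCESS_ENABLE  = 0x%02x\n", enable);	/* 0x55 */
	return 0;
}

Toggling PAN is therefore a single mcr of one of two constants into the DACR, which is exactly what the uaccess_enable/uaccess_disable macros added to assembler.h above do.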
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 5eed82809d82..6795368ad023 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -22,8 +22,11 @@
 #ifdef CONFIG_SMP
 
 #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg)	\
+({								\
+	unsigned int __ua_flags;				\
 	smp_mb();						\
 	prefetchw(uaddr);					\
+	__ua_flags = uaccess_save_and_enable();			\
 	__asm__ __volatile__(					\
 	"1:	ldrex	%1, [%3]\n"				\
 	"	" insn "\n"					\
@@ -34,12 +37,15 @@
 	__futex_atomic_ex_table("%5")				\
 	: "=&r" (ret), "=&r" (oldval), "=&r" (tmp)		\
 	: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT)		\
-	: "cc", "memory")
+	: "cc", "memory");					\
+	uaccess_restore(__ua_flags);				\
+})
 
 static inline int
 futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 			      u32 oldval, u32 newval)
 {
+	unsigned int __ua_flags;
 	int ret;
 	u32 val;
 
@@ -49,6 +55,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	smp_mb();
 	/* Prefetching cannot fault */
 	prefetchw(uaddr);
+	__ua_flags = uaccess_save_and_enable();
 	__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
 	"1:	ldrex	%1, [%4]\n"
 	"	teq	%1, %2\n"
@@ -61,6 +68,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	: "=&r" (ret), "=&r" (val)
 	: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
 	: "cc", "memory");
+	uaccess_restore(__ua_flags);
 	smp_mb();
 
 	*uval = val;
@@ -73,6 +81,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 #include <asm/domain.h>
 
 #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg)	\
+({								\
+	unsigned int __ua_flags = uaccess_save_and_enable();	\
 	__asm__ __volatile__(					\
 	"1:	" TUSER(ldr) "	%1, [%3]\n"			\
 	"	" insn "\n"					\
@@ -81,12 +91,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	__futex_atomic_ex_table("%5")				\
 	: "=&r" (ret), "=&r" (oldval), "=&r" (tmp)		\
 	: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT)		\
-	: "cc", "memory")
+	: "cc", "memory");					\
+	uaccess_restore(__ua_flags);				\
+})
 
 static inline int
 futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 			      u32 oldval, u32 newval)
 {
+	unsigned int __ua_flags;
 	int ret = 0;
 	u32 val;
 
@@ -94,6 +107,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 		return -EFAULT;
 
 	preempt_disable();
+	__ua_flags = uaccess_save_and_enable();
 	__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
 	"1:	" TUSER(ldr) "	%1, [%4]\n"
 	"	teq	%1, %2\n"
@@ -103,6 +117,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	: "+r" (ret), "=&r" (val)
 	: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
 	: "cc", "memory");
+	uaccess_restore(__ua_flags);
 
 	*uval = val;
 	preempt_enable();
diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
index 5e68278e953e..d0131ee6f6af 100644
--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
@@ -23,6 +23,7 @@
 #define PMD_PXNTABLE		(_AT(pmdval_t, 1) << 2)     /* v7 */
 #define PMD_BIT4		(_AT(pmdval_t, 1) << 4)
 #define PMD_DOMAIN(x)		(_AT(pmdval_t, (x)) << 5)
+#define PMD_DOMAIN_MASK		PMD_DOMAIN(0x0f)
 #define PMD_PROTECTION		(_AT(pmdval_t, 1) << 9)		/* v5 */
 /*
  * - section
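The new PMD_DOMAIN_MASK covers the four-bit domain field at bits 8:5 of a first-level descriptor; the pgd.c hunk further below uses it to re-tag a pgd entry with DOMAIN_VECTORS. A standalone C sketch of that bit manipulation (illustrative only; the descriptor value and helper name are made up):

#include <assert.h>

typedef unsigned int pmdval_t;		/* 32-bit first-level descriptor */

#define PMD_DOMAIN(x)		((pmdval_t)(x) << 5)
#define PMD_DOMAIN_MASK		PMD_DOMAIN(0x0f)	/* bits 8:5 */
#define DOMAIN_VECTORS		3

/*
 * Re-tag a first-level descriptor with the vectors domain, preserving
 * every other bit - the same masking the pgd_alloc() hunk below
 * performs on *new_pmd.
 */
static pmdval_t set_vectors_domain(pmdval_t pmd)
{
	pmd &= ~PMD_DOMAIN_MASK;
	pmd |= PMD_DOMAIN(DOMAIN_VECTORS);
	return pmd;
}

int main(void)
{
	/* e.g. a hypothetical table descriptor previously in domain 1 */
	pmdval_t pmd = 0x4cc01 | PMD_DOMAIN(1);

	pmd = set_vectors_domain(pmd);
	assert((pmd & PMD_DOMAIN_MASK) == PMD_DOMAIN(DOMAIN_VECTORS));
	return 0;
}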
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index bd32eded3e50..ae02e68b61fc 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -25,7 +25,6 @@
 struct task_struct;
 
 #include <asm/types.h>
-#include <asm/domain.h>
 
 typedef unsigned long mm_segment_t;
 
@@ -74,9 +73,6 @@ struct thread_info {
 	.flags		= 0,						\
 	.preempt_count	= INIT_PREEMPT_COUNT,				\
 	.addr_limit	= KERNEL_DS,					\
-	.cpu_domain	= domain_val(DOMAIN_USER, DOMAIN_MANAGER) |	\
-			  domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) |	\
-			  domain_val(DOMAIN_IO, DOMAIN_CLIENT),		\
 }
 
 #define init_thread_info	(init_thread_union.thread_info)
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 74b17d09ef7a..01bae13b2cea 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -50,6 +50,35 @@ struct exception_table_entry
 extern int fixup_exception(struct pt_regs *regs);
 
 /*
+ * These two functions allow hooking accesses to userspace to increase
+ * system integrity by ensuring that the kernel can not inadvertently
+ * perform such accesses (eg, via list poison values) which could then
+ * be exploited for privilege escalation.
+ */
+static inline unsigned int uaccess_save_and_enable(void)
+{
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	unsigned int old_domain = get_domain();
+
+	/* Set the current domain access to permit user accesses */
+	set_domain((old_domain & ~domain_mask(DOMAIN_USER)) |
+		   domain_val(DOMAIN_USER, DOMAIN_CLIENT));
+
+	return old_domain;
+#else
+	return 0;
+#endif
+}
+
+static inline void uaccess_restore(unsigned int flags)
+{
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	/* Restore the user access mask */
+	set_domain(flags);
+#endif
+}
+
+/*
  * These two are intentionally not defined anywhere - if the kernel
  * code generates any references to them, that's a bug.
  */
@@ -165,6 +194,7 @@ extern int __get_user_64t_4(void *);
 		register typeof(x) __r2 asm("r2");		\
 		register unsigned long __l asm("r1") = __limit;	\
 		register int __e asm("r0");			\
+		unsigned int __ua_flags = uaccess_save_and_enable(); \
 		switch (sizeof(*(__p))) {			\
 		case 1:						\
 			if (sizeof((x)) >= 8)			\
@@ -192,6 +222,7 @@ extern int __get_user_64t_4(void *);
 			break;					\
 		default: __e = __get_user_bad(); break;		\
 		}						\
+		uaccess_restore(__ua_flags);			\
 		x = (typeof(*(p))) __r2;			\
 		__e;						\
 	})
@@ -224,6 +255,7 @@ extern int __put_user_8(void *, unsigned long long);
 		register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
 		register unsigned long __l asm("r1") = __limit;	\
 		register int __e asm("r0");			\
+		unsigned int __ua_flags = uaccess_save_and_enable(); \
 		switch (sizeof(*(__p))) {			\
 		case 1:						\
 			__put_user_x(__r2, __p, __e, __l, 1);	\
@@ -239,6 +271,7 @@ extern int __put_user_8(void *, unsigned long long);
 			break;					\
 		default: __e = __put_user_bad(); break;		\
 		}						\
+		uaccess_restore(__ua_flags);			\
 		__e;						\
 	})
 
@@ -300,14 +333,17 @@ static inline void set_fs(mm_segment_t fs)
 do {									\
 	unsigned long __gu_addr = (unsigned long)(ptr);			\
 	unsigned long __gu_val;						\
+	unsigned int __ua_flags;					\
 	__chk_user_ptr(ptr);						\
 	might_fault();							\
+	__ua_flags = uaccess_save_and_enable();				\
 	switch (sizeof(*(ptr))) {					\
 	case 1:	__get_user_asm_byte(__gu_val, __gu_addr, err);	break;	\
 	case 2:	__get_user_asm_half(__gu_val, __gu_addr, err);	break;	\
 	case 4:	__get_user_asm_word(__gu_val, __gu_addr, err);	break;	\
 	default: (__gu_val) = __get_user_bad();				\
 	}								\
+	uaccess_restore(__ua_flags);					\
 	(x) = (__typeof__(*(ptr)))__gu_val;				\
 } while (0)
 
@@ -381,9 +417,11 @@ do { \
 #define __put_user_err(x, ptr, err)					\
 do {									\
 	unsigned long __pu_addr = (unsigned long)(ptr);			\
+	unsigned int __ua_flags;					\
 	__typeof__(*(ptr)) __pu_val = (x);				\
 	__chk_user_ptr(ptr);						\
 	might_fault();							\
+	__ua_flags = uaccess_save_and_enable();				\
 	switch (sizeof(*(ptr))) {					\
 	case 1: __put_user_asm_byte(__pu_val, __pu_addr, err);	break;	\
 	case 2: __put_user_asm_half(__pu_val, __pu_addr, err);	break;	\
@@ -391,6 +429,7 @@ do { \
 	case 8:	__put_user_asm_dword(__pu_val, __pu_addr, err);	break;	\
 	default: __put_user_bad();					\
 	}								\
+	uaccess_restore(__ua_flags);					\
 } while (0)
 
 #define __put_user_asm_byte(x, __pu_addr, err)			\
@@ -474,11 +513,46 @@ do { \
 
 
 #ifdef CONFIG_MMU
-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
-extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
-extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
+extern unsigned long __must_check
+arm_copy_from_user(void *to, const void __user *from, unsigned long n);
+
+static inline unsigned long __must_check
+__copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	unsigned int __ua_flags = uaccess_save_and_enable();
+	n = arm_copy_from_user(to, from, n);
+	uaccess_restore(__ua_flags);
+	return n;
+}
+
+extern unsigned long __must_check
+arm_copy_to_user(void __user *to, const void *from, unsigned long n);
+extern unsigned long __must_check
+__copy_to_user_std(void __user *to, const void *from, unsigned long n);
+
+static inline unsigned long __must_check
+__copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	unsigned int __ua_flags = uaccess_save_and_enable();
+	n = arm_copy_to_user(to, from, n);
+	uaccess_restore(__ua_flags);
+	return n;
+}
+
+extern unsigned long __must_check
+arm_clear_user(void __user *addr, unsigned long n);
+extern unsigned long __must_check
+__clear_user_std(void __user *addr, unsigned long n);
+
+static inline unsigned long __must_check
+__clear_user(void __user *addr, unsigned long n)
+{
+	unsigned int __ua_flags = uaccess_save_and_enable();
+	n = arm_clear_user(addr, n);
+	uaccess_restore(__ua_flags);
+	return n;
+}
+
 #else
 #define __copy_from_user(to, from, n)	(memcpy(to, (void __force *)from, n), 0)
 #define __copy_to_user(to, from, n)	(memcpy((void __force *)to, from, n), 0)
@@ -511,6 +585,7 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo
 	return n;
 }
 
+/* These are from lib/ code, and use __get_user() and friends */
 extern long strncpy_from_user(char *dest, const char __user *src, long count);
 
 extern __must_check long strlen_user(const char __user *str);
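The wrappers above fix the bracket pattern used throughout the series: save the current DACR, open the user window, do the access, then restore whatever was in force so that nested sections unwind correctly. A hypothetical extra call site following the same pattern (probe_user_word is an invented name, not part of the patch; kernel-context sketch only):

/*
 * Hypothetical call site following the pattern above: open the user
 * window, do the access, and restore the previous state, so nesting
 * inside an already-enabled section is harmless.
 */
static inline int probe_user_word(unsigned int *dst,
				  const unsigned int __user *src)
{
	unsigned int __ua_flags = uaccess_save_and_enable();
	int err = __get_user(*dst, src);	/* user access happens here */

	uaccess_restore(__ua_flags);
	return err;
}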
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index 5e5a51a99e68..f89811fb9a55 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -97,9 +97,9 @@ EXPORT_SYMBOL(mmiocpy);
 #ifdef CONFIG_MMU
 EXPORT_SYMBOL(copy_page);
 
-EXPORT_SYMBOL(__copy_from_user);
-EXPORT_SYMBOL(__copy_to_user);
-EXPORT_SYMBOL(__clear_user);
+EXPORT_SYMBOL(arm_copy_from_user);
+EXPORT_SYMBOL(arm_copy_to_user);
+EXPORT_SYMBOL(arm_clear_user);
 
 EXPORT_SYMBOL(__get_user_1);
 EXPORT_SYMBOL(__get_user_2);
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index cb4fb1e69778..3e1c26eb32b4 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -149,10 +149,10 @@ ENDPROC(__und_invalid)
 #define SPFIX(code...)
 #endif
 
-	.macro	svc_entry, stack_hole=0, trace=1
+	.macro	svc_entry, stack_hole=0, trace=1, uaccess=1
  UNWIND(.fnstart		)
  UNWIND(.save {r0 - pc}		)
-	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+	sub	sp, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
 #ifdef CONFIG_THUMB2_KERNEL
  SPFIX(	str	r0, [sp]	)	@ temporarily saved
  SPFIX(	mov	r0, sp		)
@@ -167,7 +167,7 @@ ENDPROC(__und_invalid)
 	ldmia	r0, {r3 - r5}
 	add	r7, sp, #S_SP - 4	@ here for interlock avoidance
 	mov	r6, #-1			@  ""  ""      ""       ""
-	add	r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+	add	r2, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
  SPFIX(	addeq	r2, r2, #4	)
 	str	r3, [sp, #-4]!	@ save the "real" r0 copied
 					@ from the exception stack
@@ -185,6 +185,11 @@ ENDPROC(__und_invalid)
 	@
 	stmia	r7, {r2 - r6}
 
+	uaccess_save r0
+	.if \uaccess
+	uaccess_disable r0
+	.endif
+
 	.if \trace
 #ifdef CONFIG_TRACE_IRQFLAGS
 	bl	trace_hardirqs_off
@@ -194,7 +199,7 @@ ENDPROC(__und_invalid)
 
 	.align	5
 __dabt_svc:
-	svc_entry
+	svc_entry uaccess=0
 	mov	r2, sp
 	dabt_helper
  THUMB(	ldr	r5, [sp, #S_PSR]	)	@ potentially updated CPSR
@@ -368,7 +373,7 @@ ENDPROC(__fiq_abt)
 #error "sizeof(struct pt_regs) must be a multiple of 8"
 #endif
 
-	.macro	usr_entry, trace=1
+	.macro	usr_entry, trace=1, uaccess=1
  UNWIND(.fnstart	)
  UNWIND(.cantunwind	)	@ don't unwind the user space
 	sub	sp, sp, #S_FRAME_SIZE
@@ -400,6 +405,10 @@ ENDPROC(__fiq_abt)
  ARM(	stmdb	r0, {sp, lr}^			)
  THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)
 
+	.if \uaccess
+	uaccess_disable ip
+	.endif
+
 	@ Enable the alignment trap while in kernel mode
  ATRAP(	teq	r8, r7)
  ATRAP( mcrne	p15, 0, r8, c1, c0, 0)
@@ -435,7 +444,7 @@ ENDPROC(__fiq_abt)
 
 	.align	5
 __dabt_usr:
-	usr_entry
+	usr_entry uaccess=0
 	kuser_cmpxchg_check
 	mov	r2, sp
 	dabt_helper
@@ -458,7 +467,7 @@ ENDPROC(__irq_usr)
 
 	.align	5
 __und_usr:
-	usr_entry
+	usr_entry uaccess=0
 
 	mov	r2, r4
 	mov	r3, r5
@@ -484,6 +493,8 @@ __und_usr:
 1:	ldrt	r0, [r4]
  ARM_BE8(rev	r0, r0)			@ little endian instruction
 
+	uaccess_disable ip
+
 	@ r0 = 32-bit ARM instruction which caused the exception
 	@ r2 = PC value for the following instruction (:= regs->ARM_pc)
 	@ r4 = PC value for the faulting instruction
@@ -518,9 +529,10 @@ __und_usr_thumb:
 2:	ldrht	r5, [r4]
 ARM_BE8(rev16	r5, r5)			@ little endian instruction
 	cmp	r5, #0xe800		@ 32bit instruction if xx != 0
-	blo	__und_usr_fault_16	@ 16bit undefined instruction
+	blo	__und_usr_fault_16_pan	@ 16bit undefined instruction
 3:	ldrht	r0, [r2]
 ARM_BE8(rev16	r0, r0)			@ little endian instruction
+	uaccess_disable ip
 	add	r2, r2, #2		@ r2 is PC + 2, make it PC + 4
 	str	r2, [sp, #S_PC]		@ it's a 2x16bit instr, update
 	orr	r0, r0, r5, lsl #16
@@ -715,6 +727,8 @@ ENDPROC(no_fp)
 __und_usr_fault_32:
 	mov	r1, #4
 	b	1f
+__und_usr_fault_16_pan:
+	uaccess_disable ip
 __und_usr_fault_16:
 	mov	r1, #2
 1:	mov	r0, sp
@@ -770,6 +784,8 @@ ENTRY(__switch_to)
 	ldr	r4, [r2, #TI_TP_VALUE]
 	ldr	r5, [r2, #TI_TP_VALUE + 4]
 #ifdef CONFIG_CPU_USE_DOMAINS
+	mrc	p15, 0, r6, c3, c0, 0		@ Get domain register
+	str	r6, [r1, #TI_CPU_DOMAIN]	@ Save old domain register
 	ldr	r6, [r2, #TI_CPU_DOMAIN]
 #endif
 	switch_tls r1, r4, r5, r3, r7
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index b48dd4f37f80..61974dfba132 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -174,6 +174,8 @@ ENTRY(vector_swi)
  USER(	ldr	scno, [lr, #-4]		)	@ get SWI instruction
 #endif
 
+	uaccess_disable tbl
+
 	adr	tbl, sys_call_table		@ load syscall table pointer
 
 #if defined(CONFIG_OABI_COMPAT)
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 1a0045abead7..0d22ad206d52 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -196,7 +196,7 @@
 	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
 	.endm
 
-#ifndef CONFIG_THUMB2_KERNEL
+
 	.macro	svc_exit, rpsr, irq = 0
 	.if	\irq != 0
 	@ IRQs already off
@@ -215,6 +215,10 @@
 	blne	trace_hardirqs_off
 #endif
 	.endif
+	uaccess_restore
+
+#ifndef CONFIG_THUMB2_KERNEL
+	@ ARM mode SVC restore
 	msr	spsr_cxsf, \rpsr
 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
 	@ We must avoid clrex due to Cortex-A15 erratum #830321
@@ -222,6 +226,20 @@
 	strex	r1, r2, [r0]			@ clear the exclusive monitor
 #endif
 	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
+#else
+	@ Thumb mode SVC restore
+	ldr	lr, [sp, #S_SP]			@ top of the stack
+	ldrd	r0, r1, [sp, #S_LR]		@ calling lr and pc
+
+	@ We must avoid clrex due to Cortex-A15 erratum #830321
+	strex	r2, r1, [sp, #S_LR]		@ clear the exclusive monitor
+
+	stmdb	lr!, {r0, r1, \rpsr}		@ calling lr and rfe context
+	ldmia	sp, {r0 - r12}
+	mov	sp, lr
+	ldr	lr, [sp], #4
+	rfeia	sp!
+#endif
 	.endm
 
@@ -241,6 +259,9 @@
 	@ on the stack remains correct).
 	@
 	.macro  svc_exit_via_fiq
+	uaccess_restore
+#ifndef CONFIG_THUMB2_KERNEL
+	@ ARM mode restore
 	mov	r0, sp
 	ldmib	r0, {r1 - r14}	@ abort is deadly from here onward (it will
 				@ clobber state restored below)
@@ -250,9 +271,27 @@
 	msr	spsr_cxsf, r9
 	ldr	r0, [r0, #S_R0]
 	ldmia	r8, {pc}^
+#else
+	@ Thumb mode restore
+	add	r0, sp, #S_R2
+	ldr	lr, [sp, #S_LR]
+	ldr	sp, [sp, #S_SP]	@ abort is deadly from here onward (it will
+				@ clobber state restored below)
+	ldmia	r0, {r2 - r12}
+	mov	r1, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
+	msr	cpsr_c, r1
+	sub	r0, #S_R2
+	add	r8, r0, #S_PC
+	ldmia	r0, {r0 - r1}
+	rfeia	r8
+#endif
 	.endm
 
+
 	.macro	restore_user_regs, fast = 0, offset = 0
+	uaccess_enable r1, isb=0
+#ifndef CONFIG_THUMB2_KERNEL
+	@ ARM mode restore
 	mov	r2, sp
 	ldr	r1, [r2, #\offset + S_PSR]	@ get calling cpsr
 	ldr	lr, [r2, #\offset + S_PC]!	@ get pc
@@ -270,72 +309,16 @@
 					@ after ldm {}^
 	add	sp, sp, #\offset + S_FRAME_SIZE
 	movs	pc, lr				@ return & move spsr_svc into cpsr
-	.endm
-
-#else	/* CONFIG_THUMB2_KERNEL */
-	.macro	svc_exit, rpsr, irq = 0
-	.if	\irq != 0
-	@ IRQs already off
-#ifdef CONFIG_TRACE_IRQFLAGS
-	@ The parent context IRQs must have been enabled to get here in
-	@ the first place, so there's no point checking the PSR I bit.
-	bl	trace_hardirqs_on
-#endif
-	.else
-	@ IRQs off again before pulling preserved data off the stack
-	disable_irq_notrace
-#ifdef CONFIG_TRACE_IRQFLAGS
-	tst	\rpsr, #PSR_I_BIT
-	bleq	trace_hardirqs_on
-	tst	\rpsr, #PSR_I_BIT
-	blne	trace_hardirqs_off
-#endif
-	.endif
-	ldr	lr, [sp, #S_SP]			@ top of the stack
-	ldrd	r0, r1, [sp, #S_LR]		@ calling lr and pc
-
-	@ We must avoid clrex due to Cortex-A15 erratum #830321
-	strex	r2, r1, [sp, #S_LR]		@ clear the exclusive monitor
-
-	stmdb	lr!, {r0, r1, \rpsr}		@ calling lr and rfe context
-	ldmia	sp, {r0 - r12}
-	mov	sp, lr
-	ldr	lr, [sp], #4
-	rfeia	sp!
-	.endm
-
-	@
-	@ svc_exit_via_fiq - like svc_exit but switches to FIQ mode before exit
-	@
-	@ For full details see non-Thumb implementation above.
-	@
-	.macro svc_exit_via_fiq
-	add	r0, sp, #S_R2
-	ldr	lr, [sp, #S_LR]
-	ldr	sp, [sp, #S_SP]	@ abort is deadly from here onward (it will
-				@ clobber state restored below)
-	ldmia	r0, {r2 - r12}
-	mov	r1, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
-	msr	cpsr_c, r1
-	sub	r0, #S_R2
-	add	r8, r0, #S_PC
-	ldmia	r0, {r0 - r1}
-	rfeia	r8
-	.endm
-
-#ifdef CONFIG_CPU_V7M
-	/*
-	 * Note we don't need to do clrex here as clearing the local monitor is
-	 * part of each exception entry and exit sequence.
-	 */
-	.macro	restore_user_regs, fast = 0, offset = 0
+#elif defined(CONFIG_CPU_V7M)
+	@ V7M restore.
+	@ Note that we don't need to do clrex here as clearing the local
+	@ monitor is part of the exception entry and exit sequence.
 	.if	\offset
 	add	sp, #\offset
 	.endif
 	v7m_exception_slow_exit ret_r0 = \fast
-	.endm
-#else	/* ifdef CONFIG_CPU_V7M */
-	.macro	restore_user_regs, fast = 0, offset = 0
+#else
+	@ Thumb mode restore
 	mov	r2, sp
 	load_user_sp_lr r2, r3, \offset + S_SP	@ calling sp, lr
 	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
@@ -353,9 +336,8 @@
 	.endif
 	add	sp, sp, #S_FRAME_SIZE - S_SP
 	movs	pc, lr				@ return & move spsr_svc into cpsr
-	.endm
-#endif	/* ifdef CONFIG_CPU_V7M / else */
 #endif	/* !CONFIG_THUMB2_KERNEL */
+	.endm
 
 /*
  * Context tracking subsystem.  Used to instrument transitions
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 29e2991465cb..04286fd9e09c 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -464,10 +464,7 @@ __enable_mmu:
 #ifdef CONFIG_ARM_LPAE
 	mcrr	p15, 0, r4, r5, c2		@ load TTBR0
 #else
-	mov	r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
-		      domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
-		      domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
-		      domain_val(DOMAIN_IO, DOMAIN_CLIENT))
+	mov	r5, #DACR_INIT
 	mcr	p15, 0, r5, c3, c0, 0		@ load domain access register
 	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer
 #endif
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index f192a2a41719..e550a4541f48 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -129,12 +129,36 @@ void __show_regs(struct pt_regs *regs)
 	buf[4] = '\0';
 
 #ifndef CONFIG_CPU_V7M
-	printk("Flags: %s  IRQs o%s  FIQs o%s  Mode %s  ISA %s  Segment %s\n",
-		buf, interrupts_enabled(regs) ? "n" : "ff",
-		fast_interrupts_enabled(regs) ? "n" : "ff",
-		processor_modes[processor_mode(regs)],
-		isa_modes[isa_mode(regs)],
-		get_fs() == get_ds() ? "kernel" : "user");
+	{
+		unsigned int domain = get_domain();
+		const char *segment;
+
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+		/*
+		 * Get the domain register for the parent context. In user
+		 * mode, we don't save the DACR, so let's use what it should
+		 * be. For other modes, we place it after the pt_regs struct.
+		 */
+		if (user_mode(regs))
+			domain = DACR_UACCESS_ENABLE;
+		else
+			domain = *(unsigned int *)(regs + 1);
+#endif
+
+		if ((domain & domain_mask(DOMAIN_USER)) ==
+		    domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
+			segment = "none";
+		else if (get_fs() == get_ds())
+			segment = "kernel";
+		else
+			segment = "user";
+
+		printk("Flags: %s  IRQs o%s  FIQs o%s  Mode %s  ISA %s  Segment %s\n",
+			buf, interrupts_enabled(regs) ? "n" : "ff",
+			fast_interrupts_enabled(regs) ? "n" : "ff",
+			processor_modes[processor_mode(regs)],
+			isa_modes[isa_mode(regs)], segment);
+	}
 #else
 	printk("xPSR: %08lx\n", regs->ARM_cpsr);
 #endif
@@ -146,10 +170,9 @@ void __show_regs(struct pt_regs *regs)
 	buf[0] = '\0';
 #ifdef CONFIG_CPU_CP15_MMU
 	{
-		unsigned int transbase, dac;
+		unsigned int transbase, dac = get_domain();
 		asm("mrc p15, 0, %0, c2, c0\n\t"
-		    "mrc p15, 0, %1, c3, c0\n"
-		    : "=r" (transbase), "=r" (dac));
+		    : "=r" (transbase));
 		snprintf(buf, sizeof(buf), "  Table: %08x  DAC: %08x",
 			transbase, dac);
 	}
@@ -210,6 +233,16 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
 
 	memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
 
+#ifdef CONFIG_CPU_USE_DOMAINS
+	/*
+	 * Copy the initial value of the domain access control register
+	 * from the current thread: thread->addr_limit will have been
+	 * copied from the current thread via setup_thread_stack() in
+	 * kernel/fork.c
+	 */
+	thread->cpu_domain = get_domain();
+#endif
+
 	if (likely(!(p->flags & PF_KTHREAD))) {
 		*childregs = *current_pt_regs();
 		childregs->ARM_r0 = 0;
diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
index 1361756782c7..5b26e7efa9ea 100644
--- a/arch/arm/kernel/swp_emulate.c
+++ b/arch/arm/kernel/swp_emulate.c
@@ -141,11 +141,14 @@ static int emulate_swpX(unsigned int address, unsigned int *data,
 
 	while (1) {
 		unsigned long temp;
+		unsigned int __ua_flags;
 
+		__ua_flags = uaccess_save_and_enable();
 		if (type == TYPE_SWPB)
 			__user_swpb_asm(*data, address, res, temp);
 		else
 			__user_swp_asm(*data, address, res, temp);
+		uaccess_restore(__ua_flags);
 
 		if (likely(res != -EAGAIN) || signal_pending(current))
 			break;
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index d358226236f2..969f9d9e665f 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -870,7 +870,6 @@ void __init early_trap_init(void *vectors_base)
 	kuser_init(vectors_base);
 
 	flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
-	modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
 #else /* ifndef CONFIG_CPU_V7M */
 	/*
 	 * on V7-M there is no need to copy the vector table to a dedicated
diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
index 1710fd7db2d5..970d6c043774 100644
--- a/arch/arm/lib/clear_user.S
+++ b/arch/arm/lib/clear_user.S
@@ -12,14 +12,14 @@
 
 		.text
 
-/* Prototype: int    __clear_user(void *addr, size_t sz)
+/* Prototype: unsigned long arm_clear_user(void *addr, size_t sz)
  * Purpose  : clear some user memory
  * Params   : addr - user memory address to clear
  *          : sz   - number of bytes to clear
  * Returns  : number of bytes NOT cleared
  */
 ENTRY(__clear_user_std)
-WEAK(__clear_user)
+WEAK(arm_clear_user)
 		stmfd	sp!, {r1, lr}
 		mov	r2, #0
 		cmp	r1, #4
@@ -44,7 +44,7 @@ WEAK(__clear_user)
 USER(		strnebt	r2, [r0])
 		mov	r0, #0
 		ldmfd	sp!, {r1, pc}
-ENDPROC(__clear_user)
+ENDPROC(arm_clear_user)
 ENDPROC(__clear_user_std)
 
 		.pushsection .text.fixup,"ax"
diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
index 7a235b9952be..1512bebfbf1b 100644
--- a/arch/arm/lib/copy_from_user.S
+++ b/arch/arm/lib/copy_from_user.S
@@ -17,7 +17,7 @@
 /*
  * Prototype:
  *
- *	size_t __copy_from_user(void *to, const void *from, size_t n)
+ *	size_t arm_copy_from_user(void *to, const void *from, size_t n)
  *
  * Purpose:
  *
@@ -89,11 +89,11 @@
 
 	.text
 
-ENTRY(__copy_from_user)
+ENTRY(arm_copy_from_user)
 
 #include "copy_template.S"
 
-ENDPROC(__copy_from_user)
+ENDPROC(arm_copy_from_user)
 
 	.pushsection .fixup,"ax"
 	.align 0
diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
index 9648b0675a3e..caf5019d8161 100644
--- a/arch/arm/lib/copy_to_user.S
+++ b/arch/arm/lib/copy_to_user.S
@@ -17,7 +17,7 @@
 /*
  * Prototype:
  *
- *	size_t __copy_to_user(void *to, const void *from, size_t n)
+ *	size_t arm_copy_to_user(void *to, const void *from, size_t n)
  *
  * Purpose:
  *
@@ -93,11 +93,11 @@
 	.text
 
 ENTRY(__copy_to_user_std)
-WEAK(__copy_to_user)
+WEAK(arm_copy_to_user)
 
 #include "copy_template.S"
 
-ENDPROC(__copy_to_user)
+ENDPROC(arm_copy_to_user)
 ENDPROC(__copy_to_user_std)
 
 	.pushsection .text.fixup,"ax"
diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
index 1d0957e61f89..1712f132b80d 100644
--- a/arch/arm/lib/csumpartialcopyuser.S
+++ b/arch/arm/lib/csumpartialcopyuser.S
@@ -17,6 +17,19 @@
 
 		.text
 
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+		.macro	save_regs
+		mrc	p15, 0, ip, c3, c0, 0
+		stmfd	sp!, {r1, r2, r4 - r8, ip, lr}
+		uaccess_enable ip
+		.endm
+
+		.macro	load_regs
+		ldmfd	sp!, {r1, r2, r4 - r8, ip, lr}
+		mcr	p15, 0, ip, c3, c0, 0
+		ret	lr
+		.endm
+#else
 		.macro	save_regs
 		stmfd	sp!, {r1, r2, r4 - r8, lr}
 		.endm
@@ -24,6 +37,7 @@
 		.macro	load_regs
 		ldmfd	sp!, {r1, r2, r4 - r8, pc}
 		.endm
+#endif
 
 		.macro	load1b,	reg1
 		ldrusr	\reg1, r0, 1
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index 4b39af2dfda9..d72b90905132 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -136,7 +136,7 @@ out:
 }
 
 unsigned long
-__copy_to_user(void __user *to, const void *from, unsigned long n)
+arm_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 	/*
 	 * This test is stubbed out of the main function above to keep
@@ -190,7 +190,7 @@ out:
 	return n;
 }
 
-unsigned long __clear_user(void __user *addr, unsigned long n)
+unsigned long arm_clear_user(void __user *addr, unsigned long n)
 {
 	/* See rationale for this in __copy_to_user() above. */
 	if (n < 64)
diff --git a/arch/arm/mm/abort-ev4.S b/arch/arm/mm/abort-ev4.S
index 54473cd4aba9..b3b31e30cadd 100644
--- a/arch/arm/mm/abort-ev4.S
+++ b/arch/arm/mm/abort-ev4.S
@@ -19,6 +19,7 @@ ENTRY(v4_early_abort)
 	mrc	p15, 0, r1, c5, c0, 0		@ get FSR
 	mrc	p15, 0, r0, c6, c0, 0		@ get FAR
 	ldr	r3, [r4]			@ read aborted ARM instruction
+	uaccess_disable ip			@ disable userspace access
 	bic	r1, r1, #1 << 11 | 1 << 10	@ clear bits 11 and 10 of FSR
 	tst	r3, #1 << 20			@ L = 1 -> write?
 	orreq	r1, r1, #1 << 11		@ yes.
diff --git a/arch/arm/mm/abort-ev5t.S b/arch/arm/mm/abort-ev5t.S
index a0908d4653a3..a6a381a6caa5 100644
--- a/arch/arm/mm/abort-ev5t.S
+++ b/arch/arm/mm/abort-ev5t.S
@@ -21,8 +21,10 @@ ENTRY(v5t_early_abort)
 	mrc	p15, 0, r0, c6, c0, 0		@ get FAR
 	do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
 	ldreq	r3, [r4]			@ read aborted ARM instruction
+	uaccess_disable ip			@ disable user access
 	bic	r1, r1, #1 << 11		@ clear bits 11 of FSR
-	do_ldrd_abort tmp=ip, insn=r3
+	teq_ldrd tmp=ip, insn=r3		@ insn was LDRD?
+	beq	do_DataAbort			@ yes
 	tst	r3, #1 << 20			@ check write
 	orreq	r1, r1, #1 << 11
 	b	do_DataAbort
diff --git a/arch/arm/mm/abort-ev5tj.S b/arch/arm/mm/abort-ev5tj.S
index 4006b7a61264..00ab011bef58 100644
--- a/arch/arm/mm/abort-ev5tj.S
+++ b/arch/arm/mm/abort-ev5tj.S
@@ -24,7 +24,9 @@ ENTRY(v5tj_early_abort)
 	bne	do_DataAbort
 	do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
 	ldreq	r3, [r4]			@ read aborted ARM instruction
-	do_ldrd_abort tmp=ip, insn=r3
+	uaccess_disable ip			@ disable userspace access
+	teq_ldrd tmp=ip, insn=r3		@ insn was LDRD?
+	beq	do_DataAbort			@ yes
 	tst	r3, #1 << 20			@ L = 0 -> write
 	orreq	r1, r1, #1 << 11		@ yes.
 	b	do_DataAbort
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S
index 8c48c5c22a33..8801a15aa105 100644
--- a/arch/arm/mm/abort-ev6.S
+++ b/arch/arm/mm/abort-ev6.S
@@ -26,16 +26,18 @@ ENTRY(v6_early_abort)
 	ldr	ip, =0x4107b36
 	mrc	p15, 0, r3, c0, c0, 0		@ get processor id
 	teq	ip, r3, lsr #4			@ r0 ARM1136?
-	bne	do_DataAbort
+	bne	1f
 	tst	r5, #PSR_J_BIT			@ Java?
 	tsteq	r5, #PSR_T_BIT			@ Thumb?
-	bne	do_DataAbort
+	bne	1f
 	bic	r1, r1, #1 << 11		@ clear bit 11 of FSR
 	ldr	r3, [r4]			@ read aborted ARM instruction
  ARM_BE8(rev	r3, r3)
 
-	do_ldrd_abort tmp=ip, insn=r3
+	teq_ldrd tmp=ip, insn=r3		@ insn was LDRD?
+	beq	1f				@ yes
 	tst	r3, #1 << 20			@ L = 0 -> write
 	orreq	r1, r1, #1 << 11		@ yes.
 #endif
+1:	uaccess_disable ip			@ disable userspace access
 	b	do_DataAbort
diff --git a/arch/arm/mm/abort-ev7.S b/arch/arm/mm/abort-ev7.S
index 4812ad054214..e8d0e08c227f 100644
--- a/arch/arm/mm/abort-ev7.S
+++ b/arch/arm/mm/abort-ev7.S
@@ -15,6 +15,7 @@
 ENTRY(v7_early_abort)
 	mrc	p15, 0, r1, c5, c0, 0		@ get FSR
 	mrc	p15, 0, r0, c6, c0, 0		@ get FAR
+	uaccess_disable ip			@ disable userspace access
 
 	/*
 	 * V6 code adjusts the returned DFSR.
diff --git a/arch/arm/mm/abort-lv4t.S b/arch/arm/mm/abort-lv4t.S
index f3982580c273..6d8e8e3365d1 100644
--- a/arch/arm/mm/abort-lv4t.S
+++ b/arch/arm/mm/abort-lv4t.S
@@ -26,6 +26,7 @@ ENTRY(v4t_late_abort)
 #endif
 	bne	.data_thumb_abort
 	ldr	r8, [r4]			@ read arm instruction
+	uaccess_disable ip			@ disable userspace access
 	tst	r8, #1 << 20			@ L = 1 -> write?
 	orreq	r1, r1, #1 << 11		@ yes.
 	and	r7, r8, #15 << 24
@@ -155,6 +156,7 @@ ENTRY(v4t_late_abort)
 
 .data_thumb_abort:
 	ldrh	r8, [r4]			@ read instruction
+	uaccess_disable ip			@ disable userspace access
 	tst	r8, #1 << 11			@ L = 1 -> write?
 	orreq	r1, r1, #1 << 8			@ yes
 	and	r7, r8, #15 << 12
diff --git a/arch/arm/mm/abort-macro.S b/arch/arm/mm/abort-macro.S
index 2cbf68ef0e83..4509bee4e081 100644
--- a/arch/arm/mm/abort-macro.S
+++ b/arch/arm/mm/abort-macro.S
@@ -13,6 +13,7 @@
 	tst	\psr, #PSR_T_BIT
 	beq	not_thumb
 	ldrh	\tmp, [\pc]			@ Read aborted Thumb instruction
+	uaccess_disable ip			@ disable userspace access
 	and	\tmp, \tmp, # 0xfe00		@ Mask opcode field
 	cmp	\tmp, # 0x5600			@ Is it ldrsb?
 	orreq	\tmp, \tmp, #1 << 11		@ Set L-bit if yes
@@ -29,12 +30,9 @@ not_thumb:
  * [7:4] == 1101
  * [20] == 0
  */
-	.macro	do_ldrd_abort, tmp, insn
-	tst	\insn, #0x0e100000		@ [27:25,20] == 0
-	bne	not_ldrd
-	and	\tmp, \insn, #0x000000f0	@ [7:4] == 1101
-	cmp	\tmp, #0x000000d0
-	beq	do_DataAbort
-not_ldrd:
+	.macro	teq_ldrd, tmp, insn
+	mov	\tmp, #0x0e100000
+	orr	\tmp, #0x000000f0
+	and	\tmp, \insn, \tmp
+	teq	\tmp, #0x000000d0
 	.endm
-
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 870838a46d52..1cb9c1c1c05f 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -291,13 +291,13 @@ static struct mem_type mem_types[] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 			     L_PTE_RDONLY,
 		.prot_l1   = PMD_TYPE_TABLE,
-		.domain    = DOMAIN_USER,
+		.domain    = DOMAIN_VECTORS,
 	},
 	[MT_HIGH_VECTORS] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 			     L_PTE_USER | L_PTE_RDONLY,
 		.prot_l1   = PMD_TYPE_TABLE,
-		.domain    = DOMAIN_USER,
+		.domain    = DOMAIN_VECTORS,
 	},
 	[MT_MEMORY_RWX] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index a3681f11dd9f..e683db1b90a3 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -84,6 +84,16 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 	if (!new_pte)
 		goto no_pte;
 
+#ifndef CONFIG_ARM_LPAE
+	/*
+	 * Modify the PTE pointer to have the correct domain.  This
+	 * needs to be the vectors domain to avoid the low vectors
+	 * being unmapped.
+	 */
+	pmd_val(*new_pmd) &= ~PMD_DOMAIN_MASK;
+	pmd_val(*new_pmd) |= PMD_DOMAIN(DOMAIN_VECTORS);
+#endif
+
 	init_pud = pud_offset(init_pgd, 0);
 	init_pmd = pmd_offset(init_pud, 0);
 	init_pte = pte_offset_map(init_pmd, 0);
diff --git a/arch/arm/nwfpe/entry.S b/arch/arm/nwfpe/entry.S
index 71df43547659..39c20afad7ed 100644
--- a/arch/arm/nwfpe/entry.S
+++ b/arch/arm/nwfpe/entry.S
@@ -95,9 +95,10 @@ emulate:
 	reteq	r4			@ no, return failure
 
 next:
+	uaccess_enable r3
 .Lx1:	ldrt	r6, [r5], #4		@ get the next instruction and
 					@ increment PC
-
+	uaccess_disable r3
 	and	r2, r6, #0x0F000000	@ test for FP insns
 	teq	r2, #0x0C000000
 	teqne	r2, #0x0D000000
diff --git a/arch/arm/xen/hypercall.S b/arch/arm/xen/hypercall.S
index f00e08075938..10fd99c568c6 100644
--- a/arch/arm/xen/hypercall.S
+++ b/arch/arm/xen/hypercall.S
@@ -98,8 +98,23 @@ ENTRY(privcmd_call)
 	mov	r1, r2
 	mov	r2, r3
 	ldr	r3, [sp, #8]
+	/*
+	 * Privcmd calls are issued by userspace.  We need to allow the
+	 * kernel to access the userspace memory before issuing the
+	 * hypercall.
+	 */
+	uaccess_enable r4
+
+	/* r4 is loaded only now, as it was used as a scratch register above */
+	ldr	r4, [sp, #4]
 	__HVC(XEN_IMM)
+
+	/*
+	 * Disable userspace access from the kernel.  This is fine to do
+	 * unconditionally, as no set_fs(KERNEL_DS)/set_fs(get_ds()) is
+	 * called beforehand.
+	 */
+	uaccess_disable r4
+
 	ldm	sp!, {r4}
 	ret	lr
 ENDPROC(privcmd_call);