Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/Kconfig                    | 15
-rw-r--r--  arch/arm/include/asm/assembler.h    | 30
-rw-r--r--  arch/arm/include/asm/domain.h       | 21
-rw-r--r--  arch/arm/include/asm/uaccess.h      | 14
-rw-r--r--  arch/arm/kernel/process.c           | 36
-rw-r--r--  arch/arm/kernel/swp_emulate.c       |  3
-rw-r--r--  arch/arm/lib/csumpartialcopyuser.S  | 14
7 files changed, 125 insertions(+), 8 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index a750c1425c3a..e15d5ed4d5f1 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1694,6 +1694,21 @@ config HIGHPTE
 	bool "Allocate 2nd-level pagetables from highmem"
 	depends on HIGHMEM
 
+config CPU_SW_DOMAIN_PAN
+	bool "Enable use of CPU domains to implement privileged no-access"
+	depends on MMU && !ARM_LPAE
+	default y
+	help
+	  Increase kernel security by ensuring that normal kernel accesses
+	  are unable to access userspace addresses.  This can help prevent
+	  use-after-free bugs becoming an exploitable privilege escalation
+	  by ensuring that magic values (such as LIST_POISON) will always
+	  fault when dereferenced.
+
+	  CPUs with low-vector mappings use a best-efforts implementation.
+	  Their lower 1MB needs to remain accessible for the vectors, but
+	  the remainder of userspace will become appropriately inaccessible.
+
 config HW_PERF_EVENTS
 	bool "Enable hardware performance counter support for perf events"
 	depends on PERF_EVENTS
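
For illustration of the LIST_POISON case mentioned in the help text, the hypothetical kernel-style snippet below (not part of this patch; struct item and stale_use are invented names) shows the class of bug being hardened against: list_del() leaves ->next/->prev pointing at LIST_POISON1/2, which are low, userspace-range addresses, so with CPU_SW_DOMAIN_PAN a later buggy dereference takes a domain fault instead of quietly following an attacker-controlled mapping.

#include <linux/list.h>
#include <linux/slab.h>

struct item {
	struct list_head node;
	int value;
};

static int stale_use(struct item *it)
{
	list_del(&it->node);	/* it->node.next/.prev now hold LIST_POISON1/2 */
	kfree(it);
	/* ... buggy later reuse of the freed object ... */
	return list_entry(it->node.next, struct item, node)->value;	/* faults under SW PAN */
}
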
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index a91177043467..3ae0eda5e64f 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -446,15 +446,45 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
 	.endm
 
 	.macro	uaccess_disable, tmp, isb=1
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	/*
+	 * Whenever we re-enter userspace, the domains should always be
+	 * set appropriately.
+	 */
+	mov	\tmp, #DACR_UACCESS_DISABLE
+	mcr	p15, 0, \tmp, c3, c0, 0		@ Set domain register
+	.if	\isb
+	instr_sync
+	.endif
+#endif
 	.endm
 
 	.macro	uaccess_enable, tmp, isb=1
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	/*
+	 * Whenever we re-enter userspace, the domains should always be
+	 * set appropriately.
+	 */
+	mov	\tmp, #DACR_UACCESS_ENABLE
+	mcr	p15, 0, \tmp, c3, c0, 0
+	.if	\isb
+	instr_sync
+	.endif
+#endif
 	.endm
 
 	.macro	uaccess_save, tmp
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	mrc	p15, 0, \tmp, c3, c0, 0
+	str	\tmp, [sp, #S_FRAME_SIZE]
+#endif
 	.endm
 
 	.macro	uaccess_restore
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	ldr	r0, [sp, #S_FRAME_SIZE]
+	mcr	p15, 0, r0, c3, c0, 0
+#endif
 	.endm
 
 	.macro	uaccess_save_and_disable, tmp
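
In C terms, each uaccess_enable/uaccess_disable body above is a single write of the Domain Access Control Register (CP15 c3) followed by an instruction barrier. A minimal sketch, assuming an ARMv7 target where instr_sync expands to isb (sw_pan_write_dacr is an invented name; callers would pass DACR_UACCESS_ENABLE or DACR_UACCESS_DISABLE from asm/domain.h):

static inline void sw_pan_write_dacr(unsigned int val)
{
	/* the same CP15 write the "mcr p15, 0, \tmp, c3, c0, 0" above performs */
	asm volatile("mcr	p15, 0, %0, c3, c0, 0" : : "r" (val) : "memory");
	/* instr_sync: make the new domain permissions take effect before the next access */
	asm volatile("isb" : : : "memory");
}

uaccess_save/uaccess_restore additionally stash and reload the previous DACR value just past the saved pt_regs, so exception return resumes with the interrupted context's setting intact.
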
diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index 2be929549938..e878129f2fee 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -57,11 +57,29 @@
 #define domain_mask(dom)	((3) << (2 * (dom)))
 #define domain_val(dom,type)	((type) << (2 * (dom)))
 
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+#define DACR_INIT \
+	(domain_val(DOMAIN_USER, DOMAIN_NOACCESS) | \
+	 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+	 domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+	 domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
+#else
 #define DACR_INIT \
 	(domain_val(DOMAIN_USER, DOMAIN_CLIENT) | \
 	 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
 	 domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
 	 domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
+#endif
+
+#define __DACR_DEFAULT \
+	domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT) | \
+	domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+	domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT)
+
+#define DACR_UACCESS_DISABLE	\
+	(__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
+#define DACR_UACCESS_ENABLE	\
+	(__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_CLIENT))
 
 #ifndef __ASSEMBLY__
 
@@ -76,7 +94,6 @@ static inline unsigned int get_domain(void)
 	return domain;
 }
 
-#ifdef CONFIG_CPU_USE_DOMAINS
 static inline void set_domain(unsigned val)
 {
 	asm volatile(
@@ -85,6 +102,7 @@ static inline void set_domain(unsigned val)
 	isb();
 }
 
+#ifdef CONFIG_CPU_USE_DOMAINS
 #define modify_domain(dom,type)					\
 	do {							\
 	unsigned int domain = get_domain();		\
@@ -94,7 +112,6 @@ static inline void set_domain(unsigned val)
 	} while (0)
 
 #else
-static inline void set_domain(unsigned val) { }
 static inline void modify_domain(unsigned dom, unsigned type) { }
 #endif
 
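
For a concrete picture of the constants defined above, the standalone sketch below re-derives them, assuming the common !CONFIG_IO_36 domain numbering (DOMAIN_KERNEL=0, DOMAIN_USER=1, DOMAIN_IO=2, DOMAIN_VECTORS=3) and the access types DOMAIN_NOACCESS=0, DOMAIN_CLIENT=1; it is an illustration, not kernel code.

#include <stdio.h>

/* mirrors domain_val() from asm/domain.h: two DACR bits per domain */
#define domain_val(dom, type)	((type) << (2 * (dom)))

int main(void)
{
	/* __DACR_DEFAULT: kernel, I/O and vectors domains set to "client" */
	unsigned int dflt = domain_val(0, 1) | domain_val(2, 1) | domain_val(3, 1);

	printf("DACR_UACCESS_ENABLE  = 0x%02x\n", dflt | domain_val(1, 1));	/* user = client */
	printf("DACR_UACCESS_DISABLE = 0x%02x\n", dflt | domain_val(1, 0));	/* user = no access */
	return 0;
}

Under these assumed values the two constants come out as 0x55 and 0x51: toggling user access only flips the two DACR bits belonging to DOMAIN_USER, which is why the assembler macros can load the whole register from an immediate.
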
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 82880132f941..01bae13b2cea 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -57,11 +57,25 @@ extern int fixup_exception(struct pt_regs *regs);
  */
 static inline unsigned int uaccess_save_and_enable(void)
 {
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	unsigned int old_domain = get_domain();
+
+	/* Set the current domain access to permit user accesses */
+	set_domain((old_domain & ~domain_mask(DOMAIN_USER)) |
+		   domain_val(DOMAIN_USER, DOMAIN_CLIENT));
+
+	return old_domain;
+#else
 	return 0;
+#endif
 }
 
 static inline void uaccess_restore(unsigned int flags)
 {
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	/* Restore the user access mask */
+	set_domain(flags);
+#endif
 }
 
 /*
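
The swp_emulate.c hunk further down uses these helpers in exactly this shape; the hypothetical snippet below (read_user_word is an invented name, and get_user() merely stands in for a raw user access) shows the intended bracketing pattern:

#include <linux/uaccess.h>

static int read_user_word(unsigned int __user *uaddr, unsigned int *val)
{
	unsigned int __ua_flags;
	int ret;

	__ua_flags = uaccess_save_and_enable();	/* DOMAIN_USER -> client, old DACR returned */
	ret = get_user(*val, uaddr);		/* user access is permitted in this window */
	uaccess_restore(__ua_flags);		/* put the previous domain setting back */

	return ret;
}
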
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index e722f9b3c9b1..3f18098dfd08 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -129,12 +129,36 @@ void __show_regs(struct pt_regs *regs)
 	buf[4] = '\0';
 
 #ifndef CONFIG_CPU_V7M
-	printk("Flags: %s IRQs o%s FIQs o%s Mode %s ISA %s Segment %s\n",
-		buf, interrupts_enabled(regs) ? "n" : "ff",
-		fast_interrupts_enabled(regs) ? "n" : "ff",
-		processor_modes[processor_mode(regs)],
-		isa_modes[isa_mode(regs)],
-		get_fs() == get_ds() ? "kernel" : "user");
+	{
+		unsigned int domain = get_domain();
+		const char *segment;
+
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+		/*
+		 * Get the domain register for the parent context. In user
+		 * mode, we don't save the DACR, so lets use what it should
+		 * be. For other modes, we place it after the pt_regs struct.
+		 */
+		if (user_mode(regs))
+			domain = DACR_UACCESS_ENABLE;
+		else
+			domain = *(unsigned int *)(regs + 1);
+#endif
+
+		if ((domain & domain_mask(DOMAIN_USER)) ==
+		    domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
+			segment = "none";
+		else if (get_fs() == get_ds())
+			segment = "kernel";
+		else
+			segment = "user";
+
+		printk("Flags: %s IRQs o%s FIQs o%s Mode %s ISA %s Segment %s\n",
+			buf, interrupts_enabled(regs) ? "n" : "ff",
+			fast_interrupts_enabled(regs) ? "n" : "ff",
+			processor_modes[processor_mode(regs)],
+			isa_modes[isa_mode(regs)], segment);
+	}
 #else
 	printk("xPSR: %08lx\n", regs->ARM_cpsr);
 #endif
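
The *(unsigned int *)(regs + 1) read above relies on uaccess_save (see the assembler.h hunk) having stored the DACR one word past the saved pt_regs, i.e. at sp + S_FRAME_SIZE. A rough sketch of the assumed SVC-mode frame layout (svc_frame is an invented type purely for illustration; the entry code only works with raw offsets):

struct svc_frame {
	struct pt_regs regs;	/* S_FRAME_SIZE bytes of saved registers */
	unsigned int dacr;	/* written by "str \tmp, [sp, #S_FRAME_SIZE]" in uaccess_save */
};

User-mode frames never pass through uaccess_save, so the code substitutes DACR_UACCESS_ENABLE for them, as the comment in the hunk explains.
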
diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
index 1361756782c7..5b26e7efa9ea 100644
--- a/arch/arm/kernel/swp_emulate.c
+++ b/arch/arm/kernel/swp_emulate.c
@@ -141,11 +141,14 @@ static int emulate_swpX(unsigned int address, unsigned int *data,
 
 	while (1) {
 		unsigned long temp;
+		unsigned int __ua_flags;
 
+		__ua_flags = uaccess_save_and_enable();
 		if (type == TYPE_SWPB)
 			__user_swpb_asm(*data, address, res, temp);
 		else
 			__user_swp_asm(*data, address, res, temp);
+		uaccess_restore(__ua_flags);
 
 		if (likely(res != -EAGAIN) || signal_pending(current))
 			break;
diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
index 1d0957e61f89..1712f132b80d 100644
--- a/arch/arm/lib/csumpartialcopyuser.S
+++ b/arch/arm/lib/csumpartialcopyuser.S
@@ -17,6 +17,19 @@
 
 		.text
 
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+		.macro	save_regs
+		mrc	p15, 0, ip, c3, c0, 0
+		stmfd	sp!, {r1, r2, r4 - r8, ip, lr}
+		uaccess_enable ip
+		.endm
+
+		.macro	load_regs
+		ldmfd	sp!, {r1, r2, r4 - r8, ip, lr}
+		mcr	p15, 0, ip, c3, c0, 0
+		ret	lr
+		.endm
+#else
 		.macro	save_regs
 		stmfd	sp!, {r1, r2, r4 - r8, lr}
 		.endm
@@ -24,6 +37,7 @@
 		.macro	load_regs
 		ldmfd	sp!, {r1, r2, r4 - r8, pc}
 		.endm
+#endif
 
 		.macro	load1b,	reg1
 		ldrusr	\reg1, r0, 1