author	Linus Torvalds <torvalds@linux-foundation.org>	2015-12-16 13:57:24 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-12-16 13:57:24 -0500
commit	a5e90b1b075f89f084047628d4ef181aded0bbfb (patch)
tree	da0010e547be7bf0f3b2bac9d7c74015e5e8662f
parent	edb42dc7bc0da0125ceacab810a553ce1f0cac8d (diff)
parent	34bfbae33ae84107d0c257edd6c6a8689a09be26 (diff)
Merge branch 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm
Pull ARM fixes from Russell King:
 "Further ARM fixes:

   - Anson Huang noticed that we were corrupting a register we
     shouldn't be during suspend on some CPUs.

   - Shengjiu Wang spotted a bug in the 'swp' instruction emulation.

   - Will Deacon fixed a bug in the ASID allocator.

   - Laura Abbott fixed the kernel permission protection to apply to
     all threads running in the system.

   - I've fixed two bugs with the domain access control register
     handling, one to do with printing an appropriate value at oops
     time, and the other to further fix the uaccess_with_memcpy code"

* 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm:
  ARM: 8475/1: SWP emulation: Restore original *data when failed
  ARM: 8471/1: need to save/restore arm register(r11) when it is corrupted
  ARM: fix uaccess_with_memcpy() with SW_DOMAIN_PAN
  ARM: report proper DACR value in oops dumps
  ARM: 8464/1: Update all mm structures with section adjustments
  ARM: 8465/1: mm: keep reserved ASIDs in sync with mm after multiple rollovers
-rw-r--r--	arch/arm/include/asm/uaccess.h	4
-rw-r--r--	arch/arm/kernel/process.c	33
-rw-r--r--	arch/arm/kernel/swp_emulate.c	6
-rw-r--r--	arch/arm/lib/uaccess_with_memcpy.c	29
-rw-r--r--	arch/arm/mm/context.c	38
-rw-r--r--	arch/arm/mm/init.c	92
-rw-r--r--	arch/arm/mm/proc-v7.S	4
7 files changed, 138 insertions(+), 68 deletions(-)
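
The common thread in most of these fixes is the software PAN discipline behind CONFIG_CPU_SW_DOMAIN_PAN: user space must be unlocked with uaccess_save_and_enable() and re-locked with uaccess_restore() tightly around each raw access, and nowhere else. A minimal user-space model of that bracketing, for orientation while reading the hunks below (the helper names mirror the kernel's, but the bool stands in for the DACR state the real helpers save and rewrite):

/* Illustrative model only: a "PAN" flag guards a user buffer the way
 * the DACR guards user mappings in the kernel. */
#include <assert.h>
#include <stdbool.h>
#include <string.h>

static bool pan_enabled = true;		/* user access blocked by default */

static bool uaccess_save_and_enable(void)
{
	bool old = pan_enabled;
	pan_enabled = false;		/* open the user-access window */
	return old;
}

static void uaccess_restore(bool old)
{
	pan_enabled = old;		/* put the previous state back */
}

static void copy_to_user_model(char *to, const char *from, size_t n)
{
	bool flags = uaccess_save_and_enable();
	assert(!pan_enabled);		/* raw access only inside the window */
	memcpy(to, from, n);
	uaccess_restore(flags);
}

int main(void)
{
	char dst[8];
	copy_to_user_model(dst, "hi", 3);
	assert(pan_enabled);		/* window is closed again */
	return 0;
}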
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 8cc85a4ebec2..35c9db857ebe 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -510,10 +510,14 @@ __copy_to_user_std(void __user *to, const void *from, unsigned long n);
 static inline unsigned long __must_check
 __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
+#ifndef CONFIG_UACCESS_WITH_MEMCPY
 	unsigned int __ua_flags = uaccess_save_and_enable();
 	n = arm_copy_to_user(to, from, n);
 	uaccess_restore(__ua_flags);
 	return n;
+#else
+	return arm_copy_to_user(to, from, n);
+#endif
 }
 
 extern unsigned long __must_check
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 7a7c4cea5523..4adfb46e3ee9 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -95,6 +95,22 @@ void __show_regs(struct pt_regs *regs)
 {
 	unsigned long flags;
 	char buf[64];
+#ifndef CONFIG_CPU_V7M
+	unsigned int domain;
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	/*
+	 * Get the domain register for the parent context. In user
+	 * mode, we don't save the DACR, so lets use what it should
+	 * be. For other modes, we place it after the pt_regs struct.
+	 */
+	if (user_mode(regs))
+		domain = DACR_UACCESS_ENABLE;
+	else
+		domain = *(unsigned int *)(regs + 1);
+#else
+	domain = get_domain();
+#endif
+#endif
 
 	show_regs_print_info(KERN_DEFAULT);
 
@@ -123,21 +139,8 @@ void __show_regs(struct pt_regs *regs)
 
 #ifndef CONFIG_CPU_V7M
 	{
-		unsigned int domain = get_domain();
 		const char *segment;
 
-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
-		/*
-		 * Get the domain register for the parent context. In user
-		 * mode, we don't save the DACR, so lets use what it should
-		 * be. For other modes, we place it after the pt_regs struct.
-		 */
-		if (user_mode(regs))
-			domain = DACR_UACCESS_ENABLE;
-		else
-			domain = *(unsigned int *)(regs + 1);
-#endif
-
 		if ((domain & domain_mask(DOMAIN_USER)) ==
 		    domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
 			segment = "none";
@@ -163,11 +166,11 @@ void __show_regs(struct pt_regs *regs)
 	buf[0] = '\0';
 #ifdef CONFIG_CPU_CP15_MMU
 	{
-		unsigned int transbase, dac = get_domain();
+		unsigned int transbase;
 		asm("mrc p15, 0, %0, c2, c0\n\t"
 		    : "=r" (transbase));
 		snprintf(buf, sizeof(buf), " Table: %08x DAC: %08x",
-			transbase, dac);
+			transbase, domain);
 	}
 #endif
 	asm("mrc p15, 0, %0, c1, c0\n" : "=r" (ctrl));
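
The subtle part of the hunk above is where the parent context's DACR comes from: in user mode it is simply what uaccess would set, but for kernel-mode exceptions the entry code stashes the value in the word immediately after pt_regs on the stack, which is what *(unsigned int *)(regs + 1) picks up. A standalone sketch of that "one word past the struct" layout (struct regs and struct frame are illustrative stand-ins, not the kernel's types):

#include <stdio.h>

struct regs { unsigned long r0, pc; };	/* stand-in for pt_regs */

/* The saver lays out the register block with one extra word behind it. */
struct frame { struct regs regs; unsigned int dacr; };

int main(void)
{
	struct frame frame;
	frame.dacr = 0x51;		/* entry code stashes the DACR here */

	struct regs *regs = &frame.regs;
	/* The consumer recovers it the same way __show_regs() now does;
	 * this assumes no padding between the two members, as on the
	 * usual ABIs. */
	unsigned int domain = *(unsigned int *)(regs + 1);
	printf("DAC: %08x\n", domain);	/* prints DAC: 00000051 */
	return 0;
}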
diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
index 5b26e7efa9ea..c3fe769d7558 100644
--- a/arch/arm/kernel/swp_emulate.c
+++ b/arch/arm/kernel/swp_emulate.c
@@ -36,10 +36,10 @@
  */
 #define __user_swpX_asm(data, addr, res, temp, B)	\
 	__asm__ __volatile__(				\
-	"	mov		%2, %1\n"		\
-	"0:	ldrex"B"	%1, [%3]\n"		\
-	"1:	strex"B"	%0, %2, [%3]\n"		\
+	"0:	ldrex"B"	%2, [%3]\n"		\
+	"1:	strex"B"	%0, %1, [%3]\n"		\
 	"	cmp		%0, #0\n"		\
+	"	moveq		%1, %2\n"		\
 	"	movne		%0, %4\n"		\
 	"2:\n"						\
 	"	.section	 .text.fixup,\"ax\"\n"	\
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index d72b90905132..588bbc288396 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -88,6 +88,7 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
 static unsigned long noinline
 __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
 {
+	unsigned long ua_flags;
 	int atomic;
 
 	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
@@ -118,7 +119,9 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
 		if (tocopy > n)
 			tocopy = n;
 
+		ua_flags = uaccess_save_and_enable();
 		memcpy((void *)to, from, tocopy);
+		uaccess_restore(ua_flags);
 		to += tocopy;
 		from += tocopy;
 		n -= tocopy;
@@ -145,14 +148,21 @@ arm_copy_to_user(void __user *to, const void *from, unsigned long n)
 	 * With frame pointer disabled, tail call optimization kicks in
 	 * as well making this test almost invisible.
 	 */
-	if (n < 64)
-		return __copy_to_user_std(to, from, n);
-	return __copy_to_user_memcpy(to, from, n);
+	if (n < 64) {
+		unsigned long ua_flags = uaccess_save_and_enable();
+		n = __copy_to_user_std(to, from, n);
+		uaccess_restore(ua_flags);
+	} else {
+		n = __copy_to_user_memcpy(to, from, n);
+	}
+	return n;
 }
 
 static unsigned long noinline
 __clear_user_memset(void __user *addr, unsigned long n)
 {
+	unsigned long ua_flags;
+
 	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
 		memset((void *)addr, 0, n);
 		return 0;
@@ -175,7 +185,9 @@ __clear_user_memset(void __user *addr, unsigned long n)
 		if (tocopy > n)
 			tocopy = n;
 
+		ua_flags = uaccess_save_and_enable();
 		memset((void *)addr, 0, tocopy);
+		uaccess_restore(ua_flags);
 		addr += tocopy;
 		n -= tocopy;
 
@@ -193,9 +205,14 @@ out:
 unsigned long arm_clear_user(void __user *addr, unsigned long n)
 {
 	/* See rational for this in __copy_to_user() above. */
-	if (n < 64)
-		return __clear_user_std(addr, n);
-	return __clear_user_memset(addr, n);
+	if (n < 64) {
+		unsigned long ua_flags = uaccess_save_and_enable();
+		n = __clear_user_std(addr, n);
+		uaccess_restore(ua_flags);
+	} else {
+		n = __clear_user_memset(addr, n);
+	}
+	return n;
 }
 
 #if 0
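
With CONFIG_UACCESS_WITH_MEMCPY the generic __copy_to_user() (first hunk of this series) no longer opens the uaccess window, so the responsibility lands here: the sub-64-byte fast path is bracketed once around the whole call, while the memcpy path opens the window only around each chunk, keeping it closed across the page pinning that can fault or sleep. A compilable user-space model of that division of labour (the 64-byte cut-over matches the code above; the window helpers and the 4096-byte chunk size are illustrative):

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

static bool pan = true;			/* models the closed user-access window */

static bool window_open(void)  { bool old = pan; pan = false; return old; }
static void window_close(bool old) { pan = old; }

static size_t std_copy(void *to, const void *from, size_t n)
{
	/* fast path: relies on the CALLER having opened the window */
	memcpy(to, from, n);
	return 0;			/* bytes left uncopied */
}

static size_t memcpy_copy(void *to, const void *from, size_t n)
{
	/* page-walking path: window opened per chunk, never held across
	 * the (potentially sleeping) page pin */
	while (n) {
		size_t chunk = n > 4096 ? 4096 : n;
		bool flags = window_open();
		memcpy(to, from, chunk);
		window_close(flags);
		to = (char *)to + chunk;
		from = (const char *)from + chunk;
		n -= chunk;
	}
	return 0;
}

size_t copy_to_user_model(void *to, const void *from, size_t n)
{
	if (n < 64) {
		bool flags = window_open();
		n = std_copy(to, from, n);
		window_close(flags);
	} else {
		n = memcpy_copy(to, from, n);
	}
	return n;
}

int main(void)
{
	char src[256] = "payload", dst[256];
	return (int)copy_to_user_model(dst, src, sizeof(src));
}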
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 845769e41332..c8c8b9ed02e0 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -165,13 +165,28 @@ static void flush_context(unsigned int cpu)
 	__flush_icache_all();
 }
 
-static int is_reserved_asid(u64 asid)
+static bool check_update_reserved_asid(u64 asid, u64 newasid)
 {
 	int cpu;
-	for_each_possible_cpu(cpu)
-		if (per_cpu(reserved_asids, cpu) == asid)
-			return 1;
-	return 0;
+	bool hit = false;
+
+	/*
+	 * Iterate over the set of reserved ASIDs looking for a match.
+	 * If we find one, then we can update our mm to use newasid
+	 * (i.e. the same ASID in the current generation) but we can't
+	 * exit the loop early, since we need to ensure that all copies
+	 * of the old ASID are updated to reflect the mm. Failure to do
+	 * so could result in us missing the reserved ASID in a future
+	 * generation.
+	 */
+	for_each_possible_cpu(cpu) {
+		if (per_cpu(reserved_asids, cpu) == asid) {
+			hit = true;
+			per_cpu(reserved_asids, cpu) = newasid;
+		}
+	}
+
+	return hit;
 }
 
 static u64 new_context(struct mm_struct *mm, unsigned int cpu)
@@ -181,12 +196,14 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 	u64 generation = atomic64_read(&asid_generation);
 
 	if (asid != 0) {
+		u64 newasid = generation | (asid & ~ASID_MASK);
+
 		/*
 		 * If our current ASID was active during a rollover, we
 		 * can continue to use it and this was just a false alarm.
 		 */
-		if (is_reserved_asid(asid))
-			return generation | (asid & ~ASID_MASK);
+		if (check_update_reserved_asid(asid, newasid))
+			return newasid;
 
 		/*
 		 * We had a valid ASID in a previous life, so try to re-use
@@ -194,7 +211,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 		 */
 		asid &= ~ASID_MASK;
 		if (!__test_and_set_bit(asid, asid_map))
-			goto bump_gen;
+			return newasid;
 	}
 
 	/*
@@ -216,11 +233,8 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 
 	__set_bit(asid, asid_map);
 	cur_idx = asid;
-
-bump_gen:
-	asid |= generation;
 	cpumask_clear(mm_cpumask(mm));
-	return asid;
+	return asid | generation;
 }
 
 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
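
What the allocator fix guards against: when a rolled-over mm keeps its ASID, every per-CPU reserved copy of the old value must be rewritten to the new generation, and the loop deliberately refuses to exit early because the same ASID can sit reserved on several CPUs; missing one copy would let a later rollover fail to recognise it. A self-contained model of that invariant (generation in the upper bits, 8-bit ASID field; NR_CPUS and the encoding are illustrative):

#include <assert.h>
#include <stdbool.h>

#define NR_CPUS 4
static unsigned long long reserved_asids[NR_CPUS];

static bool check_update_reserved_asid_model(unsigned long long asid,
					     unsigned long long newasid)
{
	bool hit = false;

	/* No early return: every stale copy must move to the new generation. */
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		if (reserved_asids[cpu] == asid) {
			hit = true;
			reserved_asids[cpu] = newasid;
		}
	}
	return hit;
}

int main(void)
{
	/* ASID 5 of generation 1 is reserved on CPUs 0 and 2. */
	reserved_asids[0] = reserved_asids[2] = (1ULL << 8) | 5;

	/* Rollover to generation 2: both copies must be updated. */
	assert(check_update_reserved_asid_model((1ULL << 8) | 5,
						(2ULL << 8) | 5));
	assert(reserved_asids[0] == ((2ULL << 8) | 5));
	assert(reserved_asids[2] == ((2ULL << 8) | 5));
	return 0;
}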
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 8a63b4cdc0f2..7f8cd1b3557f 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -22,6 +22,7 @@
 #include <linux/memblock.h>
 #include <linux/dma-contiguous.h>
 #include <linux/sizes.h>
+#include <linux/stop_machine.h>
 
 #include <asm/cp15.h>
 #include <asm/mach-types.h>
@@ -627,12 +628,10 @@ static struct section_perm ro_perms[] = {
  * safe to be called with preemption disabled, as under stop_machine().
  */
 static inline void section_update(unsigned long addr, pmdval_t mask,
-				  pmdval_t prot)
+				  pmdval_t prot, struct mm_struct *mm)
 {
-	struct mm_struct *mm;
 	pmd_t *pmd;
 
-	mm = current->active_mm;
 	pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);
 
 #ifdef CONFIG_ARM_LPAE
@@ -656,49 +655,82 @@ static inline bool arch_has_strict_perms(void)
 	return !!(get_cr() & CR_XP);
 }
 
-#define set_section_perms(perms, field)	{				\
-	size_t i;							\
-	unsigned long addr;						\
-									\
-	if (!arch_has_strict_perms())					\
-		return;							\
-									\
-	for (i = 0; i < ARRAY_SIZE(perms); i++) {			\
-		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||	\
-		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {		\
-			pr_err("BUG: section %lx-%lx not aligned to %lx\n", \
-				perms[i].start, perms[i].end,		\
-				SECTION_SIZE);				\
-			continue;					\
-		}							\
-									\
-		for (addr = perms[i].start;				\
-		     addr < perms[i].end;				\
-		     addr += SECTION_SIZE)				\
-			section_update(addr, perms[i].mask,		\
-				       perms[i].field);			\
-	}								\
+void set_section_perms(struct section_perm *perms, int n, bool set,
+			struct mm_struct *mm)
+{
+	size_t i;
+	unsigned long addr;
+
+	if (!arch_has_strict_perms())
+		return;
+
+	for (i = 0; i < n; i++) {
+		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
+		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
+			pr_err("BUG: section %lx-%lx not aligned to %lx\n",
+				perms[i].start, perms[i].end,
+				SECTION_SIZE);
+			continue;
+		}
+
+		for (addr = perms[i].start;
+		     addr < perms[i].end;
+		     addr += SECTION_SIZE)
+			section_update(addr, perms[i].mask,
+				set ? perms[i].prot : perms[i].clear, mm);
+	}
+
 }
 
-static inline void fix_kernmem_perms(void)
+static void update_sections_early(struct section_perm perms[], int n)
 {
-	set_section_perms(nx_perms, prot);
+	struct task_struct *t, *s;
+
+	read_lock(&tasklist_lock);
+	for_each_process(t) {
+		if (t->flags & PF_KTHREAD)
+			continue;
+		for_each_thread(t, s)
+			set_section_perms(perms, n, true, s->mm);
+	}
+	read_unlock(&tasklist_lock);
+	set_section_perms(perms, n, true, current->active_mm);
+	set_section_perms(perms, n, true, &init_mm);
+}
+
+int __fix_kernmem_perms(void *unused)
+{
+	update_sections_early(nx_perms, ARRAY_SIZE(nx_perms));
+	return 0;
+}
+
+void fix_kernmem_perms(void)
+{
+	stop_machine(__fix_kernmem_perms, NULL, NULL);
 }
 
 #ifdef CONFIG_DEBUG_RODATA
+int __mark_rodata_ro(void *unused)
+{
+	update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
+	return 0;
+}
+
 void mark_rodata_ro(void)
 {
-	set_section_perms(ro_perms, prot);
+	stop_machine(__mark_rodata_ro, NULL, NULL);
 }
 
 void set_kernel_text_rw(void)
 {
-	set_section_perms(ro_perms, clear);
+	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
+				current->active_mm);
 }
 
 void set_kernel_text_ro(void)
 {
-	set_section_perms(ro_perms, prot);
+	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
+				current->active_mm);
 }
 #endif /* CONFIG_DEBUG_RODATA */
 
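
The point of this rework is that section permissions are now pushed into every mm in the system, not just current->active_mm: under stop_machine(), update_sections_early() walks the task list, skips kernel threads (which only borrow an mm), and then catches the active mm and init_mm as well. A toy model of that walk (struct task and struct mm are stand-ins for task_struct and mm_struct; the prot word stands in for the PMD rewrite):

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

struct mm   { unsigned long section_prot; };
struct task { bool kthread; struct mm *mm; };

static struct mm init_mm, user_mm_a, user_mm_b;
static struct task tasks[] = {
	{ .kthread = false, .mm = &user_mm_a },
	{ .kthread = true,  .mm = NULL       },	/* kernel thread: no own mm */
	{ .kthread = false, .mm = &user_mm_b },
};

static void apply_perms(struct mm *mm, unsigned long prot)
{
	if (mm)
		mm->section_prot = prot;	/* the real code rewrites PMDs */
}

static void update_all_mms(unsigned long prot, struct mm *active_mm)
{
	for (size_t i = 0; i < sizeof(tasks) / sizeof(tasks[0]); i++) {
		if (tasks[i].kthread)
			continue;
		apply_perms(tasks[i].mm, prot);
	}
	apply_perms(active_mm, prot);		/* like current->active_mm */
	apply_perms(&init_mm, prot);
}

int main(void)
{
	update_all_mms(0x1, &user_mm_a);
	assert(user_mm_a.section_prot == 0x1);	/* no mm left behind */
	assert(user_mm_b.section_prot == 0x1);
	assert(init_mm.section_prot == 0x1);
	return 0;
}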
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index de2b246fed38..8e1ea433c3f1 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -95,7 +95,7 @@ ENDPROC(cpu_v7_dcache_clean_area)
 .equ	cpu_v7_suspend_size, 4 * 9
 #ifdef CONFIG_ARM_CPU_SUSPEND
 ENTRY(cpu_v7_do_suspend)
-	stmfd	sp!, {r4 - r10, lr}
+	stmfd	sp!, {r4 - r11, lr}
 	mrc	p15, 0, r4, c13, c0, 0	@ FCSE/PID
 	mrc	p15, 0, r5, c13, c0, 3	@ User r/o thread ID
 	stmia	r0!, {r4 - r5}
@@ -112,7 +112,7 @@ ENTRY(cpu_v7_do_suspend)
 	mrc	p15, 0, r9, c1, c0, 1	@ Auxiliary control register
 	mrc	p15, 0, r10, c1, c0, 2	@ Co-processor access control
 	stmia	r0, {r5 - r11}
-	ldmfd	sp!, {r4 - r10, pc}
+	ldmfd	sp!, {r4 - r11, pc}
 ENDPROC(cpu_v7_do_suspend)
 
 ENTRY(cpu_v7_do_resume)