author     Linus Torvalds <torvalds@linux-foundation.org>  2013-07-19 18:08:53 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-07-19 18:08:53 -0400
commit     89d0abe3d695103505c025dde6e07b9c3dd772f4
tree       76b68d041153acf601aeb2cba3370800d632e4ca
parent     89a8c5940d5cb43e6bede51bf4b3a7516b0ca622
parent     ff701306cd49aaff80fb852323b387812bc76491
Merge tag 'arm64-stable' of git://git.kernel.org/pub/scm/linux/kernel/git/cmarinas/linux-aarch64
Pull arm64 fixes from Catalin Marinas:

 - Post -rc1 update to the common reboot infrastructure.

 - Fixes (user cache maintenance fault handling, !COMPAT compilation,
   CPU online and interrupt handling).

* tag 'arm64-stable' of git://git.kernel.org/pub/scm/linux/kernel/git/cmarinas/linux-aarch64:
  arm64: use common reboot infrastructure
  arm64: mm: don't treat user cache maintenance faults as writes
  arm64: add '#ifdef CONFIG_COMPAT' for aarch32_break_handler()
  arm64: Only enable local interrupts after the CPU is marked online
 arch/arm64/include/asm/debug-monitors.h |  7 -------
 arch/arm64/include/asm/system_misc.h    |  3 ++-
 arch/arm64/kernel/process.c             |  2 +-
 arch/arm64/kernel/smp.c                 | 15 ++++++++-------
 arch/arm64/mm/fault.c                   | 46 ++++++++++++++++++++--------------------------
 5 files changed, 31 insertions(+), 42 deletions(-)
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index ef8235c68c09..a2232d07be9d 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -83,14 +83,7 @@ static inline int reinstall_suspended_bps(struct pt_regs *regs)
 }
 #endif
 
-#ifdef CONFIG_COMPAT
 int aarch32_break_handler(struct pt_regs *regs);
-#else
-static int aarch32_break_handler(struct pt_regs *regs)
-{
-        return -EFAULT;
-}
-#endif
 
 #endif  /* __ASSEMBLY */
 #endif  /* __KERNEL__ */
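
Note on the hunk above: the removed fallback was a plain `static` (not `static inline`) function in a header, so every file that included debug-monitors.h without calling it got its own unused copy and a "defined but not used" warning, breaking !CONFIG_COMPAT builds. Declaring the function unconditionally is safe because the real handler rejects non-compat tasks at runtime. A minimal sketch of the resulting pattern, assuming the mainline definition in arch/arm64/kernel/debug-monitors.c (not the verbatim kernel function):

/* Header: one unconditional declaration, no stub. */
int aarch32_break_handler(struct pt_regs *regs);

/* Definition site (sketch): */
int aarch32_break_handler(struct pt_regs *regs)
{
        /* Nothing to do unless the faulting task is a 32-bit (compat) one. */
        if (!compat_user_mode(regs))
                return -EFAULT;

        /* ... decode the AArch32 BKPT/Thumb breakpoint and raise SIGTRAP ... */
        return 0;
}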
diff --git a/arch/arm64/include/asm/system_misc.h b/arch/arm64/include/asm/system_misc.h
index a6e1750369ef..7a18fabbe0f6 100644
--- a/arch/arm64/include/asm/system_misc.h
+++ b/arch/arm64/include/asm/system_misc.h
@@ -23,6 +23,7 @@
 #include <linux/compiler.h>
 #include <linux/linkage.h>
 #include <linux/irqflags.h>
+#include <linux/reboot.h>
 
 struct pt_regs;
 
@@ -41,7 +42,7 @@ extern void show_pte(struct mm_struct *mm, unsigned long addr);
 extern void __show_regs(struct pt_regs *);
 
 void soft_restart(unsigned long);
-extern void (*arm_pm_restart)(char str, const char *cmd);
+extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
 
 #define UDBG_UNDEFINED  (1 << 0)
 #define UDBG_SYSCALL    (1 << 1)
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 46f02c3b5015..1788bf6b471f 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -132,7 +132,7 @@ void machine_restart(char *cmd)
 
         /* Now call the architecture specific reboot code. */
         if (arm_pm_restart)
-                arm_pm_restart('h', cmd);
+                arm_pm_restart(reboot_mode, cmd);
 
         /*
          * Whoops - the architecture was unable to reboot.
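
The system_misc.h and process.c hunks together move arm64 onto the common reboot infrastructure: machine_restart() now passes along the `enum reboot_mode` that core code derives from the reboot= kernel parameter (hence the new <linux/reboot.h> include) instead of the hard-coded 'h'. A hedged sketch of a platform hook under the new signature; the hook name and register writes are hypothetical, only the function-pointer shape comes from this patch:

#include <linux/io.h>
#include <linux/reboot.h>

static void __iomem *soc_reset_reg;     /* assumed ioremap()ed at probe time */

static void example_soc_restart(enum reboot_mode reboot_mode, const char *cmd)
{
        /* reboot_mode is REBOOT_COLD, REBOOT_WARM, REBOOT_HARD, ... */
        if (reboot_mode == REBOOT_HARD)
                writel(0x1, soc_reset_reg);     /* full power-on reset */
        else
                writel(0x2, soc_reset_reg);     /* warm reset path */
}

/* Registered once at platform init: arm_pm_restart = example_soc_restart; */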
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 4a053b3d1728..fee5cce83450 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -200,13 +200,6 @@ asmlinkage void secondary_start_kernel(void)
         raw_spin_unlock(&boot_lock);
 
         /*
-         * Enable local interrupts.
-         */
-        notify_cpu_starting(cpu);
-        local_irq_enable();
-        local_fiq_enable();
-
-        /*
          * OK, now it's safe to let the boot CPU continue.  Wait for
          * the CPU migration code to notice that the CPU is online
          * before we continue.
@@ -215,6 +208,14 @@ asmlinkage void secondary_start_kernel(void)
         complete(&cpu_running);
 
         /*
+         * Enable GIC and timers.
+         */
+        notify_cpu_starting(cpu);
+
+        local_irq_enable();
+        local_fiq_enable();
+
+        /*
          * OK, it's off to the idle thread for us
          */
         cpu_startup_entry(CPUHP_ONLINE);
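
The two smp.c hunks reorder secondary bring-up so that no interrupt can arrive before the CPU is visible in cpu_online_mask: in mainline, set_cpu_online() sits in the elided context just before complete(&cpu_running), and notify_cpu_starting() plus the IRQ/FIQ enables now follow it. A simplified sketch of the resulting order (abridged, not the literal function body):

/* secondary_start_kernel(), after this fix: */
set_cpu_online(cpu, true);       /* 1. publish the CPU as online            */
complete(&cpu_running);          /* 2. release the waiting boot CPU         */

notify_cpu_starting(cpu);        /* 3. CPU_STARTING notifiers (GIC, timers) */
local_irq_enable();              /* 4. only now may IRQs fire               */
local_fiq_enable();              /*    ... and FIQs                         */

cpu_startup_entry(CPUHP_ONLINE); /* 5. enter the idle loop                  */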
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 0ecac8980aae..6c8ba25bf6bb 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -152,25 +152,8 @@ void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
 #define ESR_CM                  (1 << 8)
 #define ESR_LNX_EXEC            (1 << 24)
 
-/*
- * Check that the permissions on the VMA allow for the fault which occurred.
- * If we encountered a write fault, we must have write permission, otherwise
- * we allow any permission.
- */
-static inline bool access_error(unsigned int esr, struct vm_area_struct *vma)
-{
-        unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
-
-        if (esr & ESR_WRITE)
-                mask = VM_WRITE;
-        if (esr & ESR_LNX_EXEC)
-                mask = VM_EXEC;
-
-        return vma->vm_flags & mask ? false : true;
-}
-
 static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
-                           unsigned int esr, unsigned int flags,
+                           unsigned int mm_flags, unsigned long vm_flags,
                            struct task_struct *tsk)
 {
         struct vm_area_struct *vma;
@@ -188,12 +171,17 @@ static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
          * it.
          */
 good_area:
-        if (access_error(esr, vma)) {
+        /*
+         * Check that the permissions on the VMA allow for the fault which
+         * occurred. If we encountered a write or exec fault, we must have
+         * appropriate permissions, otherwise we allow any permission.
+         */
+        if (!(vma->vm_flags & vm_flags)) {
                 fault = VM_FAULT_BADACCESS;
                 goto out;
         }
 
-        return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
+        return handle_mm_fault(mm, vma, addr & PAGE_MASK, mm_flags);
 
 check_stack:
         if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
@@ -208,9 +196,15 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
         struct task_struct *tsk;
         struct mm_struct *mm;
         int fault, sig, code;
-        bool write = (esr & ESR_WRITE) && !(esr & ESR_CM);
-        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-                (write ? FAULT_FLAG_WRITE : 0);
+        unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
+        unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+
+        if (esr & ESR_LNX_EXEC) {
+                vm_flags = VM_EXEC;
+        } else if ((esr & ESR_WRITE) && !(esr & ESR_CM)) {
+                vm_flags = VM_WRITE;
+                mm_flags |= FAULT_FLAG_WRITE;
+        }
 
         tsk = current;
         mm = tsk->mm;
@@ -248,7 +242,7 @@ retry:
 #endif
         }
 
-        fault = __do_page_fault(mm, addr, esr, flags, tsk);
+        fault = __do_page_fault(mm, addr, mm_flags, vm_flags, tsk);
 
         /*
          * If we need to retry but a fatal signal is pending, handle the
@@ -265,7 +259,7 @@ retry:
          */
 
         perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
-        if (flags & FAULT_FLAG_ALLOW_RETRY) {
+        if (mm_flags & FAULT_FLAG_ALLOW_RETRY) {
                 if (fault & VM_FAULT_MAJOR) {
                         tsk->maj_flt++;
                         perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,
@@ -280,7 +274,7 @@ retry:
                          * Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk of
                          * starvation.
                          */
-                        flags &= ~FAULT_FLAG_ALLOW_RETRY;
+                        mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
                         goto retry;
                 }
         }
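
Taken together, the fault.c hunks replace the per-fault access_error() call with vm_flags/mm_flags computed once up front, and, crucially, stop treating user cache maintenance as a write: DC/IC instructions report WnR=1 in the ESR but also set the CM bit, so the old check could SIGSEGV a cache operation on a mapping that lacks write permission. A standalone sketch of the decode; the helper name is hypothetical, ESR_WRITE's value comes from mainline fault.c, and the logic mirrors the patch:

#define ESR_WRITE       (1 << 6)        /* WnR: write, not read           */
#define ESR_CM          (1 << 8)        /* cache maintenance operation    */
#define ESR_LNX_EXEC    (1 << 24)       /* Linux-internal exec-fault mark */

static unsigned long fault_vm_flags(unsigned int esr)
{
        if (esr & ESR_LNX_EXEC)
                return VM_EXEC;                 /* instruction fetch      */
        if ((esr & ESR_WRITE) && !(esr & ESR_CM))
                return VM_WRITE;                /* a true write           */
        return VM_READ | VM_WRITE | VM_EXEC;    /* read or DC/IC op: any
                                                   permission suffices    */
}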