diff options
author | Richard Kuo <rkuo@codeaurora.org> | 2012-05-29 18:23:14 -0400 |
---|---|---|
committer | Richard Kuo <rkuo@codeaurora.org> | 2013-04-30 20:40:24 -0400 |
commit | a11e67c2611d483622aad007a3533e7dfbea700e (patch) | |
tree | f54feb07c1f71d87fdf6aaf3b67b80c0ccff3f74 /arch/hexagon | |
parent | 60c4ba99e015afe879c2682967c8ca8d233f6d3c (diff) |
Hexagon: Signal and return path fixes
This fixes the return value of sigreturn, moves the work-pending check
into a C routine for readability, and fixes the loop for multiple pending
signals. Based on feedback from Al Viro.
Signed-off-by: Richard Kuo <rkuo@codeaurora.org>
Diffstat (limited to 'arch/hexagon')
-rw-r--r-- | arch/hexagon/include/uapi/asm/signal.h | 2 | ||||
-rw-r--r-- | arch/hexagon/kernel/process.c | 41 | ||||
-rw-r--r-- | arch/hexagon/kernel/signal.c | 29 | ||||
-rw-r--r-- | arch/hexagon/kernel/traps.c | 13 | ||||
-rw-r--r-- | arch/hexagon/kernel/vm_entry.S | 87 |
5 files changed, 84 insertions, 88 deletions
diff --git a/arch/hexagon/include/uapi/asm/signal.h b/arch/hexagon/include/uapi/asm/signal.h index 939556817d34..24b998888916 100644 --- a/arch/hexagon/include/uapi/asm/signal.h +++ b/arch/hexagon/include/uapi/asm/signal.h | |||
@@ -21,6 +21,8 @@ | |||
21 | 21 | ||
22 | extern unsigned long __rt_sigtramp_template[2]; | 22 | extern unsigned long __rt_sigtramp_template[2]; |
23 | 23 | ||
24 | void do_signal(struct pt_regs *regs); | ||
25 | |||
24 | #include <asm-generic/signal.h> | 26 | #include <asm-generic/signal.h> |
25 | 27 | ||
26 | #endif | 28 | #endif |
diff --git a/arch/hexagon/kernel/process.c b/arch/hexagon/kernel/process.c index 06ae9ffcabd5..dc72ed5b9ed9 100644 --- a/arch/hexagon/kernel/process.c +++ b/arch/hexagon/kernel/process.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/tick.h> | 24 | #include <linux/tick.h> |
25 | #include <linux/uaccess.h> | 25 | #include <linux/uaccess.h> |
26 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
27 | #include <linux/tracehook.h> | ||
27 | 28 | ||
28 | /* | 29 | /* |
29 | * Program thread launch. Often defined as a macro in processor.h, | 30 | * Program thread launch. Often defined as a macro in processor.h, |
@@ -202,3 +203,43 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu) | |||
202 | { | 203 | { |
203 | return 0; | 204 | return 0; |
204 | } | 205 | } |
206 | |||
207 | |||
208 | /* | ||
209 | * Called on the exit path of event entry; see vm_entry.S | ||
210 | * | ||
211 | * Interrupts will already be disabled. | ||
212 | * | ||
213 | * Returns 0 if there's no need to re-check for more work. | ||
214 | */ | ||
215 | |||
216 | int do_work_pending(struct pt_regs *regs, u32 thread_info_flags) | ||
217 | { | ||
218 | if (!(thread_info_flags & _TIF_ALLWORK_MASK)) { | ||
219 | return 0; | ||
220 | } /* shortcut -- no work to be done */ | ||
221 | |||
222 | local_irq_enable(); | ||
223 | |||
224 | if (thread_info_flags & _TIF_NEED_RESCHED) { | ||
225 | schedule(); | ||
226 | return 1; | ||
227 | } | ||
228 | |||
229 | if (thread_info_flags & _TIF_SIGPENDING) { | ||
230 | do_signal(regs); | ||
231 | return 1; | ||
232 | } | ||
233 | |||
234 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { | ||
235 | clear_thread_flag(TIF_NOTIFY_RESUME); | ||
236 | tracehook_notify_resume(regs); | ||
237 | if (current->replacement_session_keyring) | ||
238 | key_replace_session_keyring(); | ||
239 | return 1; | ||
240 | } | ||
241 | |||
242 | /* Should not even reach here */ | ||
243 | panic("%s: bad thread_info flags 0x%08x\n", __func__, | ||
244 | thread_info_flags); | ||
245 | } | ||
diff --git a/arch/hexagon/kernel/signal.c b/arch/hexagon/kernel/signal.c index a1492a69752b..8a20e8ed5d7d 100644 --- a/arch/hexagon/kernel/signal.c +++ b/arch/hexagon/kernel/signal.c | |||
@@ -199,7 +199,7 @@ static void handle_signal(int sig, siginfo_t *info, struct k_sigaction *ka, | |||
199 | /* | 199 | /* |
200 | * Called from return-from-event code. | 200 | * Called from return-from-event code. |
201 | */ | 201 | */ |
202 | static void do_signal(struct pt_regs *regs) | 202 | void do_signal(struct pt_regs *regs) |
203 | { | 203 | { |
204 | struct k_sigaction sigact; | 204 | struct k_sigaction sigact; |
205 | siginfo_t info; | 205 | siginfo_t info; |
@@ -216,8 +216,9 @@ static void do_signal(struct pt_regs *regs) | |||
216 | } | 216 | } |
217 | 217 | ||
218 | /* | 218 | /* |
219 | * If we came from a system call, handle the restart. | 219 | * No (more) signals; if we came from a system call, handle the restart. |
220 | */ | 220 | */ |
221 | |||
221 | if (regs->syscall_nr >= 0) { | 222 | if (regs->syscall_nr >= 0) { |
222 | switch (regs->r00) { | 223 | switch (regs->r00) { |
223 | case -ERESTARTNOHAND: | 224 | case -ERESTARTNOHAND: |
@@ -240,17 +241,6 @@ no_restart: | |||
240 | restore_saved_sigmask(); | 241 | restore_saved_sigmask(); |
241 | } | 242 | } |
242 | 243 | ||
243 | void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags) | ||
244 | { | ||
245 | if (thread_info_flags & _TIF_SIGPENDING) | ||
246 | do_signal(regs); | ||
247 | |||
248 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { | ||
249 | clear_thread_flag(TIF_NOTIFY_RESUME); | ||
250 | tracehook_notify_resume(regs); | ||
251 | } | ||
252 | } | ||
253 | |||
254 | /* | 244 | /* |
255 | * Architecture-specific wrappers for signal-related system calls | 245 | * Architecture-specific wrappers for signal-related system calls |
256 | */ | 246 | */ |
@@ -278,21 +268,12 @@ asmlinkage int sys_rt_sigreturn(void) | |||
278 | /* Restore the user's stack as well */ | 268 | /* Restore the user's stack as well */ |
279 | pt_psp(regs) = regs->r29; | 269 | pt_psp(regs) = regs->r29; |
280 | 270 | ||
281 | /* | 271 | regs->syscall_nr = -1; |
282 | * Leave a trace in the stack frame that this was a sigreturn. | ||
283 | * If the system call is to replay, we've already restored the | ||
284 | * number in the GPR slot and it will be regenerated on the | ||
285 | * new system call trap entry. Note that if restore_sigcontext() | ||
286 | * did something other than a bulk copy of the pt_regs struct, | ||
287 | * we could avoid this assignment by simply not overwriting | ||
288 | * regs->syscall_nr. | ||
289 | */ | ||
290 | regs->syscall_nr = __NR_rt_sigreturn; | ||
291 | 272 | ||
292 | if (restore_altstack(&frame->uc.uc_stack)) | 273 | if (restore_altstack(&frame->uc.uc_stack)) |
293 | goto badframe; | 274 | goto badframe; |
294 | 275 | ||
295 | return 0; | 276 | return regs->r00; |
296 | 277 | ||
297 | badframe: | 278 | badframe: |
298 | force_sig(SIGSEGV, current); | 279 | force_sig(SIGSEGV, current); |
diff --git a/arch/hexagon/kernel/traps.c b/arch/hexagon/kernel/traps.c index be5e2dd9c9d3..d59ee62f772d 100644 --- a/arch/hexagon/kernel/traps.c +++ b/arch/hexagon/kernel/traps.c | |||
@@ -356,7 +356,6 @@ long sys_syscall(void) | |||
356 | 356 | ||
357 | void do_trap0(struct pt_regs *regs) | 357 | void do_trap0(struct pt_regs *regs) |
358 | { | 358 | { |
359 | unsigned long syscallret = 0; | ||
360 | syscall_fn syscall; | 359 | syscall_fn syscall; |
361 | 360 | ||
362 | switch (pt_cause(regs)) { | 361 | switch (pt_cause(regs)) { |
@@ -396,21 +395,11 @@ void do_trap0(struct pt_regs *regs) | |||
396 | } else { | 395 | } else { |
397 | syscall = (syscall_fn) | 396 | syscall = (syscall_fn) |
398 | (sys_call_table[regs->syscall_nr]); | 397 | (sys_call_table[regs->syscall_nr]); |
399 | syscallret = syscall(regs->r00, regs->r01, | 398 | regs->r00 = syscall(regs->r00, regs->r01, |
400 | regs->r02, regs->r03, | 399 | regs->r02, regs->r03, |
401 | regs->r04, regs->r05); | 400 | regs->r04, regs->r05); |
402 | } | 401 | } |
403 | 402 | ||
404 | /* | ||
405 | * If it was a sigreturn system call, don't overwrite | ||
406 | * r0 value in stack frame with return value. | ||
407 | * | ||
408 | * __NR_sigreturn doesn't seem to exist in new unistd.h | ||
409 | */ | ||
410 | |||
411 | if (regs->syscall_nr != __NR_rt_sigreturn) | ||
412 | regs->r00 = syscallret; | ||
413 | |||
414 | /* allow strace to get the syscall return state */ | 403 | /* allow strace to get the syscall return state */ |
415 | if (unlikely(test_thread_flag(TIF_SYSCALL_TRACE))) | 404 | if (unlikely(test_thread_flag(TIF_SYSCALL_TRACE))) |
416 | tracehook_report_syscall_exit(regs, 0); | 405 | tracehook_report_syscall_exit(regs, 0); |
diff --git a/arch/hexagon/kernel/vm_entry.S b/arch/hexagon/kernel/vm_entry.S index ffe4a424b706..053551ee7114 100644 --- a/arch/hexagon/kernel/vm_entry.S +++ b/arch/hexagon/kernel/vm_entry.S | |||
@@ -274,6 +274,9 @@ event_dispatch: | |||
274 | callr r1 | 274 | callr r1 |
275 | 275 | ||
276 | /* | 276 | /* |
277 | * Coming back from the C-world, our thread info pointer | ||
278 | * should be in the designated register (usually R19) | ||
279 | * | ||
277 | * If we were in kernel mode, we don't need to check scheduler | 280 | * If we were in kernel mode, we don't need to check scheduler |
278 | * or signals if CONFIG_PREEMPT is not set. If set, then it has | 281 | * or signals if CONFIG_PREEMPT is not set. If set, then it has |
279 | * to jump to a need_resched kind of block. | 282 | * to jump to a need_resched kind of block. |
@@ -286,67 +289,43 @@ event_dispatch: | |||
286 | #endif | 289 | #endif |
287 | 290 | ||
288 | /* "Nested control path" -- if the previous mode was kernel */ | 291 | /* "Nested control path" -- if the previous mode was kernel */ |
289 | R0 = memw(R29 + #_PT_ER_VMEST); | ||
290 | { | 292 | { |
291 | P0 = tstbit(R0, #HVM_VMEST_UM_SFT); | 293 | R0 = memw(R29 + #_PT_ER_VMEST); |
292 | if (!P0.new) jump:nt restore_all; | 294 | R16.L = #LO(do_work_pending); |
293 | } | 295 | } |
294 | /* | ||
295 | * Returning from system call, normally coming back from user mode | ||
296 | */ | ||
297 | return_from_syscall: | ||
298 | /* Disable interrupts while checking TIF */ | ||
299 | R0 = #VM_INT_DISABLE | ||
300 | trap1(#HVM_TRAP1_VMSETIE) | ||
301 | |||
302 | /* | ||
303 | * Coming back from the C-world, our thread info pointer | ||
304 | * should be in the designated register (usually R19) | ||
305 | */ | ||
306 | #if CONFIG_HEXAGON_ARCH_VERSION < 4 | ||
307 | R1.L = #LO(_TIF_ALLWORK_MASK) | ||
308 | { | 296 | { |
309 | R1.H = #HI(_TIF_ALLWORK_MASK); | 297 | P0 = tstbit(R0, #HVM_VMEST_UM_SFT); |
310 | R0 = memw(THREADINFO_REG + #_THREAD_INFO_FLAGS); | 298 | if (!P0.new) jump:nt restore_all; |
311 | } | 299 | R16.H = #HI(do_work_pending); |
312 | #else | 300 | R0 = #VM_INT_DISABLE; |
313 | { | ||
314 | R1 = ##_TIF_ALLWORK_MASK; | ||
315 | R0 = memw(THREADINFO_REG + #_THREAD_INFO_FLAGS); | ||
316 | } | 301 | } |
317 | #endif | ||
318 | 302 | ||
319 | /* | 303 | /* |
320 | * Compare against the "return to userspace" _TIF_WORK_MASK | 304 | * Check also the return from fork/system call, normally coming back from |
305 | * user mode | ||
306 | * | ||
307 | * R16 needs to have do_work_pending, and R0 should have VM_INT_DISABLE | ||
321 | */ | 308 | */ |
322 | R1 = and(R1,R0); | ||
323 | { P0 = cmp.eq(R1,#0); if (!P0.new) jump:t work_pending;} | ||
324 | jump restore_all; /* we're outta here! */ | ||
325 | 309 | ||
326 | work_pending: | 310 | check_work_pending: |
311 | /* Disable interrupts while checking TIF */ | ||
312 | trap1(#HVM_TRAP1_VMSETIE) | ||
327 | { | 313 | { |
328 | P0 = tstbit(R1, #TIF_NEED_RESCHED); | 314 | R0 = R29; /* regs should still be at top of stack */ |
329 | if (!P0.new) jump:nt work_notifysig; | 315 | R1 = memw(THREADINFO_REG + #_THREAD_INFO_FLAGS); |
316 | callr R16; | ||
330 | } | 317 | } |
331 | call schedule | ||
332 | jump return_from_syscall; /* check for more work */ | ||
333 | 318 | ||
334 | work_notifysig: | ||
335 | /* this is the part that's kind of fuzzy. */ | ||
336 | R1 = and(R0, #(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME)); | ||
337 | { | ||
338 | P0 = cmp.eq(R1, #0); | ||
339 | if P0.new jump:t restore_all; | ||
340 | } | ||
341 | { | 319 | { |
342 | R1 = R0; /* unsigned long thread_info_flags */ | 320 | P0 = cmp.eq(R0, #0); if (!P0.new) jump:nt check_work_pending; |
343 | R0 = R29; /* regs should still be at top of stack */ | 321 | R0 = #VM_INT_DISABLE; |
344 | } | 322 | } |
345 | call do_notify_resume | ||
346 | 323 | ||
347 | restore_all: | 324 | restore_all: |
348 | /* Disable interrupts, if they weren't already, before reg restore. */ | 325 | /* |
349 | R0 = #VM_INT_DISABLE | 326 | * Disable interrupts, if they weren't already, before reg restore. |
327 | * R0 gets preloaded with #VM_INT_DISABLE before we get here. | ||
328 | */ | ||
350 | trap1(#HVM_TRAP1_VMSETIE) | 329 | trap1(#HVM_TRAP1_VMSETIE) |
351 | 330 | ||
352 | /* do the setregs here for VM 0.5 */ | 331 | /* do the setregs here for VM 0.5 */ |
@@ -371,6 +350,7 @@ restore_all: | |||
371 | trap1(#HVM_TRAP1_VMRTE) | 350 | trap1(#HVM_TRAP1_VMRTE) |
372 | /* Notreached */ | 351 | /* Notreached */ |
373 | 352 | ||
353 | |||
374 | .globl _K_enter_genex | 354 | .globl _K_enter_genex |
375 | _K_enter_genex: | 355 | _K_enter_genex: |
376 | vm_event_entry(do_genex) | 356 | vm_event_entry(do_genex) |
@@ -390,9 +370,12 @@ _K_enter_machcheck: | |||
390 | 370 | ||
391 | .globl ret_from_fork | 371 | .globl ret_from_fork |
392 | ret_from_fork: | 372 | ret_from_fork: |
393 | call schedule_tail | 373 | { |
394 | P0 = cmp.eq(R24, #0); | 374 | call schedule_tail; |
395 | if P0 jump return_from_syscall | 375 | R16.H = #HI(do_work_pending); |
396 | R0 = R25; | 376 | } |
397 | callr R24 | 377 | { |
398 | jump return_from_syscall | 378 | R16.L = #LO(do_work_pending); |
379 | R0 = #VM_INT_DISABLE; | ||
380 | jump check_work_pending; | ||
381 | } | ||