author		Linus Torvalds <torvalds@linux-foundation.org>	2012-10-12 21:05:52 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-12 21:05:52 -0400
commit		4e21fc138bfd7fe625ff5dc81541399aaf9d429b
tree		43bedf14d2eee7711b8241dcfd6bd7b8737d9bd5 /arch/arm/kernel
parent		8418263e3547ed3816475e4c55a77004f0426ee6
parent		5522be6a4624a5f505555569e4d9cee946630686
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/signal
Pull third pile of kernel_execve() patches from Al Viro:
 "The last bits of infrastructure for kernel_thread() et.al., with
  alpha/arm/x86 use of those.  Plus sanitizing the asm glue and
  do_notify_resume() on alpha, fixing the "disabled irq while running
  task_work stuff" breakage there.

  At that point the rest of kernel_thread/kernel_execve/sys_execve work
  can be done independently for different architectures.  The only
  pending bits that do depend on having all architectures converted are
  restricted to fs/* and kernel/* - that'll obviously have to wait for
  the next cycle.

  I thought we'd have to wait for all of them done before we start
  eliminating the longjump-style insanity in kernel_execve(), but it
  turned out there's a very simple way to do that without flagday-style
  changes."

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/signal:
  alpha: switch to saner kernel_execve() semantics
  arm: switch to saner kernel_execve() semantics
  x86, um: convert to saner kernel_execve() semantics
  infrastructure for saner ret_from_kernel_thread semantics
  make sure that kernel_thread() callbacks call do_exit() themselves
  make sure that we always have a return path from kernel_execve()
  ppc: eeh_event should just use kthread_run()
  don't bother with kernel_thread/kernel_execve for launching linuxrc
  alpha: get rid of switch_stack argument of do_work_pending()
  alpha: don't bother passing switch_stack separately from regs
  alpha: take SIGPENDING/NOTIFY_RESUME loop into signal.c
  alpha: simplify TIF_NEED_RESCHED handling
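As a note for readers of this merge: under the "saner" semantics the series converges on, kernel_execve() returns to its caller like an ordinary function, and the switch to userspace happens only when the kernel thread's callback returns into the generic ret_from_fork path; callbacks that do not exec must call do_exit() themselves. A minimal, hypothetical callback written against that convention might look roughly like this (the function name, path and argv/envp values are placeholders, not code from this series):

/*
 * Hypothetical kernel_thread() callback under the new semantics.
 * Old world: kernel_execve() never returned on success - it jumped to
 * userspace via ret_from_kernel_execve().
 * New world: kernel_execve() returns normally; when this callback
 * returns, ret_from_fork falls through to the syscall-return path and
 * enters userspace with the registers that execve set up.
 */
static int example_init_thread(void *unused)
{
	static const char *const argv[] = { "/sbin/init", NULL };
	static const char *const envp[] = { "HOME=/", "TERM=linux", NULL };
	int ret;

	ret = kernel_execve(argv[0], argv, envp);
	if (ret)
		do_exit(ret);	/* no exec happened: exit explicitly, never just return */

	return 0;		/* exec succeeded: returning takes us to userspace */
}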
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--	arch/arm/kernel/entry-common.S	29
-rw-r--r--	arch/arm/kernel/process.c	5
2 files changed, 6 insertions(+), 28 deletions(-)
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index e340fa1db203..417bac1846bd 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -86,35 +86,14 @@ ENDPROC(ret_to_user)
  */
 ENTRY(ret_from_fork)
 	bl	schedule_tail
+	cmp	r5, #0
+	movne	r0, r4
+	movne	lr, pc
+	movne	pc, r5
 	get_thread_info tsk
-	mov	why, #1
 	b	ret_slow_syscall
 ENDPROC(ret_from_fork)
 
-ENTRY(ret_from_kernel_thread)
-	UNWIND(.fnstart)
-	UNWIND(.cantunwind)
-	bl	schedule_tail
-	mov	r0, r4
-	adr	lr, BSYM(1f)	@ kernel threads should not exit
-	mov	pc, r5
-1:	bl	do_exit
-	nop
-	UNWIND(.fnend)
-ENDPROC(ret_from_kernel_thread)
-
-/*
- * turn a kernel thread into userland process
- * use: ret_from_kernel_execve(struct pt_regs *normal)
- */
-ENTRY(ret_from_kernel_execve)
-	mov	why, #0			@ not a syscall
-	str	why, [r0, #S_R0]	@ ... and we want 0 in ->ARM_r0 as well
-	get_thread_info tsk		@ thread structure
-	mov	sp, r0			@ stack pointer just under pt_regs
-	b	ret_slow_syscall
-ENDPROC(ret_from_kernel_execve)
-
 	.equ NR_syscalls,0
 #define CALL(x) .equ NR_syscalls,NR_syscalls+1
 #include "calls.S"
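For readers less fluent in ARM assembly, a rough C-level rendering of the unified ret_from_fork above may help (a stand-alone sketch only, with stub helpers standing in for the real entry paths; fn and arg correspond to the values that copy_thread() parks in r5 and r4 in the process.c hunk below):

/* Illustrative sketch of the new ret_from_fork flow; not kernel code. */
#include <stdio.h>

static void schedule_tail_stub(void)    { puts("schedule_tail"); }
static void ret_slow_syscall_stub(void) { puts("return to userspace via the child's pt_regs"); }

static void ret_from_fork_sketch(int (*fn)(void *), void *arg)
{
	schedule_tail_stub();		/* finish switching away from the parent */

	if (fn)				/* r5 != 0: this child is a kernel thread */
		fn(arg);		/* call the callback with its r4 argument */

	/*
	 * Either a plain user fork, or a kernel thread whose callback
	 * returned after a successful kernel_execve(): fall through to
	 * the normal syscall-return path.
	 */
	ret_slow_syscall_stub();
}

static int demo_thread(void *arg)
{
	printf("kernel thread body, arg=%p\n", arg);
	return 0;
}

int main(void)
{
	ret_from_fork_sketch(demo_thread, NULL);	/* kernel-thread case */
	ret_from_fork_sketch(NULL, NULL);		/* user-fork case */
	return 0;
}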
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index f98c17ff1957..90084a6de35a 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -373,7 +373,6 @@ void release_thread(struct task_struct *dead_task)
 }
 
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
-asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
 
 int
 copy_thread(unsigned long clone_flags, unsigned long stack_start,
@@ -388,13 +387,13 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
 		*childregs = *regs;
 		childregs->ARM_r0 = 0;
 		childregs->ARM_sp = stack_start;
-		thread->cpu_context.pc = (unsigned long)ret_from_fork;
 	} else {
+		memset(childregs, 0, sizeof(struct pt_regs));
 		thread->cpu_context.r4 = stk_sz;
 		thread->cpu_context.r5 = stack_start;
-		thread->cpu_context.pc = (unsigned long)ret_from_kernel_thread;
 		childregs->ARM_cpsr = SVC_MODE;
 	}
+	thread->cpu_context.pc = (unsigned long)ret_from_fork;
 	thread->cpu_context.sp = (unsigned long)childregs;
 
 	clear_ptrace_hw_breakpoint(p);