author     Al Viro <viro@zeniv.linux.org.uk>   2012-10-14 15:43:06 -0400
committer  Al Viro <viro@zeniv.linux.org.uk>   2012-10-19 14:28:09 -0400
commit     54d496c3915a10b5e46c5dd9de1a6d301ceb32bd (patch)
tree       d55abe04fe72f9c348256cd64917d2bcfde76965 /arch/ia64
parent     c19e6d67e4be16e20ff90f0baa98b16d926d23a5 (diff)
ia64: switch to generic kernel_thread()/kernel_execve()
Acked-by: Tony Luck <tony.luck@intel.com>
Tested-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
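
Background for the diff below: with GENERIC_KERNEL_THREAD and GENERIC_KERNEL_EXECVE selected, kernel_thread() and kernel_execve() are supplied by generic code, and the architecture only has to cover the kernel-thread case in copy_thread() and provide a return path that invokes the payload. The contract the ia64 changes rely on looks roughly like this (a simplified sketch, not the actual kernel/fork.c source; sketch_do_fork() is a hypothetical stand-in for the generic fork path):

	#include <linux/sched.h>	/* CLONE_VM, CLONE_UNTRACED */
	#include <linux/types.h>	/* pid_t */

	typedef int (*kthread_fn_t)(void *);

	/* Hypothetical stand-in for the generic fork path -- not a real symbol. */
	extern pid_t sketch_do_fork(unsigned long flags,
				    unsigned long stack_base, unsigned long stack_size);

	static pid_t sketch_kernel_thread(kthread_fn_t fn, void *arg, unsigned long flags)
	{
		/*
		 * A kernel thread has no user registers to copy, so the payload
		 * function and its argument travel through the stack-base and
		 * stack-size arguments; the ia64 copy_thread() below sees them
		 * as user_stack_base/user_stack_size and parks them in r4/r5
		 * for call_payload (added to entry.S in this commit).  The
		 * child inherits PF_KTHREAD from its kernel-thread parent.
		 */
		return sketch_do_fork(flags | CLONE_VM | CLONE_UNTRACED,
				      (unsigned long) fn,	/* -> user_stack_base */
				      (unsigned long) arg);	/* -> user_stack_size */
	}
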
Diffstat (limited to 'arch/ia64')
-rw-r--r--   arch/ia64/Kconfig                 |   2
-rw-r--r--   arch/ia64/include/asm/processor.h |  16
-rw-r--r--   arch/ia64/kernel/entry.S          |  29
-rw-r--r--   arch/ia64/kernel/head.S           |  13
-rw-r--r--   arch/ia64/kernel/process.c        | 129
5 files changed, 82 insertions, 107 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 3279646120e3..670600468128 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -42,6 +42,8 @@ config IA64
 	select GENERIC_TIME_VSYSCALL_OLD
 	select HAVE_MOD_ARCH_SPECIFIC
 	select MODULES_USE_ELF_RELA
+	select GENERIC_KERNEL_THREAD
+	select GENERIC_KERNEL_EXECVE
 	default y
 	help
 	  The Itanium Processor Family is Intel's 64-bit successor to
diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
index 944152a50912..e0a899a1a8a6 100644
--- a/arch/ia64/include/asm/processor.h
+++ b/arch/ia64/include/asm/processor.h
@@ -340,22 +340,6 @@ struct task_struct;
  */
 #define release_thread(dead_task)
 
-/*
- * This is the mechanism for creating a new kernel thread.
- *
- * NOTE 1: Only a kernel-only process (ie the swapper or direct
- * descendants who haven't done an "execve()") should use this: it
- * will work within a system call from a "real" process, but the
- * process memory space will not be free'd until both the parent and
- * the child have exited.
- *
- * NOTE 2: This MUST NOT be an inlined function.  Otherwise, we get
- * into trouble in init/main.c when the child thread returns to
- * do_basic_setup() and the timing is such that free_initmem() has
- * been called already.
- */
-extern pid_t kernel_thread (int (*fn)(void *), void *arg, unsigned long flags);
-
 /* Get wait channel for task P.  */
 extern unsigned long get_wchan (struct task_struct *p);
 
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 6b0648d97b4c..0dea684e1905 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -484,13 +484,6 @@ GLOBAL_ENTRY(prefetch_stack)
 	br.ret.sptk.many rp
 END(prefetch_stack)
 
-GLOBAL_ENTRY(kernel_execve)
-	rum psr.ac
-	mov r15=__NR_execve		// put syscall number in place
-	break __BREAK_SYSCALL
-	br.ret.sptk.many rp
-END(kernel_execve)
-
 /*
  * Invoke a system call, but do some tracing before and after the call.
  * We MUST preserve the current register frame throughout this routine
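
For reference, the kernel_execve() stub removed above re-entered the kernel through its own syscall path: clear psr.ac, load __NR_execve into r15, and execute a break, exactly as if userspace had issued the call. In C terms it amounted to the sketch below (illustrative only; ia64_break_syscall3() is a made-up name for that break-based syscall sequence, not a real kernel helper). With GENERIC_KERNEL_EXECVE selected, the generic implementation takes over and this detour through the syscall entry path goes away.

	/* Hypothetical helper: "load r15 with the syscall number, pass three
	 * arguments, execute break __BREAK_SYSCALL" -- not a real function. */
	extern long ia64_break_syscall3(int nr, unsigned long a0,
					unsigned long a1, unsigned long a2);

	static long sketch_old_kernel_execve(const char *filename,
					     const char *const argv[],
					     const char *const envp[])
	{
		return ia64_break_syscall3(__NR_execve,
					   (unsigned long) filename,
					   (unsigned long) argv,
					   (unsigned long) envp);
	}
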
@@ -594,6 +587,27 @@ GLOBAL_ENTRY(ia64_strace_leave_kernel)
 .ret4:	br.cond.sptk ia64_leave_kernel
 END(ia64_strace_leave_kernel)
 
+ENTRY(call_payload)
+	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(0)
+	/* call the kernel_thread payload; fn is in r4, arg - in r5 */
+	alloc loc1=ar.pfs,0,3,1,0
+	mov loc0=rp
+	mov loc2=gp
+	mov out0=r5		// arg
+	ld8 r14 = [r4], 8	// fn.address
+	;;
+	mov b6 = r14
+	ld8 gp = [r4]		// fn.gp
+	;;
+	br.call.sptk.many rp=b6	// fn(arg)
+.ret12:	mov gp=loc2
+	mov rp=loc0
+	mov ar.pfs=loc1
+	/* ... and if it has returned, we are going to userland */
+	cmp.ne pKStk,pUStk=r0,r0
+	br.ret.sptk.many rp
+END(call_payload)
+
 GLOBAL_ENTRY(ia64_ret_from_clone)
 	PT_REGS_UNWIND_INFO(0)
 {	/*
@@ -610,6 +624,7 @@ GLOBAL_ENTRY(ia64_ret_from_clone)
 	br.call.sptk.many rp=ia64_invoke_schedule_tail
 }
 .ret8:
+(pKStk)	br.call.sptk.many rp=call_payload
 	adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
 	;;
 	ld4 r2=[r2]
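
A note on call_payload: on ia64 a C function pointer is the address of a function descriptor, a two-word block holding the entry point and the callee's gp, which is what the two ld8 instructions unpack from r4 before branching. At the C level the stub does roughly the following (illustrative sketch only; the names are made up):

	/* Descriptor layout matching the two loads above: first word is the
	 * entry point (ends up in b6), second word is the callee's gp. */
	struct sketch_fdesc {
		unsigned long ip;
		unsigned long gp;
	};

	static void sketch_call_payload(unsigned long r4, unsigned long r5)
	{
		/* r4 holds the descriptor address, i.e. the C function pointer
		 * that copy_thread() stashed there; r5 holds its argument. */
		int (*fn)(void *) = (int (*)(void *)) r4;

		fn((void *) r5);
		/*
		 * If the payload returns, the stub clears pKStk so that
		 * ia64_ret_from_clone falls through to the return-to-user
		 * path, per the "... and if it has returned, we are going
		 * to userland" comment above.
		 */
	}
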
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 629a250f7c19..4738ff7bd66a 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -1093,19 +1093,6 @@ GLOBAL_ENTRY(cycle_to_cputime)
 END(cycle_to_cputime)
 #endif /* CONFIG_VIRT_CPU_ACCOUNTING */
 
-GLOBAL_ENTRY(start_kernel_thread)
-	.prologue
-	.save rp, r0			// this is the end of the call-chain
-	.body
-	alloc r2 = ar.pfs, 0, 0, 2, 0
-	mov out0 = r9
-	mov out1 = r11;;
-	br.call.sptk.many rp = kernel_thread_helper;;
-	mov out0 = r8
-	br.call.sptk.many rp = sys_exit;;
-1:	br.sptk.few 1b			// not reached
-END(start_kernel_thread)
-
 #ifdef CONFIG_IA64_BRL_EMU
 
 /*
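
The start_kernel_thread trampoline removed here (together with kernel_thread_helper() further down in process.c) is what call_payload replaces: it called the payload with the fn/arg that the old kernel_thread() had planted in r9/r11, then fed the return value (r8) to sys_exit(). A C-level sketch of what the removed assembly did (illustrative only; the real thing was the assembly above):

	/* kernel_thread_helper() is the (also removed) C helper in process.c;
	 * sys_exit() is the exit system call. */
	static void sketch_start_kernel_thread(int (*fn)(void *) /* r9 */,
					       void *arg /* r11 */)
	{
		sys_exit(kernel_thread_helper(fn, arg));
		for (;;)
			;	/* not reached */
	}
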
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 6a48775d9363..37686dbfd264 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -401,55 +401,15 @@ copy_thread(unsigned long clone_flags,
 	struct pt_regs *child_ptregs;
 	int retval = 0;
 
-	stack = ((struct switch_stack *) regs) - 1;
-
 	child_ptregs = (struct pt_regs *) ((unsigned long) p + IA64_STK_OFFSET) - 1;
 	child_stack = (struct switch_stack *) child_ptregs - 1;
 
-	/* copy parent's switch_stack & pt_regs to child: */
-	memcpy(child_stack, stack, sizeof(*child_ptregs) + sizeof(*child_stack));
-
 	rbs = (unsigned long) current + IA64_RBS_OFFSET;
 	child_rbs = (unsigned long) p + IA64_RBS_OFFSET;
 
-	if (likely(user_mode(child_ptregs))) {
-		/* copy the parent's register backing store to the child: */
-		rbs_size = stack->ar_bspstore - rbs;
-		memcpy((void *) child_rbs, (void *) rbs, rbs_size);
-		if (clone_flags & CLONE_SETTLS)
-			child_ptregs->r13 = regs->r16;	/* see sys_clone2() in entry.S */
-		if (user_stack_base) {
-			child_ptregs->r12 = user_stack_base + user_stack_size - 16;
-			child_ptregs->ar_bspstore = user_stack_base;
-			child_ptregs->ar_rnat = 0;
-			child_ptregs->loadrs = 0;
-		}
-	} else {
-		/*
-		 * Note: we simply preserve the relative position of
-		 * the stack pointer here.  There is no need to
-		 * allocate a scratch area here, since that will have
-		 * been taken care of by the caller of sys_clone()
-		 * already.
-		 */
-		rbs_size = 0;
-		child_ptregs->r12 = (unsigned long) child_ptregs - 16; /* kernel sp */
-		child_ptregs->r13 = (unsigned long) p;	/* set `current' pointer */
-	}
-	child_stack->ar_bspstore = child_rbs + rbs_size;
-	child_stack->b0 = (unsigned long) &ia64_ret_from_clone;
-
 	/* copy parts of thread_struct: */
 	p->thread.ksp = (unsigned long) child_stack - 16;
 
-	/* stop some PSR bits from being inherited.
-	 * the psr.up/psr.pp bits must be cleared on fork but inherited on execve()
-	 * therefore we must specify them explicitly here and not include them in
-	 * IA64_PSR_BITS_TO_CLEAR.
-	 */
-	child_ptregs->cr_ipsr = ((child_ptregs->cr_ipsr | IA64_PSR_BITS_TO_SET)
-				 & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_PP | IA64_PSR_UP));
-
 	/*
 	 * NOTE: The calling convention considers all floating point
 	 * registers in the high partition (fph) to be scratch.  Since
@@ -471,8 +431,66 @@ copy_thread(unsigned long clone_flags,
 #	define THREAD_FLAGS_TO_SET	0
 	p->thread.flags = ((current->thread.flags & ~THREAD_FLAGS_TO_CLEAR)
 			   | THREAD_FLAGS_TO_SET);
+
 	ia64_drop_fpu(p);	/* don't pick up stale state from a CPU's fph */
 
+	if (unlikely(p->flags & PF_KTHREAD)) {
+		if (unlikely(!user_stack_base)) {
+			/* fork_idle() called us */
+			return 0;
+		}
+		memset(child_stack, 0, sizeof(*child_ptregs) + sizeof(*child_stack));
+		child_stack->r4 = user_stack_base;	/* payload */
+		child_stack->r5 = user_stack_size;	/* argument */
+		/*
+		 * Preserve PSR bits, except for bits 32-34 and 37-45,
+		 * which we can't read.
+		 */
+		child_ptregs->cr_ipsr = ia64_getreg(_IA64_REG_PSR) | IA64_PSR_BN;
+		/* mark as valid, empty frame */
+		child_ptregs->cr_ifs = 1UL << 63;
+		child_stack->ar_fpsr = child_ptregs->ar_fpsr
+			= ia64_getreg(_IA64_REG_AR_FPSR);
+		child_stack->pr = (1 << PRED_KERNEL_STACK);
+		child_stack->ar_bspstore = child_rbs;
+		child_stack->b0 = (unsigned long) &ia64_ret_from_clone;
+
+		/* stop some PSR bits from being inherited.
+		 * the psr.up/psr.pp bits must be cleared on fork but inherited on execve()
+		 * therefore we must specify them explicitly here and not include them in
+		 * IA64_PSR_BITS_TO_CLEAR.
+		 */
+		child_ptregs->cr_ipsr = ((child_ptregs->cr_ipsr | IA64_PSR_BITS_TO_SET)
+					 & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_PP | IA64_PSR_UP));
+
+		return 0;
+	}
+	stack = ((struct switch_stack *) regs) - 1;
+	/* copy parent's switch_stack & pt_regs to child: */
+	memcpy(child_stack, stack, sizeof(*child_ptregs) + sizeof(*child_stack));
+
+	/* copy the parent's register backing store to the child: */
+	rbs_size = stack->ar_bspstore - rbs;
+	memcpy((void *) child_rbs, (void *) rbs, rbs_size);
+	if (clone_flags & CLONE_SETTLS)
+		child_ptregs->r13 = regs->r16;	/* see sys_clone2() in entry.S */
+	if (user_stack_base) {
+		child_ptregs->r12 = user_stack_base + user_stack_size - 16;
+		child_ptregs->ar_bspstore = user_stack_base;
+		child_ptregs->ar_rnat = 0;
+		child_ptregs->loadrs = 0;
+	}
+	child_stack->ar_bspstore = child_rbs + rbs_size;
+	child_stack->b0 = (unsigned long) &ia64_ret_from_clone;
+
+	/* stop some PSR bits from being inherited.
+	 * the psr.up/psr.pp bits must be cleared on fork but inherited on execve()
+	 * therefore we must specify them explicitly here and not include them in
+	 * IA64_PSR_BITS_TO_CLEAR.
+	 */
+	child_ptregs->cr_ipsr = ((child_ptregs->cr_ipsr | IA64_PSR_BITS_TO_SET)
+				 & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_PP | IA64_PSR_UP));
+
 #ifdef CONFIG_PERFMON
 	if (current->thread.pfm_context)
 		pfm_inherit(p, child_ptregs);
@@ -618,37 +636,6 @@ out:
 	return error;
 }
 
-pid_t
-kernel_thread (int (*fn)(void *), void *arg, unsigned long flags)
-{
-	extern void start_kernel_thread (void);
-	unsigned long *helper_fptr = (unsigned long *) &start_kernel_thread;
-	struct {
-		struct switch_stack sw;
-		struct pt_regs pt;
-	} regs;
-
-	memset(&regs, 0, sizeof(regs));
-	regs.pt.cr_iip = helper_fptr[0];	/* set entry point (IP) */
-	regs.pt.r1 = helper_fptr[1];		/* set GP */
-	regs.pt.r9 = (unsigned long) fn;	/* 1st argument */
-	regs.pt.r11 = (unsigned long) arg;	/* 2nd argument */
-	/* Preserve PSR bits, except for bits 32-34 and 37-45, which we can't read.  */
-	regs.pt.cr_ipsr = ia64_getreg(_IA64_REG_PSR) | IA64_PSR_BN;
-	regs.pt.cr_ifs = 1UL << 63;		/* mark as valid, empty frame */
-	regs.sw.ar_fpsr = regs.pt.ar_fpsr = ia64_getreg(_IA64_REG_AR_FPSR);
-	regs.sw.pr = (1 << PRED_KERNEL_STACK);
-	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs.pt, 0, NULL, NULL);
-}
-EXPORT_SYMBOL(kernel_thread);
-
-/* This gets called from kernel_thread() via ia64_invoke_thread_helper().  */
-int
-kernel_thread_helper (int (*fn)(void *), void *arg)
-{
-	return (*fn)(arg);
-}
-
 /*
  * Flush thread state.  This is called when a thread does an execve().
  */
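
Callers are unaffected by removing the arch-specific kernel_thread(): the generic version keeps the same prototype that the deleted processor.h declaration had, pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags). A typical call site still looks like the sketch below (usage illustration only; my_thread_fn is a placeholder):

	static int my_thread_fn(void *arg)
	{
		/* payload: runs in the child via ia64_ret_from_clone -> call_payload */
		return 0;
	}

	static void spawn_example(void)
	{
		kernel_thread(my_thread_fn, NULL, CLONE_FS | CLONE_FILES);
	}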