Diffstat (limited to 'arch/ia64/kernel')
-rw-r--r--   arch/ia64/kernel/entry.S   |  53
-rw-r--r--   arch/ia64/kernel/head.S    |  13
-rw-r--r--   arch/ia64/kernel/process.c | 161
-rw-r--r--   arch/ia64/kernel/smpboot.c |   5
4 files changed, 89 insertions, 143 deletions
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 1ccbe12a4d84..e25b784a2b72 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -61,14 +61,13 @@ ENTRY(ia64_execve)
 	 * Allocate 8 input registers since ptrace() may clobber them
 	 */
 	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
-	alloc loc1=ar.pfs,8,2,4,0
+	alloc loc1=ar.pfs,8,2,3,0
 	mov loc0=rp
 	.body
 	mov out0=in0			// filename
 	;;				// stop bit between alloc and call
 	mov out1=in1			// argv
 	mov out2=in2			// envp
-	add out3=16,sp			// regs
 	br.call.sptk.many rp=sys_execve
 .ret0:
 	cmp4.ge p6,p7=r8,r0
@@ -76,7 +75,6 @@ ENTRY(ia64_execve)
 	sxt4 r8=r8			// return 64-bit result
 	;;
 	stf.spill [sp]=f0
-(p6)	cmp.ne pKStk,pUStk=r0,r0	// a successful execve() lands us in user-mode...
 	mov rp=loc0
 (p6)	mov ar.pfs=r0			// clear ar.pfs on success
 (p7)	br.ret.sptk.many rp
@@ -118,13 +116,12 @@ GLOBAL_ENTRY(sys_clone2)
 	mov loc1=r16				// save ar.pfs across do_fork
 	.body
 	mov out1=in1
-	mov out3=in2
+	mov out2=in2
 	tbit.nz p6,p0=in0,CLONE_SETTLS_BIT
-	mov out4=in3		// parent_tidptr: valid only w/CLONE_PARENT_SETTID
+	mov out3=in3		// parent_tidptr: valid only w/CLONE_PARENT_SETTID
 	;;
 (p6)	st8 [r2]=in5				// store TLS in r16 for copy_thread()
-	mov out5=in4		// child_tidptr: valid only w/CLONE_CHILD_SETTID or CLONE_CHILD_CLEARTID
-	adds out2=IA64_SWITCH_STACK_SIZE+16,sp	// out2 = &regs
+	mov out4=in4		// child_tidptr: valid only w/CLONE_CHILD_SETTID or CLONE_CHILD_CLEARTID
 	mov out0=in0				// out0 = clone_flags
 	br.call.sptk.many rp=do_fork
 .ret1:	.restore sp
@@ -150,13 +147,12 @@ GLOBAL_ENTRY(sys_clone)
 	mov loc1=r16				// save ar.pfs across do_fork
 	.body
 	mov out1=in1
-	mov out3=16				// stacksize (compensates for 16-byte scratch area)
+	mov out2=16				// stacksize (compensates for 16-byte scratch area)
 	tbit.nz p6,p0=in0,CLONE_SETTLS_BIT
-	mov out4=in2		// parent_tidptr: valid only w/CLONE_PARENT_SETTID
+	mov out3=in2		// parent_tidptr: valid only w/CLONE_PARENT_SETTID
 	;;
 (p6)	st8 [r2]=in4				// store TLS in r13 (tp)
-	mov out5=in3		// child_tidptr: valid only w/CLONE_CHILD_SETTID or CLONE_CHILD_CLEARTID
-	adds out2=IA64_SWITCH_STACK_SIZE+16,sp	// out2 = &regs
+	mov out4=in3		// child_tidptr: valid only w/CLONE_CHILD_SETTID or CLONE_CHILD_CLEARTID
 	mov out0=in0				// out0 = clone_flags
 	br.call.sptk.many rp=do_fork
 .ret2:	.restore sp
@@ -484,19 +480,6 @@ GLOBAL_ENTRY(prefetch_stack)
 	br.ret.sptk.many rp
 END(prefetch_stack)
 
-GLOBAL_ENTRY(kernel_execve)
-	rum psr.ac
-	mov r15=__NR_execve			// put syscall number in place
-	break __BREAK_SYSCALL
-	br.ret.sptk.many rp
-END(kernel_execve)
-
-GLOBAL_ENTRY(clone)
-	mov r15=__NR_clone			// put syscall number in place
-	break __BREAK_SYSCALL
-	br.ret.sptk.many rp
-END(clone)
-
 /*
  * Invoke a system call, but do some tracing before and after the call.
  * We MUST preserve the current register frame throughout this routine
@@ -600,6 +583,27 @@ GLOBAL_ENTRY(ia64_strace_leave_kernel)
 .ret4:	br.cond.sptk ia64_leave_kernel
 END(ia64_strace_leave_kernel)
 
+ENTRY(call_payload)
+	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(0)
+	/* call the kernel_thread payload; fn is in r4, arg - in r5 */
+	alloc loc1=ar.pfs,0,3,1,0
+	mov loc0=rp
+	mov loc2=gp
+	mov out0=r5		// arg
+	ld8 r14 = [r4], 8	// fn.address
+	;;
+	mov b6 = r14
+	ld8 gp = [r4]		// fn.gp
+	;;
+	br.call.sptk.many rp=b6		// fn(arg)
+.ret12:	mov gp=loc2
+	mov rp=loc0
+	mov ar.pfs=loc1
+	/* ... and if it has returned, we are going to userland */
+	cmp.ne pKStk,pUStk=r0,r0
+	br.ret.sptk.many rp
+END(call_payload)
+
 GLOBAL_ENTRY(ia64_ret_from_clone)
 	PT_REGS_UNWIND_INFO(0)
 {	/*
@@ -616,6 +620,7 @@ GLOBAL_ENTRY(ia64_ret_from_clone)
 	br.call.sptk.many rp=ia64_invoke_schedule_tail
 }
 .ret8:
+(pKStk)	br.call.sptk.many rp=call_payload
 	adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
 	;;
 	ld4 r2=[r2]
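Aside (not part of the diff): the two ld8 loads in call_payload reflect the ia64 calling convention, where a C function pointer refers to a { entry address, gp } function descriptor rather than directly to code. copy_thread() (see process.c below) parks the descriptor pointer in r4 and the argument in r5; call_payload unpacks the descriptor, branches to fn(arg), and, if the payload returns, clears pKStk so the thread continues to user mode. A minimal user-space model of that hand-off, with purely illustrative names and struct layout, might look like:

#include <stdio.h>

/* Illustrative stand-in for an ia64 function descriptor: { entry, gp }. */
struct fdesc {
	unsigned long ip;	/* fn.address -- first ld8 in call_payload */
	unsigned long gp;	/* fn.gp      -- second ld8 */
};

static int payload(void *arg)
{
	printf("payload(\"%s\")\n", (const char *)arg);
	return 0;
}

int main(void)
{
	/* copy_thread() stores the descriptor pointer in r4 and the
	 * argument in r5; call_payload() then performs fn(arg). */
	struct fdesc desc = { .ip = (unsigned long)payload, .gp = 0 };
	void *r4 = &desc;
	void *r5 = "hello";

	struct fdesc *d = r4;
	int (*fn)(void *) = (int (*)(void *))d->ip;	/* mov b6 = fn.address */
	return fn(r5);					/* br.call rp = b6 */
}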
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 629a250f7c19..4738ff7bd66a 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -1093,19 +1093,6 @@ GLOBAL_ENTRY(cycle_to_cputime)
 END(cycle_to_cputime)
 #endif /* CONFIG_VIRT_CPU_ACCOUNTING */
 
-GLOBAL_ENTRY(start_kernel_thread)
-	.prologue
-	.save rp, r0				// this is the end of the call-chain
-	.body
-	alloc r2 = ar.pfs, 0, 0, 2, 0
-	mov out0 = r9
-	mov out1 = r11;;
-	br.call.sptk.many rp = kernel_thread_helper;;
-	mov out0 = r8
-	br.call.sptk.many rp = sys_exit;;
-1:	br.sptk.few 1b				// not reached
-END(start_kernel_thread)
-
 #ifdef CONFIG_IA64_BRL_EMU
 
 /*
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 35e106f2ed13..31360cbbd5f8 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -393,72 +393,24 @@ ia64_load_extra (struct task_struct *task)
 int
 copy_thread(unsigned long clone_flags,
 	     unsigned long user_stack_base, unsigned long user_stack_size,
-	     struct task_struct *p, struct pt_regs *regs)
+	     struct task_struct *p)
 {
 	extern char ia64_ret_from_clone;
 	struct switch_stack *child_stack, *stack;
 	unsigned long rbs, child_rbs, rbs_size;
 	struct pt_regs *child_ptregs;
+	struct pt_regs *regs = current_pt_regs();
 	int retval = 0;
 
-#ifdef CONFIG_SMP
-	/*
-	 * For SMP idle threads, fork_by_hand() calls do_fork with
-	 * NULL regs.
-	 */
-	if (!regs)
-		return 0;
-#endif
-
-	stack = ((struct switch_stack *) regs) - 1;
-
 	child_ptregs = (struct pt_regs *) ((unsigned long) p + IA64_STK_OFFSET) - 1;
 	child_stack = (struct switch_stack *) child_ptregs - 1;
 
-	/* copy parent's switch_stack & pt_regs to child: */
-	memcpy(child_stack, stack, sizeof(*child_ptregs) + sizeof(*child_stack));
-
 	rbs = (unsigned long) current + IA64_RBS_OFFSET;
 	child_rbs = (unsigned long) p + IA64_RBS_OFFSET;
-	rbs_size = stack->ar_bspstore - rbs;
-
-	/* copy the parent's register backing store to the child: */
-	memcpy((void *) child_rbs, (void *) rbs, rbs_size);
-
-	if (likely(user_mode(child_ptregs))) {
-		if (clone_flags & CLONE_SETTLS)
-			child_ptregs->r13 = regs->r16;	/* see sys_clone2() in entry.S */
-		if (user_stack_base) {
-			child_ptregs->r12 = user_stack_base + user_stack_size - 16;
-			child_ptregs->ar_bspstore = user_stack_base;
-			child_ptregs->ar_rnat = 0;
-			child_ptregs->loadrs = 0;
-		}
-	} else {
-		/*
-		 * Note: we simply preserve the relative position of
-		 * the stack pointer here.  There is no need to
-		 * allocate a scratch area here, since that will have
-		 * been taken care of by the caller of sys_clone()
-		 * already.
-		 */
-		child_ptregs->r12 = (unsigned long) child_ptregs - 16; /* kernel sp */
-		child_ptregs->r13 = (unsigned long) p;		/* set `current' pointer */
-	}
-	child_stack->ar_bspstore = child_rbs + rbs_size;
-	child_stack->b0 = (unsigned long) &ia64_ret_from_clone;
 
 	/* copy parts of thread_struct: */
 	p->thread.ksp = (unsigned long) child_stack - 16;
 
-	/* stop some PSR bits from being inherited.
-	 * the psr.up/psr.pp bits must be cleared on fork but inherited on execve()
-	 * therefore we must specify them explicitly here and not include them in
-	 * IA64_PSR_BITS_TO_CLEAR.
-	 */
-	child_ptregs->cr_ipsr = ((child_ptregs->cr_ipsr | IA64_PSR_BITS_TO_SET)
-				 & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_PP | IA64_PSR_UP));
-
 	/*
 	 * NOTE: The calling convention considers all floating point
 	 * registers in the high partition (fph) to be scratch.  Since
@@ -480,8 +432,66 @@ copy_thread(unsigned long clone_flags,
 #	define THREAD_FLAGS_TO_SET	0
 	p->thread.flags = ((current->thread.flags & ~THREAD_FLAGS_TO_CLEAR)
 			   | THREAD_FLAGS_TO_SET);
+
 	ia64_drop_fpu(p);	/* don't pick up stale state from a CPU's fph */
 
+	if (unlikely(p->flags & PF_KTHREAD)) {
+		if (unlikely(!user_stack_base)) {
+			/* fork_idle() called us */
+			return 0;
+		}
+		memset(child_stack, 0, sizeof(*child_ptregs) + sizeof(*child_stack));
+		child_stack->r4 = user_stack_base;	/* payload */
+		child_stack->r5 = user_stack_size;	/* argument */
+		/*
+		 * Preserve PSR bits, except for bits 32-34 and 37-45,
+		 * which we can't read.
+		 */
+		child_ptregs->cr_ipsr = ia64_getreg(_IA64_REG_PSR) | IA64_PSR_BN;
+		/* mark as valid, empty frame */
+		child_ptregs->cr_ifs = 1UL << 63;
+		child_stack->ar_fpsr = child_ptregs->ar_fpsr
+			= ia64_getreg(_IA64_REG_AR_FPSR);
+		child_stack->pr = (1 << PRED_KERNEL_STACK);
+		child_stack->ar_bspstore = child_rbs;
+		child_stack->b0 = (unsigned long) &ia64_ret_from_clone;
+
+		/* stop some PSR bits from being inherited.
+		 * the psr.up/psr.pp bits must be cleared on fork but inherited on execve()
+		 * therefore we must specify them explicitly here and not include them in
+		 * IA64_PSR_BITS_TO_CLEAR.
+		 */
+		child_ptregs->cr_ipsr = ((child_ptregs->cr_ipsr | IA64_PSR_BITS_TO_SET)
+					 & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_PP | IA64_PSR_UP));
+
+		return 0;
+	}
+	stack = ((struct switch_stack *) regs) - 1;
+	/* copy parent's switch_stack & pt_regs to child: */
+	memcpy(child_stack, stack, sizeof(*child_ptregs) + sizeof(*child_stack));
+
+	/* copy the parent's register backing store to the child: */
+	rbs_size = stack->ar_bspstore - rbs;
+	memcpy((void *) child_rbs, (void *) rbs, rbs_size);
+	if (clone_flags & CLONE_SETTLS)
+		child_ptregs->r13 = regs->r16;	/* see sys_clone2() in entry.S */
+	if (user_stack_base) {
+		child_ptregs->r12 = user_stack_base + user_stack_size - 16;
+		child_ptregs->ar_bspstore = user_stack_base;
+		child_ptregs->ar_rnat = 0;
+		child_ptregs->loadrs = 0;
+	}
+	child_stack->ar_bspstore = child_rbs + rbs_size;
+	child_stack->b0 = (unsigned long) &ia64_ret_from_clone;
+
+	/* stop some PSR bits from being inherited.
+	 * the psr.up/psr.pp bits must be cleared on fork but inherited on execve()
+	 * therefore we must specify them explicitly here and not include them in
+	 * IA64_PSR_BITS_TO_CLEAR.
+	 */
+	child_ptregs->cr_ipsr = ((child_ptregs->cr_ipsr | IA64_PSR_BITS_TO_SET)
+				 & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_PP | IA64_PSR_UP));
+
 #ifdef CONFIG_PERFMON
 	if (current->thread.pfm_context)
 		pfm_inherit(p, child_ptregs);
@@ -608,57 +618,6 @@ dump_fpu (struct pt_regs *pt, elf_fpregset_t dst)
 	return 1;	/* f0-f31 are always valid so we always return 1 */
 }
 
-long
-sys_execve (const char __user *filename,
-	    const char __user *const __user *argv,
-	    const char __user *const __user *envp,
-	    struct pt_regs *regs)
-{
-	struct filename *fname;
-	int error;
-
-	fname = getname(filename);
-	error = PTR_ERR(fname);
-	if (IS_ERR(fname))
-		goto out;
-	error = do_execve(fname->name, argv, envp, regs);
-	putname(fname);
-out:
-	return error;
-}
-
-pid_t
-kernel_thread (int (*fn)(void *), void *arg, unsigned long flags)
-{
-	extern void start_kernel_thread (void);
-	unsigned long *helper_fptr = (unsigned long *) &start_kernel_thread;
-	struct {
-		struct switch_stack sw;
-		struct pt_regs pt;
-	} regs;
-
-	memset(&regs, 0, sizeof(regs));
-	regs.pt.cr_iip = helper_fptr[0];	/* set entry point (IP) */
-	regs.pt.r1 = helper_fptr[1];		/* set GP */
-	regs.pt.r9 = (unsigned long) fn;	/* 1st argument */
-	regs.pt.r11 = (unsigned long) arg;	/* 2nd argument */
-	/* Preserve PSR bits, except for bits 32-34 and 37-45, which we can't read. */
-	regs.pt.cr_ipsr = ia64_getreg(_IA64_REG_PSR) | IA64_PSR_BN;
-	regs.pt.cr_ifs = 1UL << 63;		/* mark as valid, empty frame */
-	regs.sw.ar_fpsr = regs.pt.ar_fpsr = ia64_getreg(_IA64_REG_AR_FPSR);
-	regs.sw.ar_bspstore = (unsigned long) current + IA64_RBS_OFFSET;
-	regs.sw.pr = (1 << PRED_KERNEL_STACK);
-	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs.pt, 0, NULL, NULL);
-}
-EXPORT_SYMBOL(kernel_thread);
-
-/* This gets called from kernel_thread() via ia64_invoke_thread_helper(). */
-int
-kernel_thread_helper (int (*fn)(void *), void *arg)
-{
-	return (*fn)(arg);
-}
-
 /*
  * Flush thread state.  This is called when a thread does an execve().
  */
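For context (not shown in this diff): the PF_KTHREAD branch added to copy_thread() above receives its user_stack_base/user_stack_size values from the generic kernel_thread() helper that replaces the removed ia64-private one. From memory of the kernel/fork.c of the same era, that helper is roughly the sketch below; treat the exact form as an assumption rather than a quotation. It shows why the "stack" arguments carry the payload function and its argument, which copy_thread() then stores in the child's r4/r5 for call_payload() in entry.S.

/* Approximate sketch of the generic helper (kernel/fork.c, same era).
 * fn and arg travel via do_fork()'s stack-start/stack-size parameters
 * and arrive in copy_thread() as user_stack_base/user_stack_size. */
pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED,
		       (unsigned long)fn, (unsigned long)arg, NULL, NULL);
}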
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 963d2db53bfa..6a368cb2043e 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -460,11 +460,6 @@ start_secondary (void *unused)
 	return 0;
 }
 
-struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
-{
-	return NULL;
-}
-
 static int __cpuinit
 do_boot_cpu (int sapicid, int cpu, struct task_struct *idle)
 {