author    Tony Luck <tony.luck@intel.com>    2006-03-21 11:17:38 -0500
committer Tony Luck <tony.luck@intel.com>    2006-03-21 11:17:38 -0500
commit    581249966ffeb0463bad1b0e087e1bb29ed53707 (patch)
tree      d0cc7c15ff2cd9c65799a114b0f8e2257765f1e6
parent    6a8a8e14bed179f2d415c7f4fee51233c35c10c3 (diff)
parent    b0a06623dc4caf6dfb6a84419507643471676d20 (diff)
Pull delete-sigdelayed into release branch
-rw-r--r--  arch/ia64/kernel/entry.S          14
-rw-r--r--  arch/ia64/kernel/signal.c         101
-rw-r--r--  include/asm-ia64/signal.h         2
-rw-r--r--  include/asm-ia64/thread_info.h    11
4 files changed, 1 insertion(+), 127 deletions(-)
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 930fdfca6ddb..0e3eda99e549 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1102,9 +1102,6 @@ skip_rbs_switch:
 	st8 [r2]=r8
 	st8 [r3]=r10
 .work_pending:
-	tbit.nz p6,p0=r31,TIF_SIGDELAYED		// signal delayed from MCA/INIT/NMI/PMI context?
-(p6)	br.cond.sptk.few .sigdelayed
-	;;
 	tbit.z p6,p0=r31,TIF_NEED_RESCHED		// current_thread_info()->need_resched==0?
 (p6)	br.cond.sptk.few .notify
 #ifdef CONFIG_PREEMPT
@@ -1131,17 +1128,6 @@ skip_rbs_switch:
 (pLvSys)br.cond.sptk.few  .work_pending_syscall_end
 	br.cond.sptk.many .work_processed_kernel	// don't re-check
 
-// There is a delayed signal that was detected in MCA/INIT/NMI/PMI context where
-// it could not be delivered.  Deliver it now.  The signal might be for us and
-// may set TIF_SIGPENDING, so redrive ia64_leave_* after processing the delayed
-// signal.
-
-.sigdelayed:
-	br.call.sptk.many rp=do_sigdelayed
-	cmp.eq p6,p0=r0,r0				// p6 <- 1, always re-check
-(pLvSys)br.cond.sptk.few  .work_pending_syscall_end
-	br.cond.sptk.many .work_processed_kernel	// re-check
-
 .work_pending_syscall_end:
 	adds r2=PT(R8)+16,r12
 	adds r3=PT(R10)+16,r12
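
For readers not fluent in ia64 assembly, the .work_pending dispatch that the deleted .sigdelayed branch hung off can be pictured with the C sketch below. This is an illustration only, not code from the patch: the helper name and the plain if/return control flow are invented stand-ins for the predicated branches in ia64_leave_kernel, while do_sigdelayed(), schedule() and the _TIF_* masks are taken from the diff.

/*
 * Hedged sketch of the .work_pending dispatch, with the branch this
 * patch removes.  "flags" stands for current_thread_info()->flags
 * (held in r31 in the assembly).  Returning means "go back and
 * re-check the work flags", mirroring the re-check branches above.
 */
static void work_pending_sketch(unsigned long flags)	/* hypothetical helper */
{
	if (flags & _TIF_SIGDELAYED) {		/* path removed by this patch */
		do_sigdelayed();		/* may set TIF_SIGPENDING on current */
		return;				/* always re-check all work flags */
	}
	if (!(flags & _TIF_NEED_RESCHED)) {
		/* .notify: deliver pending signals / notify-resume work */
		return;
	}
	schedule();				/* then re-check work flags */
}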
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
index 463f6bb44d07..1d7903ee2126 100644
--- a/arch/ia64/kernel/signal.c
+++ b/arch/ia64/kernel/signal.c
@@ -588,104 +588,3 @@ ia64_do_signal (sigset_t *oldset, struct sigscratch *scr, long in_syscall)
 	}
 	return 0;
 }
-
-/* Set a delayed signal that was detected in MCA/INIT/NMI/PMI context where it
- * could not be delivered.  It is important that the target process is not
- * allowed to do any more work in user space.  Possible cases for the target
- * process:
- *
- * - It is sleeping and will wake up soon.  Store the data in the current task,
- *   the signal will be sent when the current task returns from the next
- *   interrupt.
- *
- * - It is running in user context.  Store the data in the current task, the
- *   signal will be sent when the current task returns from the next interrupt.
- *
- * - It is running in kernel context on this or another cpu and will return to
- *   user context.  Store the data in the target task, the signal will be sent
- *   to itself when the target task returns to user space.
- *
- * - It is running in kernel context on this cpu and will sleep before
- *   returning to user context.  Because this is also the current task, the
- *   signal will not get delivered and the task could sleep indefinitely.
- *   Store the data in the idle task for this cpu, the signal will be sent
- *   after the idle task processes its next interrupt.
- *
- * To cover all cases, store the data in the target task, the current task and
- * the idle task on this cpu.  Whatever happens, the signal will be delivered
- * to the target task before it can do any useful user space work.  Multiple
- * deliveries have no unwanted side effects.
- *
- * Note: This code is executed in MCA/INIT/NMI/PMI context, with interrupts
- * disabled.  It must not take any locks nor use kernel structures or services
- * that require locks.
- */
-
-/* To ensure that we get the right pid, check its start time.  To avoid extra
- * include files in thread_info.h, convert the task start_time to unsigned long,
- * giving us a cycle time of > 580 years.
- */
-static inline unsigned long
-start_time_ul(const struct task_struct *t)
-{
-	return t->start_time.tv_sec * NSEC_PER_SEC + t->start_time.tv_nsec;
-}
-
-void
-set_sigdelayed(pid_t pid, int signo, int code, void __user *addr)
-{
-	struct task_struct *t;
-	unsigned long start_time = 0;
-	int i;
-
-	for (i = 1; i <= 3; ++i) {
-		switch (i) {
-		case 1:
-			t = find_task_by_pid(pid);
-			if (t)
-				start_time = start_time_ul(t);
-			break;
-		case 2:
-			t = current;
-			break;
-		default:
-			t = idle_task(smp_processor_id());
-			break;
-		}
-
-		if (!t)
-			return;
-		task_thread_info(t)->sigdelayed.signo = signo;
-		task_thread_info(t)->sigdelayed.code = code;
-		task_thread_info(t)->sigdelayed.addr = addr;
-		task_thread_info(t)->sigdelayed.start_time = start_time;
-		task_thread_info(t)->sigdelayed.pid = pid;
-		wmb();
-		set_tsk_thread_flag(t, TIF_SIGDELAYED);
-	}
-}
-
-/* Called from entry.S when it detects TIF_SIGDELAYED, a delayed signal that
- * was detected in MCA/INIT/NMI/PMI context where it could not be delivered.
- */
-
-void
-do_sigdelayed(void)
-{
-	struct siginfo siginfo;
-	pid_t pid;
-	struct task_struct *t;
-
-	clear_thread_flag(TIF_SIGDELAYED);
-	memset(&siginfo, 0, sizeof(siginfo));
-	siginfo.si_signo = current_thread_info()->sigdelayed.signo;
-	siginfo.si_code = current_thread_info()->sigdelayed.code;
-	siginfo.si_addr = current_thread_info()->sigdelayed.addr;
-	pid = current_thread_info()->sigdelayed.pid;
-	t = find_task_by_pid(pid);
-	if (!t)
-		return;
-	if (current_thread_info()->sigdelayed.start_time != start_time_ul(t))
-		return;
-	force_sig_info(siginfo.si_signo, &siginfo, t);
-}
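
The interface deleted above was meant to be called from MCA/INIT/NMI/PMI context, where normal signal delivery is not safe. A hypothetical call site might have looked like the line below; the signal number and si_code are chosen arbitrarily for illustration and are not taken from this patch.

	/* Illustrative only: queue a delayed SIGKILL for "pid" from a
	 * non-maskable handler, to be delivered on return to user space. */
	set_sigdelayed(pid, SIGKILL, SI_KERNEL, NULL);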
diff --git a/include/asm-ia64/signal.h b/include/asm-ia64/signal.h
index 608168d713d3..5e328ed5d01d 100644
--- a/include/asm-ia64/signal.h
+++ b/include/asm-ia64/signal.h
@@ -158,8 +158,6 @@ struct k_sigaction {
 
 #define ptrace_signal_deliver(regs, cookie) do { } while (0)
 
-void set_sigdelayed(pid_t pid, int signo, int code, void __user *addr);
-
 #endif /* __KERNEL__ */
 
 # endif /* !__ASSEMBLY__ */
diff --git a/include/asm-ia64/thread_info.h b/include/asm-ia64/thread_info.h
index 1d6518fe1f02..a6ee27343a4a 100644
--- a/include/asm-ia64/thread_info.h
+++ b/include/asm-ia64/thread_info.h
@@ -29,13 +29,6 @@ struct thread_info {
 	mm_segment_t addr_limit;	/* user-level address space limit */
 	int preempt_count;		/* 0=premptable, <0=BUG; will also serve as bh-counter */
 	struct restart_block restart_block;
-	struct {
-		int signo;
-		int code;
-		void __user *addr;
-		unsigned long start_time;
-		pid_t pid;
-	} sigdelayed;			/* Saved information for TIF_SIGDELAYED */
 };
 
 #define THREAD_SIZE			KERNEL_STACK_SIZE
@@ -89,7 +82,6 @@ struct thread_info {
 #define TIF_NEED_RESCHED	2	/* rescheduling necessary */
 #define TIF_SYSCALL_TRACE	3	/* syscall trace active */
 #define TIF_SYSCALL_AUDIT	4	/* syscall auditing active */
-#define TIF_SIGDELAYED		5	/* signal delayed from MCA/INIT/NMI/PMI context */
 #define TIF_POLLING_NRFLAG	16	/* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE		17
 #define TIF_MCA_INIT		18	/* this task is processing MCA or INIT */
@@ -101,13 +93,12 @@ struct thread_info {
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
-#define _TIF_SIGDELAYED		(1 << TIF_SIGDELAYED)
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
 #define _TIF_MCA_INIT		(1 << TIF_MCA_INIT)
 #define _TIF_DB_DISABLED	(1 << TIF_DB_DISABLED)
 
 /* "work to do on user-return" bits */
-#define TIF_ALLWORK_MASK	(_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SIGDELAYED)
+#define TIF_ALLWORK_MASK	(_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
 /* like TIF_ALLWORK_BITS but sans TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT */
 #define TIF_WORK_MASK		(TIF_ALLWORK_MASK&~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT))
 
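
The thread_info.h hunks follow the usual pairing of TIF_* bit numbers with _TIF_* bit masks: each _TIF_* value is (1 << TIF_*), and TIF_ALLWORK_MASK ors the masks together so a single AND decides whether the slow user-return path is taken. The minimal sketch below shows how such a mask is consumed; the helper name is invented for illustration and is not part of this patch.

/*
 * Hedged sketch, assuming "flags" is the thread_info flags word that the
 * TIF_* bits index.  With TIF_SIGDELAYED removed, the delayed-signal bit
 * no longer participates in this test.
 */
static inline int user_return_work_pending(unsigned long flags)	/* hypothetical */
{
	return (flags & TIF_ALLWORK_MASK) != 0;
}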