Diffstat (limited to 'kernel')
-rw-r--r--	kernel/posix-cpu-timers.c | 76
1 file changed, 44 insertions, 32 deletions
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 9641958ddb3e..d9dc5edc318c 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -233,7 +233,8 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
 
 /*
  * Sample a process (thread group) clock for the given group_leader task.
- * Must be called with tasklist_lock held for reading.
+ * Must be called with task sighand lock held for safe while_each_thread()
+ * traversal.
  */
 static int cpu_clock_sample_group(const clockid_t which_clock,
 				  struct task_struct *p,
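
The updated comment encodes the new rule: group-wide sampling walks the thread list with while_each_thread(), and that walk is only safe against a concurrent exec/exit if the group's sighand lock is held. A minimal sketch of such a traversal, with a hypothetical helper name (the real sampling goes through thread_group_cputime() and friends):

/* Hypothetical illustration: sum per-thread CPU time while the caller
 * holds p->sighand->siglock, which keeps the thread list stable. */
static unsigned long long group_exec_runtime(struct task_struct *p)
{
	struct task_struct *t = p;
	unsigned long long sum = 0;

	do {
		sum += t->se.sum_exec_runtime;
	} while_each_thread(p, t);

	return sum;
}
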
@@ -455,8 +456,7 @@ static inline int expires_gt(cputime_t expires, cputime_t new_exp)
 
 /*
  * Insert the timer on the appropriate list before any timers that
- * expire later.  This must be called with the tasklist_lock held
- * for reading, interrupts disabled and p->sighand->siglock taken.
+ * expire later.  This must be called with the sighand lock held.
  */
 static void arm_timer(struct k_itimer *timer)
 {
@@ -547,7 +547,8 @@ static void cpu_timer_fire(struct k_itimer *timer)
 
 /*
  * Sample a process (thread group) timer for the given group_leader task.
- * Must be called with tasklist_lock held for reading.
+ * Must be called with task sighand lock held for safe while_each_thread()
+ * traversal.
  */
 static int cpu_timer_sample_group(const clockid_t which_clock,
 				  struct task_struct *p,
@@ -610,9 +611,11 @@ static inline void posix_cpu_timer_kick_nohz(void) { }
  * If we return TIMER_RETRY, it's necessary to release the timer's lock
  * and try again.  (This happens when the timer is in the middle of firing.)
  */
-static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
+static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
 			       struct itimerspec *new, struct itimerspec *old)
 {
+	unsigned long flags;
+	struct sighand_struct *sighand;
 	struct task_struct *p = timer->it.cpu.task;
 	unsigned long long old_expires, new_expires, old_incr, val;
 	int ret;
@@ -621,14 +624,16 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 
 	new_expires = timespec_to_sample(timer->it_clock, &new->it_value);
 
-	read_lock(&tasklist_lock);
 	/*
-	 * We need the tasklist_lock to protect against reaping that
-	 * clears p->sighand.  If p has just been reaped, we can no
+	 * Protect against sighand release/switch in exit/exec and p->cpu_timers
+	 * and p->signal->cpu_timers read/write in arm_timer()
+	 */
+	sighand = lock_task_sighand(p, &flags);
+	/*
+	 * If p has just been reaped, we can no
 	 * longer get any information about it at all.
 	 */
-	if (unlikely(p->sighand == NULL)) {
-		read_unlock(&tasklist_lock);
+	if (unlikely(sighand == NULL)) {
 		return -ESRCH;
 	}
 
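
This hunk is the heart of the conversion: the tasklist_lock/siglock pair collapses into a single lock_task_sighand() call, whose NULL return replaces the open-coded p->sighand == NULL check. It also explains the parameter rename above: the new unsigned long flags local that receives the saved IRQ state shares a scope with the old int flags argument, so the argument becomes timer_flags. A minimal sketch of the resulting caller pattern, assuming a hypothetical helper:

/* Hypothetical sketch: the sighand lock both pins p->sighand against
 * release in exit/exec and serializes the cpu_timers lists that
 * arm_timer() modifies. */
static int access_cpu_timers(struct task_struct *p)
{
	struct sighand_struct *sighand;
	unsigned long flags;

	sighand = lock_task_sighand(p, &flags);
	if (unlikely(sighand == NULL))
		return -ESRCH;		/* p was reaped: nothing to lock */

	/* ... read/write p->cpu_timers, p->signal->cpu_timers ... */

	unlock_task_sighand(p, &flags);
	return 0;
}
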
@@ -639,7 +644,6 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 
 	ret = 0;
 	old_incr = timer->it.cpu.incr;
-	spin_lock(&p->sighand->siglock);
 	old_expires = timer->it.cpu.expires;
 	if (unlikely(timer->it.cpu.firing)) {
 		timer->it.cpu.firing = -1;
@@ -696,12 +700,11 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 		 * disable this firing since we are already reporting
 		 * it as an overrun (thanks to bump_cpu_timer above).
 		 */
-		spin_unlock(&p->sighand->siglock);
-		read_unlock(&tasklist_lock);
+		unlock_task_sighand(p, &flags);
 		goto out;
 	}
 
-	if (new_expires != 0 && !(flags & TIMER_ABSTIME)) {
+	if (new_expires != 0 && !(timer_flags & TIMER_ABSTIME)) {
 		new_expires += val;
 	}
 
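
With timer_flags now carrying the TIMER_ABSTIME bit, the relative case anchors the requested expiry to the clock sample val taken under the sighand lock. As a worked sketch (hypothetical helper, restating the logic above):

/* val is the current clock sample taken under the sighand lock. */
static unsigned long long resolve_expiry(unsigned long long val,
					 unsigned long long new_expires,
					 int timer_flags)
{
	/* 0 means "disarm"; TIMER_ABSTIME means already absolute */
	if (new_expires != 0 && !(timer_flags & TIMER_ABSTIME))
		return val + new_expires;	/* relative: offset from now */
	return new_expires;
}
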
@@ -715,9 +718,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 		arm_timer(timer);
 	}
 
-	spin_unlock(&p->sighand->siglock);
-	read_unlock(&tasklist_lock);
-
+	unlock_task_sighand(p, &flags);
 	/*
 	 * Install the new reload setting, and
 	 * set up the signal and overrun bookkeeping.
@@ -779,8 +780,16 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
 	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
 		cpu_clock_sample(timer->it_clock, p, &now);
 	} else {
-		read_lock(&tasklist_lock);
-		if (unlikely(p->sighand == NULL)) {
+		struct sighand_struct *sighand;
+		unsigned long flags;
+
+		/*
+		 * Protect against sighand release/switch in exit/exec and
+		 * also make timer sampling safe if it ends up calling
+		 * thread_group_cputime().
+		 */
+		sighand = lock_task_sighand(p, &flags);
+		if (unlikely(sighand == NULL)) {
 			/*
 			 * The process has been reaped.
 			 * We can't even collect a sample any more.
@@ -789,11 +798,10 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
 			timer->it.cpu.expires = 0;
 			sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
 					   &itp->it_value);
-			read_unlock(&tasklist_lock);
 		} else {
 			cpu_timer_sample_group(timer->it_clock, p, &now);
+			unlock_task_sighand(p, &flags);
 		}
-		read_unlock(&tasklist_lock);
 	}
 
 	if (now < timer->it.cpu.expires) {
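
Note the asymmetry: the reaped path has no unlock at all, because a NULL return from lock_task_sighand() means the lock was never taken. Roughly why NULL means "reaped", in a sketch simplified from the kernel/signal.c implementation of this era (details may differ):

static struct sighand_struct *lock_task_sighand_sketch(struct task_struct *tsk,
						       unsigned long *flags)
{
	struct sighand_struct *sighand;

	for (;;) {
		local_irq_save(*flags);
		rcu_read_lock();
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL)) {
			/* task was reaped; sighand already released */
			rcu_read_unlock();
			local_irq_restore(*flags);
			break;
		}
		spin_lock(&sighand->siglock);
		if (likely(sighand == tsk->sighand)) {
			rcu_read_unlock();
			break;	/* locked and still current: success */
		}
		/* raced with exec switching sighand: unlock and retry */
		spin_unlock(&sighand->siglock);
		rcu_read_unlock();
		local_irq_restore(*flags);
	}
	return sighand;
}
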
@@ -1007,6 +1015,8 @@ static void check_process_timers(struct task_struct *tsk,
  */
 void posix_cpu_timer_schedule(struct k_itimer *timer)
 {
+	struct sighand_struct *sighand;
+	unsigned long flags;
 	struct task_struct *p = timer->it.cpu.task;
 	unsigned long long now;
 
@@ -1021,27 +1031,31 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
 		if (unlikely(p->exit_state))
 			goto out;
 
-		read_lock(&tasklist_lock); /* arm_timer needs it.  */
-		spin_lock(&p->sighand->siglock);
+		/* Protect timer list r/w in arm_timer() */
+		sighand = lock_task_sighand(p, &flags);
+		if (!sighand)
+			goto out;
 	} else {
-		read_lock(&tasklist_lock);
-		if (unlikely(p->sighand == NULL)) {
+		/*
+		 * Protect arm_timer() and timer sampling in case of call to
+		 * thread_group_cputime().
+		 */
+		sighand = lock_task_sighand(p, &flags);
+		if (unlikely(sighand == NULL)) {
 			/*
 			 * The process has been reaped.
 			 * We can't even collect a sample any more.
 			 */
 			timer->it.cpu.expires = 0;
-			read_unlock(&tasklist_lock);
 			goto out;
 		} else if (unlikely(p->exit_state) && thread_group_empty(p)) {
-			read_unlock(&tasklist_lock);
+			unlock_task_sighand(p, &flags);
 			/* Optimizations: if the process is dying, no need to rearm */
 			goto out;
 		}
-		spin_lock(&p->sighand->siglock);
 		cpu_timer_sample_group(timer->it_clock, p, &now);
 		bump_cpu_timer(timer, now);
-		/* Leave the tasklist_lock locked for the call below.  */
+		/* Leave the sighand locked for the call below.  */
 	}
 
 	/*
@@ -1049,12 +1063,10 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
 	 */
 	BUG_ON(!irqs_disabled());
 	arm_timer(timer);
-	spin_unlock(&p->sighand->siglock);
-	read_unlock(&tasklist_lock);
+	unlock_task_sighand(p, &flags);
 
 	/* Kick full dynticks CPUs in case they need to tick on the new timer */
 	posix_cpu_timer_kick_nohz();
-
 out:
 	timer->it_overrun_last = timer->it_overrun;
 	timer->it_overrun = -1;
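
The BUG_ON(!irqs_disabled()) before arm_timer() still holds after the conversion, because lock_task_sighand() disables interrupts and stashes the previous state in flags, mirroring spin_lock_irqsave(). A sketch of that invariant with a hypothetical wrapper:

/* Hypothetical wrapper: inside the sighand lock, interrupts are off,
 * as the timer-list update in arm_timer() requires. */
static void rearm_under_sighand(struct k_itimer *timer, struct task_struct *p)
{
	struct sighand_struct *sighand;
	unsigned long flags;

	sighand = lock_task_sighand(p, &flags);
	if (!sighand)
		return;			/* reaped: nothing to rearm */

	WARN_ON_ONCE(!irqs_disabled());	/* guaranteed by the irqsave above */
	arm_timer(timer);		/* timer-list update under siglock */
	unlock_task_sighand(p, &flags);
}
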