aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
Diffstat (limited to 'kernel')
-rw-r--r--kernel/posix-cpu-timers.c29
-rw-r--r--kernel/sched_rt.c30
2 files changed, 59 insertions, 0 deletions
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 68c96376e84a..2c076b36c4f6 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -967,6 +967,7 @@ static void check_thread_timers(struct task_struct *tsk,
 {
 	int maxfire;
 	struct list_head *timers = tsk->cpu_timers;
	struct signal_struct *const sig = tsk->signal;
 
 	maxfire = 20;
 	tsk->it_prof_expires = cputime_zero;
@@ -1011,6 +1012,34 @@ static void check_thread_timers(struct task_struct *tsk,
 			t->firing = 1;
 			list_move_tail(&t->entry, firing);
 		}
+
+	/*
+	 * Check for the special case thread timers.
+	 */
+	if (sig->rlim[RLIMIT_RTTIME].rlim_cur != RLIM_INFINITY) {
+		unsigned long hard = sig->rlim[RLIMIT_RTTIME].rlim_max;
+		unsigned long *soft = &sig->rlim[RLIMIT_RTTIME].rlim_cur;
+
+		if (tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
+			/*
+			 * At the hard limit, we just die.
+			 * No need to calculate anything else now.
+			 */
+			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
+			return;
+		}
+		if (tsk->rt.timeout > DIV_ROUND_UP(*soft, USEC_PER_SEC/HZ)) {
+			/*
+			 * At the soft limit, send a SIGXCPU every second.
+			 */
+			if (sig->rlim[RLIMIT_RTTIME].rlim_cur
+			    < sig->rlim[RLIMIT_RTTIME].rlim_max) {
+				sig->rlim[RLIMIT_RTTIME].rlim_cur +=
+					USEC_PER_SEC;
+			}
+			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
+		}
+	}
 }
 
 /*
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 29963af782ae..f350f7b15158 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -116,6 +116,9 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
 	inc_cpu_load(rq, p->se.load.weight);
 
 	inc_rt_tasks(p, rq);
+
+	if (wakeup)
+		p->rt.timeout = 0;
 }
 
 /*
@@ -834,11 +837,38 @@ static void prio_changed_rt(struct rq *rq, struct task_struct *p,
 	}
 }
 
+static void watchdog(struct rq *rq, struct task_struct *p)
+{
+	unsigned long soft, hard;
+
+	if (!p->signal)
+		return;
+
+	soft = p->signal->rlim[RLIMIT_RTTIME].rlim_cur;
+	hard = p->signal->rlim[RLIMIT_RTTIME].rlim_max;
+
+	if (soft != RLIM_INFINITY) {
+		unsigned long next;
+
+		p->rt.timeout++;
+		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
+		if (next > p->rt.timeout) {
+			u64 next_time = p->se.sum_exec_runtime;
+
+			next_time += next * (NSEC_PER_SEC/HZ);
+			if (p->it_sched_expires > next_time)
+				p->it_sched_expires = next_time;
+		} else
+			p->it_sched_expires = p->se.sum_exec_runtime;
+	}
+}
 
 static void task_tick_rt(struct rq *rq, struct task_struct *p)
 {
 	update_curr_rt(rq);
 
+	watchdog(rq, p);
+
 	/*
 	 * RR tasks need a special form of timeslice management.
 	 * FIFO tasks have no timeslices.