Diffstat (limited to 'kernel/sched_rt.c')
 kernel/sched_rt.c | 30 ++++++++++++++++++++++++++++++
 1 file changed, 30 insertions(+), 0 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 29963af782ae..f350f7b15158 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -116,6 +116,9 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
 	inc_cpu_load(rq, p->se.load.weight);
 
 	inc_rt_tasks(p, rq);
+
+	if (wakeup)
+		p->rt.timeout = 0;
 }
 
 /*
@@ -834,11 +837,38 @@ static void prio_changed_rt(struct rq *rq, struct task_struct *p,
 	}
 }
 
+static void watchdog(struct rq *rq, struct task_struct *p)
+{
+	unsigned long soft, hard;
+
+	if (!p->signal)
+		return;
+
+	soft = p->signal->rlim[RLIMIT_RTTIME].rlim_cur;
+	hard = p->signal->rlim[RLIMIT_RTTIME].rlim_max;
+
+	if (soft != RLIM_INFINITY) {
+		unsigned long next;
+
+		p->rt.timeout++;
+		next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
+		if (next > p->rt.timeout) {
+			u64 next_time = p->se.sum_exec_runtime;
+
+			next_time += next * (NSEC_PER_SEC/HZ);
+			if (p->it_sched_expires > next_time)
+				p->it_sched_expires = next_time;
+		} else
+			p->it_sched_expires = p->se.sum_exec_runtime;
+	}
+}
 
 static void task_tick_rt(struct rq *rq, struct task_struct *p)
 {
 	update_curr_rt(rq);
 
+	watchdog(rq, p);
+
 	/*
 	 * RR tasks need a special form of timeslice management.
 	 * FIFO tasks have no timeslices.
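
Note (not part of the patch): the watchdog() added above enforces RLIMIT_RTTIME, which is expressed in microseconds of CPU time a real-time task consumes without blocking. DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ) converts that budget into scheduler ticks, p->rt.timeout counts ticks since the task last woke up (reset in enqueue_task_rt), and once the budget is used up p->it_sched_expires is pulled back to sum_exec_runtime so the per-thread CPU timer check runs on the next tick; the actual SIGXCPU/SIGKILL delivery lives in the companion posix-cpu-timers change, not in this diff. Below is a minimal userspace sketch of arming the limit. It assumes a kernel and libc that expose RLIMIT_RTTIME, the 5 ms / 10 ms budgets and priority 10 are made-up example values, and switching to SCHED_FIFO needs the usual real-time privilege (root, CAP_SYS_NICE or a suitable RLIMIT_RTPRIO).

/*
 * Illustrative sketch, not part of the patch: arm RLIMIT_RTTIME for the
 * current task and then switch it to SCHED_FIFO.  The limit is given in
 * microseconds of CPU time used without blocking, which is what the
 * watchdog above ticks against.
 */
#include <sched.h>
#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rl = {
		.rlim_cur = 5000,	/* soft limit: ~5 ms, example value */
		.rlim_max = 10000,	/* hard limit: ~10 ms, example value */
	};
	struct sched_param sp = { .sched_priority = 10 };	/* example priority */

	if (setrlimit(RLIMIT_RTTIME, &rl) != 0) {
		perror("setrlimit(RLIMIT_RTTIME)");
		return 1;
	}
	if (sched_setscheduler(0, SCHED_FIFO, &sp) != 0) {
		perror("sched_setscheduler(SCHED_FIFO)");
		return 1;
	}

	/*
	 * Spinning without ever blocking now trips the watchdog: SIGXCPU
	 * at the soft limit, SIGKILL at the hard limit (delivered by the
	 * companion posix-cpu-timers change).
	 */
	for (;;)
		;
}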