Diffstat (limited to 'kernel/posix-cpu-timers.c')
-rw-r--r--  kernel/posix-cpu-timers.c  51
1 file changed, 29 insertions(+), 22 deletions(-)
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 5c9dc228747b..bc7704b3a443 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -384,7 +384,8 @@ int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
 
 /*
  * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
- * This is called from sys_timer_create with the new timer already locked.
+ * This is called from sys_timer_create() and do_cpu_nanosleep() with the
+ * new timer already all-zeros initialized.
  */
 int posix_cpu_timer_create(struct k_itimer *new_timer)
 {
@@ -396,8 +397,6 @@ int posix_cpu_timer_create(struct k_itimer *new_timer)
 		return -EINVAL;
 
 	INIT_LIST_HEAD(&new_timer->it.cpu.entry);
-	new_timer->it.cpu.incr.sched = 0;
-	new_timer->it.cpu.expires.sched = 0;
 
 	read_lock(&tasklist_lock);
 	if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
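
[Annotator's note] The two deleted assignments were redundant: by the time the timer_create callback runs, the k_itimer is already zero-filled, which is exactly the contract the reworded comment above records. A rough sketch of the allocation side in kernel/posix-timers.c (quoted from memory, so treat the details as an assumption) shows why:

static struct k_itimer *alloc_posix_timer(void)
{
	struct k_itimer *tmr;

	/* kmem_cache_zalloc() hands back zero-filled memory, so every
	 * field of the new timer -- including it.cpu.incr.sched and
	 * it.cpu.expires.sched -- is already 0 on entry to the
	 * timer_create callback. */
	tmr = kmem_cache_zalloc(posix_timers_cache, GFP_KERNEL);
	if (!tmr)
		return NULL;
	if (unlikely(!(tmr->sigq = sigqueue_alloc()))) {
		kmem_cache_free(posix_timers_cache, tmr);
		return NULL;
	}
	return tmr;
}

The other caller named in the new comment, do_cpu_nanosleep(), builds its k_itimer on the stack and memset()s it to zero before calling in, so the same all-zeros contract holds there too.
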
@@ -983,6 +982,7 @@ static void check_thread_timers(struct task_struct *tsk,
 	int maxfire;
 	struct list_head *timers = tsk->cpu_timers;
 	struct signal_struct *const sig = tsk->signal;
+	unsigned long soft;
 
 	maxfire = 20;
 	tsk->cputime_expires.prof_exp = cputime_zero;
@@ -1031,9 +1031,10 @@ static void check_thread_timers(struct task_struct *tsk,
 	/*
 	 * Check for the special case thread timers.
 	 */
-	if (sig->rlim[RLIMIT_RTTIME].rlim_cur != RLIM_INFINITY) {
-		unsigned long hard = sig->rlim[RLIMIT_RTTIME].rlim_max;
-		unsigned long *soft = &sig->rlim[RLIMIT_RTTIME].rlim_cur;
+	soft = ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_cur);
+	if (soft != RLIM_INFINITY) {
+		unsigned long hard =
+			ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max);
 
 		if (hard != RLIM_INFINITY &&
 		    tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
@@ -1044,14 +1045,13 @@ static void check_thread_timers(struct task_struct *tsk,
 			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
 			return;
 		}
-		if (tsk->rt.timeout > DIV_ROUND_UP(*soft, USEC_PER_SEC/HZ)) {
+		if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
 			/*
 			 * At the soft limit, send a SIGXCPU every second.
 			 */
-			if (sig->rlim[RLIMIT_RTTIME].rlim_cur
-			    < sig->rlim[RLIMIT_RTTIME].rlim_max) {
-				sig->rlim[RLIMIT_RTTIME].rlim_cur +=
-								USEC_PER_SEC;
+			if (soft < hard) {
+				soft += USEC_PER_SEC;
+				sig->rlim[RLIMIT_RTTIME].rlim_cur = soft;
 			}
 			printk(KERN_INFO
 				"RT Watchdog Timeout: %s[%d]\n",
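
[Annotator's note] The switch to a local "soft" snapshot matters because RLIMIT_RTTIME can be changed concurrently by another thread via setrlimit(), and nothing here locks the rlim array. The old code read rlim_cur twice (once for the RLIM_INFINITY test, once inside DIV_ROUND_UP()), so the two reads could see different values. ACCESS_ONCE() is this era's idiom for forcing a single load; its definition in <linux/compiler.h>, quoted from memory:

#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

The volatile cast stops the compiler from reloading or re-reading the location, so the infinity test, the timeout comparison, and the SIGXCPU bump above all work against one consistent snapshot.
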
@@ -1061,9 +1061,9 @@ static void check_thread_timers(struct task_struct *tsk,
 	}
 }
 
-static void stop_process_timers(struct task_struct *tsk)
+static void stop_process_timers(struct signal_struct *sig)
 {
-	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
+	struct thread_group_cputimer *cputimer = &sig->cputimer;
 	unsigned long flags;
 
 	if (!cputimer->running)
@@ -1072,6 +1072,10 @@ static void stop_process_timers(struct task_struct *tsk)
 	spin_lock_irqsave(&cputimer->lock, flags);
 	cputimer->running = 0;
 	spin_unlock_irqrestore(&cputimer->lock, flags);
+
+	sig->cputime_expires.prof_exp = cputime_zero;
+	sig->cputime_expires.virt_exp = cputime_zero;
+	sig->cputime_expires.sched_exp = 0;
 }
 
 static u32 onecputick;
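
[Annotator's note] Taking a signal_struct instead of a task_struct is a plain simplification (the caller already has sig in hand), but the three new stores are the substantive change: once the group cputimer is stopped, the cached expiry times should be wiped as well, because the timer fast path treats a nonzero cputime_expires as "a CPU timer may be armed". A rough sketch of that fast-path test (reconstructed from memory; the helper and the field aliases are assumptions about this kernel's headers):

/* prof_exp/virt_exp/sched_exp alias the stime/utime/sum_exec_runtime
 * fields of struct task_cputime when it is used as an expiry cache. */
static inline int task_cputime_zero(const struct task_cputime *cputime)
{
	return cputime_eq(cputime->prof_exp, cputime_zero) &&
	       cputime_eq(cputime->virt_exp, cputime_zero) &&
	       cputime->sched_exp == 0;
}

Without the reset, a stale nonzero cache could keep steering run_posix_cpu_timers() into the slow path long after the last process-wide timer went away.
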
@@ -1122,6 +1126,7 @@ static void check_process_timers(struct task_struct *tsk,
 	unsigned long long sum_sched_runtime, sched_expires;
 	struct list_head *timers = sig->cpu_timers;
 	struct task_cputime cputime;
+	unsigned long soft;
 
 	/*
 	 * Don't sample the current process CPU clocks if there are no timers.
@@ -1132,7 +1137,7 @@ static void check_process_timers(struct task_struct *tsk,
 	    list_empty(&timers[CPUCLOCK_VIRT]) &&
 	    cputime_eq(sig->it[CPUCLOCK_VIRT].expires, cputime_zero) &&
 	    list_empty(&timers[CPUCLOCK_SCHED])) {
-		stop_process_timers(tsk);
+		stop_process_timers(sig);
 		return;
 	}
 
@@ -1194,11 +1199,13 @@ static void check_process_timers(struct task_struct *tsk,
 			 SIGPROF);
 	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
 			 SIGVTALRM);
-
-	if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
+	soft = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
+	if (soft != RLIM_INFINITY) {
 		unsigned long psecs = cputime_to_secs(ptime);
+		unsigned long hard =
+			ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_max);
 		cputime_t x;
-		if (psecs >= sig->rlim[RLIMIT_CPU].rlim_max) {
+		if (psecs >= hard) {
 			/*
 			 * At the hard limit, we just die.
 			 * No need to calculate anything else now.
@@ -1206,17 +1213,17 @@ static void check_process_timers(struct task_struct *tsk,
 			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
 			return;
 		}
-		if (psecs >= sig->rlim[RLIMIT_CPU].rlim_cur) {
+		if (psecs >= soft) {
 			/*
 			 * At the soft limit, send a SIGXCPU every second.
 			 */
 			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
-			if (sig->rlim[RLIMIT_CPU].rlim_cur
-			    < sig->rlim[RLIMIT_CPU].rlim_max) {
-				sig->rlim[RLIMIT_CPU].rlim_cur++;
+			if (soft < hard) {
+				soft++;
+				sig->rlim[RLIMIT_CPU].rlim_cur = soft;
 			}
 		}
-		x = secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
+		x = secs_to_cputime(soft);
 		if (cputime_eq(prof_expires, cputime_zero) ||
 		    cputime_lt(x, prof_expires)) {
 			prof_expires = x;
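
[Annotator's note] The RLIMIT_CPU branch follows the same snapshot pattern and preserves a subtle piece of behavior: after each SIGXCPU the soft limit is bumped by one second and written back, so a process that ignores the signal keeps receiving it roughly once per CPU-second until the hard limit kills it. A minimal userspace sketch of those semantics (not from the kernel tree; the limits are illustrative):

#include <signal.h>
#include <sys/resource.h>
#include <unistd.h>

/* With soft=2s and hard=5s, a CPU-bound process sees SIGXCPU at 2, 3
 * and 4 seconds of CPU time, then SIGKILL at 5. */
static void on_xcpu(int sig)
{
	static const char msg[] = "SIGXCPU: soft RLIMIT_CPU reached\n";

	(void)sig;
	write(STDERR_FILENO, msg, sizeof(msg) - 1);	/* signal-safe */
}

int main(void)
{
	struct rlimit rl = { .rlim_cur = 2, .rlim_max = 5 };

	setrlimit(RLIMIT_CPU, &rl);
	signal(SIGXCPU, on_xcpu);
	for (;;)
		;	/* spin until the hard limit delivers SIGKILL */
}
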