author	Jiri Slaby <jslaby@suse.cz>	2010-03-05 16:42:53 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-03-06 14:26:32 -0500
commit	d4bb527438b4181cd3c564ae04dd344c381283a1 (patch)
tree	0e7afeb6b759f72b136d707db4e5725a92b0d777 /kernel
parent	f3abd4f9531becb71626bd206955d47d5ea54f06 (diff)
posix-cpu-timers: cleanup rlimits usage
Fetch rlimit (both hard and soft) values only once and work on them. It
removes many accesses through the sig structure and makes the code cleaner.

Mostly a preparation for writable resource limits support.

Signed-off-by: Jiri Slaby <jslaby@suse.cz>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: john stultz <johnstul@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
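The pattern the patch applies is: read rlim_cur and rlim_max once into local
variables (soft, hard), do every comparison against those locals, and write
back only at the single point where the soft limit is bumped. Below is a
minimal userspace sketch of the same idea, using the standard getrlimit()/
setrlimit() interface rather than the in-kernel tsk->signal->rlim array; the
check_cpu_limit() helper and the sample value are illustrative only and not
part of the patch.

/* Sketch of the "fetch once, work on locals" pattern used by the patch.
 * Userspace rlimit API; the helper name and numbers are made up. */
#include <stdio.h>
#include <sys/resource.h>

static void check_cpu_limit(rlim_t used_secs)
{
	struct rlimit rl;
	rlim_t soft, hard;

	if (getrlimit(RLIMIT_CPU, &rl))
		return;

	soft = rl.rlim_cur;	/* fetch both values once ... */
	hard = rl.rlim_max;

	if (soft == RLIM_INFINITY)	/* ... and only touch the locals below */
		return;

	if (hard != RLIM_INFINITY && used_secs >= hard) {
		printf("hard limit reached: kernel would send SIGKILL\n");
		return;
	}

	if (used_secs >= soft) {
		printf("soft limit reached: kernel would send SIGXCPU\n");
		if (soft < hard) {
			soft++;			/* allow one more second, as the patch does */
			rl.rlim_cur = soft;	/* single write-back of the new soft limit */
			setrlimit(RLIMIT_CPU, &rl);
		}
	}
}

int main(void)
{
	check_cpu_limit(5);	/* pretend 5 seconds of CPU time were consumed */
	return 0;
}

Compared with dereferencing sig->rlim[...] at every test, the locals make the
control flow in check_thread_timers()/check_process_timers() easier to follow
and leave one obvious place where the limit is written back.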
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/posix-cpu-timers.c | 32 +++++++++++++++++---------------
1 file changed, 17 insertions(+), 15 deletions(-)
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 438ff4523513..dbb16bf15c45 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -982,6 +982,7 @@ static void check_thread_timers(struct task_struct *tsk,
 	int maxfire;
 	struct list_head *timers = tsk->cpu_timers;
 	struct signal_struct *const sig = tsk->signal;
+	unsigned long soft;
 
 	maxfire = 20;
 	tsk->cputime_expires.prof_exp = cputime_zero;
@@ -1030,9 +1031,9 @@ static void check_thread_timers(struct task_struct *tsk,
 	/*
 	 * Check for the special case thread timers.
 	 */
-	if (sig->rlim[RLIMIT_RTTIME].rlim_cur != RLIM_INFINITY) {
+	soft = sig->rlim[RLIMIT_RTTIME].rlim_cur;
+	if (soft != RLIM_INFINITY) {
 		unsigned long hard = sig->rlim[RLIMIT_RTTIME].rlim_max;
-		unsigned long *soft = &sig->rlim[RLIMIT_RTTIME].rlim_cur;
 
 		if (hard != RLIM_INFINITY &&
 		    tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
@@ -1043,14 +1044,13 @@ static void check_thread_timers(struct task_struct *tsk,
 			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
 			return;
 		}
-		if (tsk->rt.timeout > DIV_ROUND_UP(*soft, USEC_PER_SEC/HZ)) {
+		if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
 			/*
 			 * At the soft limit, send a SIGXCPU every second.
 			 */
-			if (sig->rlim[RLIMIT_RTTIME].rlim_cur
-			    < sig->rlim[RLIMIT_RTTIME].rlim_max) {
-				sig->rlim[RLIMIT_RTTIME].rlim_cur +=
-								USEC_PER_SEC;
+			if (soft < hard) {
+				soft += USEC_PER_SEC;
+				sig->rlim[RLIMIT_RTTIME].rlim_cur = soft;
 			}
 			printk(KERN_INFO
 				"RT Watchdog Timeout: %s[%d]\n",
@@ -1121,6 +1121,7 @@ static void check_process_timers(struct task_struct *tsk,
 	unsigned long long sum_sched_runtime, sched_expires;
 	struct list_head *timers = sig->cpu_timers;
 	struct task_cputime cputime;
+	unsigned long soft;
 
 	/*
 	 * Don't sample the current process CPU clocks if there are no timers.
@@ -1193,11 +1194,12 @@ static void check_process_timers(struct task_struct *tsk,
 			 SIGPROF);
 	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
 			 SIGVTALRM);
-
-	if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
+	soft = sig->rlim[RLIMIT_CPU].rlim_cur;
+	if (soft != RLIM_INFINITY) {
 		unsigned long psecs = cputime_to_secs(ptime);
+		unsigned long hard = sig->rlim[RLIMIT_CPU].rlim_max;
 		cputime_t x;
-		if (psecs >= sig->rlim[RLIMIT_CPU].rlim_max) {
+		if (psecs >= hard) {
 			/*
 			 * At the hard limit, we just die.
 			 * No need to calculate anything else now.
@@ -1205,17 +1207,17 @@ static void check_process_timers(struct task_struct *tsk,
 			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
 			return;
 		}
-		if (psecs >= sig->rlim[RLIMIT_CPU].rlim_cur) {
+		if (psecs >= soft) {
 			/*
 			 * At the soft limit, send a SIGXCPU every second.
 			 */
 			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
-			if (sig->rlim[RLIMIT_CPU].rlim_cur
-			    < sig->rlim[RLIMIT_CPU].rlim_max) {
-				sig->rlim[RLIMIT_CPU].rlim_cur++;
+			if (soft < hard) {
+				soft++;
+				sig->rlim[RLIMIT_CPU].rlim_cur = soft;
 			}
 		}
-		x = secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
+		x = secs_to_cputime(soft);
 		if (cputime_eq(prof_expires, cputime_zero) ||
 		    cputime_lt(x, prof_expires)) {
 			prof_expires = x;