aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorStanislaw Gruszka <sgruszka@redhat.com>2009-07-29 06:15:26 -0400
committerIngo Molnar <mingo@elte.hu>2009-08-03 08:48:35 -0400
commit42c4ab41a176ee784c0f28c0b29025a8fc34f05a (patch)
tree370393a0e02faa7a6a7d211205b31ceb74880359 /kernel
parented680c4ad478d0fee9740f7d029087f181346564 (diff)
itimers: Merge ITIMER_VIRT and ITIMER_PROF
Both cpu itimers have the same data flow in a few places; this patch unifies the code related to the VIRT and PROF itimers. Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com> Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Acked-by: Thomas Gleixner <tglx@linutronix.de> Cc: Oleg Nesterov <oleg@redhat.com> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Paul Mackerras <paulus@samba.org> Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org> LKML-Reference: <1248862529-6063-2-git-send-email-sgruszka@redhat.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/fork.c9
-rw-r--r--kernel/itimer.c146
-rw-r--r--kernel/posix-cpu-timers.c98
3 files changed, 119 insertions, 134 deletions
diff --git a/kernel/fork.c b/kernel/fork.c
index 29b532e718f7..893ab0bf5e39 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -62,6 +62,7 @@
62#include <linux/fs_struct.h> 62#include <linux/fs_struct.h>
63#include <linux/magic.h> 63#include <linux/magic.h>
64#include <linux/perf_counter.h> 64#include <linux/perf_counter.h>
65#include <linux/posix-timers.h>
65 66
66#include <asm/pgtable.h> 67#include <asm/pgtable.h>
67#include <asm/pgalloc.h> 68#include <asm/pgalloc.h>
@@ -790,10 +791,10 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig)
790 thread_group_cputime_init(sig); 791 thread_group_cputime_init(sig);
791 792
792 /* Expiration times and increments. */ 793 /* Expiration times and increments. */
793 sig->it_virt_expires = cputime_zero; 794 sig->it[CPUCLOCK_PROF].expires = cputime_zero;
794 sig->it_virt_incr = cputime_zero; 795 sig->it[CPUCLOCK_PROF].incr = cputime_zero;
795 sig->it_prof_expires = cputime_zero; 796 sig->it[CPUCLOCK_VIRT].expires = cputime_zero;
796 sig->it_prof_incr = cputime_zero; 797 sig->it[CPUCLOCK_VIRT].incr = cputime_zero;
797 798
798 /* Cached expiration times. */ 799 /* Cached expiration times. */
799 sig->cputime_expires.prof_exp = cputime_zero; 800 sig->cputime_expires.prof_exp = cputime_zero;
diff --git a/kernel/itimer.c b/kernel/itimer.c
index 58762f7077ec..852c88ddd1f0 100644
--- a/kernel/itimer.c
+++ b/kernel/itimer.c
@@ -41,10 +41,43 @@ static struct timeval itimer_get_remtime(struct hrtimer *timer)
41 return ktime_to_timeval(rem); 41 return ktime_to_timeval(rem);
42} 42}
43 43
44static void get_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
45 struct itimerval *value)
46{
47 cputime_t cval, cinterval;
48 struct cpu_itimer *it = &tsk->signal->it[clock_id];
49
50 spin_lock_irq(&tsk->sighand->siglock);
51
52 cval = it->expires;
53 cinterval = it->incr;
54 if (!cputime_eq(cval, cputime_zero)) {
55 struct task_cputime cputime;
56 cputime_t t;
57
58 thread_group_cputimer(tsk, &cputime);
59 if (clock_id == CPUCLOCK_PROF)
60 t = cputime_add(cputime.utime, cputime.stime);
61 else
62 /* CPUCLOCK_VIRT */
63 t = cputime.utime;
64
65 if (cputime_le(cval, t))
66 /* about to fire */
67 cval = jiffies_to_cputime(1);
68 else
69 cval = cputime_sub(cval, t);
70 }
71
72 spin_unlock_irq(&tsk->sighand->siglock);
73
74 cputime_to_timeval(cval, &value->it_value);
75 cputime_to_timeval(cinterval, &value->it_interval);
76}
77
44int do_getitimer(int which, struct itimerval *value) 78int do_getitimer(int which, struct itimerval *value)
45{ 79{
46 struct task_struct *tsk = current; 80 struct task_struct *tsk = current;
47 cputime_t cinterval, cval;
48 81
49 switch (which) { 82 switch (which) {
50 case ITIMER_REAL: 83 case ITIMER_REAL:
@@ -55,44 +88,10 @@ int do_getitimer(int which, struct itimerval *value)
55 spin_unlock_irq(&tsk->sighand->siglock); 88 spin_unlock_irq(&tsk->sighand->siglock);
56 break; 89 break;
57 case ITIMER_VIRTUAL: 90 case ITIMER_VIRTUAL:
58 spin_lock_irq(&tsk->sighand->siglock); 91 get_cpu_itimer(tsk, CPUCLOCK_VIRT, value);
59 cval = tsk->signal->it_virt_expires;
60 cinterval = tsk->signal->it_virt_incr;
61 if (!cputime_eq(cval, cputime_zero)) {
62 struct task_cputime cputime;
63 cputime_t utime;
64
65 thread_group_cputimer(tsk, &cputime);
66 utime = cputime.utime;
67 if (cputime_le(cval, utime)) { /* about to fire */
68 cval = jiffies_to_cputime(1);
69 } else {
70 cval = cputime_sub(cval, utime);
71 }
72 }
73 spin_unlock_irq(&tsk->sighand->siglock);
74 cputime_to_timeval(cval, &value->it_value);
75 cputime_to_timeval(cinterval, &value->it_interval);
76 break; 92 break;
77 case ITIMER_PROF: 93 case ITIMER_PROF:
78 spin_lock_irq(&tsk->sighand->siglock); 94 get_cpu_itimer(tsk, CPUCLOCK_PROF, value);
79 cval = tsk->signal->it_prof_expires;
80 cinterval = tsk->signal->it_prof_incr;
81 if (!cputime_eq(cval, cputime_zero)) {
82 struct task_cputime times;
83 cputime_t ptime;
84
85 thread_group_cputimer(tsk, &times);
86 ptime = cputime_add(times.utime, times.stime);
87 if (cputime_le(cval, ptime)) { /* about to fire */
88 cval = jiffies_to_cputime(1);
89 } else {
90 cval = cputime_sub(cval, ptime);
91 }
92 }
93 spin_unlock_irq(&tsk->sighand->siglock);
94 cputime_to_timeval(cval, &value->it_value);
95 cputime_to_timeval(cinterval, &value->it_interval);
96 break; 95 break;
97 default: 96 default:
98 return(-EINVAL); 97 return(-EINVAL);
@@ -128,6 +127,36 @@ enum hrtimer_restart it_real_fn(struct hrtimer *timer)
128 return HRTIMER_NORESTART; 127 return HRTIMER_NORESTART;
129} 128}
130 129
130static void set_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
131 struct itimerval *value, struct itimerval *ovalue)
132{
133 cputime_t cval, cinterval, nval, ninterval;
134 struct cpu_itimer *it = &tsk->signal->it[clock_id];
135
136 nval = timeval_to_cputime(&value->it_value);
137 ninterval = timeval_to_cputime(&value->it_interval);
138
139 spin_lock_irq(&tsk->sighand->siglock);
140
141 cval = it->expires;
142 cinterval = it->incr;
143 if (!cputime_eq(cval, cputime_zero) ||
144 !cputime_eq(nval, cputime_zero)) {
145 if (cputime_gt(nval, cputime_zero))
146 nval = cputime_add(nval, jiffies_to_cputime(1));
147 set_process_cpu_timer(tsk, clock_id, &nval, &cval);
148 }
149 it->expires = nval;
150 it->incr = ninterval;
151
152 spin_unlock_irq(&tsk->sighand->siglock);
153
154 if (ovalue) {
155 cputime_to_timeval(cval, &ovalue->it_value);
156 cputime_to_timeval(cinterval, &ovalue->it_interval);
157 }
158}
159
131/* 160/*
132 * Returns true if the timeval is in canonical form 161 * Returns true if the timeval is in canonical form
133 */ 162 */
@@ -139,7 +168,6 @@ int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue)
139 struct task_struct *tsk = current; 168 struct task_struct *tsk = current;
140 struct hrtimer *timer; 169 struct hrtimer *timer;
141 ktime_t expires; 170 ktime_t expires;
142 cputime_t cval, cinterval, nval, ninterval;
143 171
144 /* 172 /*
145 * Validate the timevals in value. 173 * Validate the timevals in value.
@@ -174,48 +202,10 @@ again:
174 spin_unlock_irq(&tsk->sighand->siglock); 202 spin_unlock_irq(&tsk->sighand->siglock);
175 break; 203 break;
176 case ITIMER_VIRTUAL: 204 case ITIMER_VIRTUAL:
177 nval = timeval_to_cputime(&value->it_value); 205 set_cpu_itimer(tsk, CPUCLOCK_VIRT, value, ovalue);
178 ninterval = timeval_to_cputime(&value->it_interval);
179 spin_lock_irq(&tsk->sighand->siglock);
180 cval = tsk->signal->it_virt_expires;
181 cinterval = tsk->signal->it_virt_incr;
182 if (!cputime_eq(cval, cputime_zero) ||
183 !cputime_eq(nval, cputime_zero)) {
184 if (cputime_gt(nval, cputime_zero))
185 nval = cputime_add(nval,
186 jiffies_to_cputime(1));
187 set_process_cpu_timer(tsk, CPUCLOCK_VIRT,
188 &nval, &cval);
189 }
190 tsk->signal->it_virt_expires = nval;
191 tsk->signal->it_virt_incr = ninterval;
192 spin_unlock_irq(&tsk->sighand->siglock);
193 if (ovalue) {
194 cputime_to_timeval(cval, &ovalue->it_value);
195 cputime_to_timeval(cinterval, &ovalue->it_interval);
196 }
197 break; 206 break;
198 case ITIMER_PROF: 207 case ITIMER_PROF:
199 nval = timeval_to_cputime(&value->it_value); 208 set_cpu_itimer(tsk, CPUCLOCK_PROF, value, ovalue);
200 ninterval = timeval_to_cputime(&value->it_interval);
201 spin_lock_irq(&tsk->sighand->siglock);
202 cval = tsk->signal->it_prof_expires;
203 cinterval = tsk->signal->it_prof_incr;
204 if (!cputime_eq(cval, cputime_zero) ||
205 !cputime_eq(nval, cputime_zero)) {
206 if (cputime_gt(nval, cputime_zero))
207 nval = cputime_add(nval,
208 jiffies_to_cputime(1));
209 set_process_cpu_timer(tsk, CPUCLOCK_PROF,
210 &nval, &cval);
211 }
212 tsk->signal->it_prof_expires = nval;
213 tsk->signal->it_prof_incr = ninterval;
214 spin_unlock_irq(&tsk->sighand->siglock);
215 if (ovalue) {
216 cputime_to_timeval(cval, &ovalue->it_value);
217 cputime_to_timeval(cinterval, &ovalue->it_interval);
218 }
219 break; 209 break;
220 default: 210 default:
221 return -EINVAL; 211 return -EINVAL;
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index bece7c0b67b2..9b2d5e4dc8c4 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -14,11 +14,11 @@
14 */ 14 */
15void update_rlimit_cpu(unsigned long rlim_new) 15void update_rlimit_cpu(unsigned long rlim_new)
16{ 16{
17 cputime_t cputime; 17 cputime_t cputime = secs_to_cputime(rlim_new);
18 struct signal_struct *const sig = current->signal;
18 19
19 cputime = secs_to_cputime(rlim_new); 20 if (cputime_eq(sig->it[CPUCLOCK_PROF].expires, cputime_zero) ||
20 if (cputime_eq(current->signal->it_prof_expires, cputime_zero) || 21 cputime_gt(sig->it[CPUCLOCK_PROF].expires, cputime)) {
21 cputime_gt(current->signal->it_prof_expires, cputime)) {
22 spin_lock_irq(&current->sighand->siglock); 22 spin_lock_irq(&current->sighand->siglock);
23 set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL); 23 set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
24 spin_unlock_irq(&current->sighand->siglock); 24 spin_unlock_irq(&current->sighand->siglock);
@@ -613,6 +613,9 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
613 break; 613 break;
614 } 614 }
615 } else { 615 } else {
616 struct signal_struct *const sig = p->signal;
617 union cpu_time_count *exp = &timer->it.cpu.expires;
618
616 /* 619 /*
617 * For a process timer, set the cached expiration time. 620 * For a process timer, set the cached expiration time.
618 */ 621 */
@@ -620,30 +623,27 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
620 default: 623 default:
621 BUG(); 624 BUG();
622 case CPUCLOCK_VIRT: 625 case CPUCLOCK_VIRT:
623 if (!cputime_eq(p->signal->it_virt_expires, 626 if (!cputime_eq(sig->it[CPUCLOCK_VIRT].expires,
624 cputime_zero) && 627 cputime_zero) &&
625 cputime_lt(p->signal->it_virt_expires, 628 cputime_lt(sig->it[CPUCLOCK_VIRT].expires,
626 timer->it.cpu.expires.cpu)) 629 exp->cpu))
627 break; 630 break;
628 p->signal->cputime_expires.virt_exp = 631 sig->cputime_expires.virt_exp = exp->cpu;
629 timer->it.cpu.expires.cpu;
630 break; 632 break;
631 case CPUCLOCK_PROF: 633 case CPUCLOCK_PROF:
632 if (!cputime_eq(p->signal->it_prof_expires, 634 if (!cputime_eq(sig->it[CPUCLOCK_PROF].expires,
633 cputime_zero) && 635 cputime_zero) &&
634 cputime_lt(p->signal->it_prof_expires, 636 cputime_lt(sig->it[CPUCLOCK_PROF].expires,
635 timer->it.cpu.expires.cpu)) 637 exp->cpu))
636 break; 638 break;
637 i = p->signal->rlim[RLIMIT_CPU].rlim_cur; 639 i = sig->rlim[RLIMIT_CPU].rlim_cur;
638 if (i != RLIM_INFINITY && 640 if (i != RLIM_INFINITY &&
639 i <= cputime_to_secs(timer->it.cpu.expires.cpu)) 641 i <= cputime_to_secs(exp->cpu))
640 break; 642 break;
641 p->signal->cputime_expires.prof_exp = 643 sig->cputime_expires.prof_exp = exp->cpu;
642 timer->it.cpu.expires.cpu;
643 break; 644 break;
644 case CPUCLOCK_SCHED: 645 case CPUCLOCK_SCHED:
645 p->signal->cputime_expires.sched_exp = 646 sig->cputime_expires.sched_exp = exp->sched;
646 timer->it.cpu.expires.sched;
647 break; 647 break;
648 } 648 }
649 } 649 }
@@ -1070,6 +1070,27 @@ static void stop_process_timers(struct task_struct *tsk)
1070 spin_unlock_irqrestore(&cputimer->lock, flags); 1070 spin_unlock_irqrestore(&cputimer->lock, flags);
1071} 1071}
1072 1072
1073static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
1074 cputime_t *expires, cputime_t cur_time, int signo)
1075{
1076 if (cputime_eq(it->expires, cputime_zero))
1077 return;
1078
1079 if (cputime_ge(cur_time, it->expires)) {
1080 it->expires = it->incr;
1081 if (!cputime_eq(it->expires, cputime_zero))
1082 it->expires = cputime_add(it->expires, cur_time);
1083
1084 __group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
1085 }
1086
1087 if (!cputime_eq(it->expires, cputime_zero) &&
1088 (cputime_eq(*expires, cputime_zero) ||
1089 cputime_lt(it->expires, *expires))) {
1090 *expires = it->expires;
1091 }
1092}
1093
1073/* 1094/*
1074 * Check for any per-thread CPU timers that have fired and move them 1095 * Check for any per-thread CPU timers that have fired and move them
1075 * off the tsk->*_timers list onto the firing list. Per-thread timers 1096 * off the tsk->*_timers list onto the firing list. Per-thread timers
@@ -1089,10 +1110,10 @@ static void check_process_timers(struct task_struct *tsk,
1089 * Don't sample the current process CPU clocks if there are no timers. 1110 * Don't sample the current process CPU clocks if there are no timers.
1090 */ 1111 */
1091 if (list_empty(&timers[CPUCLOCK_PROF]) && 1112 if (list_empty(&timers[CPUCLOCK_PROF]) &&
1092 cputime_eq(sig->it_prof_expires, cputime_zero) && 1113 cputime_eq(sig->it[CPUCLOCK_PROF].expires, cputime_zero) &&
1093 sig->rlim[RLIMIT_CPU].rlim_cur == RLIM_INFINITY && 1114 sig->rlim[RLIMIT_CPU].rlim_cur == RLIM_INFINITY &&
1094 list_empty(&timers[CPUCLOCK_VIRT]) && 1115 list_empty(&timers[CPUCLOCK_VIRT]) &&
1095 cputime_eq(sig->it_virt_expires, cputime_zero) && 1116 cputime_eq(sig->it[CPUCLOCK_VIRT].expires, cputime_zero) &&
1096 list_empty(&timers[CPUCLOCK_SCHED])) { 1117 list_empty(&timers[CPUCLOCK_SCHED])) {
1097 stop_process_timers(tsk); 1118 stop_process_timers(tsk);
1098 return; 1119 return;
@@ -1152,38 +1173,11 @@ static void check_process_timers(struct task_struct *tsk,
1152 /* 1173 /*
1153 * Check for the special case process timers. 1174 * Check for the special case process timers.
1154 */ 1175 */
1155 if (!cputime_eq(sig->it_prof_expires, cputime_zero)) { 1176 check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF], &prof_expires, ptime,
1156 if (cputime_ge(ptime, sig->it_prof_expires)) { 1177 SIGPROF);
1157 /* ITIMER_PROF fires and reloads. */ 1178 check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
1158 sig->it_prof_expires = sig->it_prof_incr; 1179 SIGVTALRM);
1159 if (!cputime_eq(sig->it_prof_expires, cputime_zero)) { 1180
1160 sig->it_prof_expires = cputime_add(
1161 sig->it_prof_expires, ptime);
1162 }
1163 __group_send_sig_info(SIGPROF, SEND_SIG_PRIV, tsk);
1164 }
1165 if (!cputime_eq(sig->it_prof_expires, cputime_zero) &&
1166 (cputime_eq(prof_expires, cputime_zero) ||
1167 cputime_lt(sig->it_prof_expires, prof_expires))) {
1168 prof_expires = sig->it_prof_expires;
1169 }
1170 }
1171 if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
1172 if (cputime_ge(utime, sig->it_virt_expires)) {
1173 /* ITIMER_VIRTUAL fires and reloads. */
1174 sig->it_virt_expires = sig->it_virt_incr;
1175 if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
1176 sig->it_virt_expires = cputime_add(
1177 sig->it_virt_expires, utime);
1178 }
1179 __group_send_sig_info(SIGVTALRM, SEND_SIG_PRIV, tsk);
1180 }
1181 if (!cputime_eq(sig->it_virt_expires, cputime_zero) &&
1182 (cputime_eq(virt_expires, cputime_zero) ||
1183 cputime_lt(sig->it_virt_expires, virt_expires))) {
1184 virt_expires = sig->it_virt_expires;
1185 }
1186 }
1187 if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) { 1181 if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
1188 unsigned long psecs = cputime_to_secs(ptime); 1182 unsigned long psecs = cputime_to_secs(ptime);
1189 cputime_t x; 1183 cputime_t x;