author    Thomas Gleixner <tglx@linutronix.de>    2009-08-29 04:34:18 -0400
committer Thomas Gleixner <tglx@linutronix.de>    2009-08-29 04:34:29 -0400
commit    f71bb0ac5e85410601b0db29d7b1635345ea61a4 (patch)
tree      7c3ef70ef008db87d8b71e5de0632766ecd64d2f
parent    7285dd7fd375763bfb8ab1ac9cf3f1206f503c16 (diff)
parent    a42548a18866e87092db93b771e6c5b060d78401 (diff)
Merge branch 'timers/posixtimers' into timers/tracing
Merge reason: timer tracepoint patches depend on both branches

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
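For context, the posix-timers side of this merge folds the ITIMER_PROF/ITIMER_VIRTUAL bookkeeping into a per-signal struct cpu_itimer and precomputes cputime_one_jiffy instead of calling jiffies_to_cputime(1) on every tick. The error/incr_error fields compensate for the sub-tick remainder that is lost when a requested interval is rounded up to whole ticks. The stand-alone sketch below models only that error-accumulation idea in user space; the struct and function names mirror check_cpu_itimer() in the diff, but the integer types, NSEC_PER_JIFFY and the main() driver are simplified stand-ins rather than kernel code.

    #include <stdio.h>
    #include <stdint.h>

    /* Stand-in granularity: one cputime unit == one jiffy == 4 ms (HZ=250). */
    #define NSEC_PER_JIFFY 4000000u

    struct cpu_itimer {
        uint64_t expires;     /* next expiry, in jiffies of consumed CPU time */
        uint64_t incr;        /* reload interval, rounded up to whole jiffies */
        uint32_t error;       /* accumulated rounding error, in nanoseconds */
        uint32_t incr_error;  /* nanoseconds of excess added per reload */
    };

    /* Models check_cpu_itimer(): rearm the timer and pay back the rounding error. */
    static void check_cpu_itimer(struct cpu_itimer *it, uint64_t cur_time)
    {
        if (it->expires == 0 || cur_time < it->expires)
            return;
        if (it->incr) {
            it->expires += it->incr;
            it->error += it->incr_error;
            if (it->error >= NSEC_PER_JIFFY) {
                it->expires -= 1;          /* give back one whole jiffy */
                it->error -= NSEC_PER_JIFFY;
            }
        } else {
            it->expires = 0;               /* one-shot timer: disarm */
        }
        printf("fires at tick %llu, next expiry %llu\n",
               (unsigned long long)cur_time, (unsigned long long)it->expires);
    }

    int main(void)
    {
        /* A 10 ms interval rounds up to 3 jiffies (12 ms): 2 ms excess per reload. */
        struct cpu_itimer it = { .expires = 3, .incr = 3, .incr_error = 2000000 };

        for (uint64_t t = 1; t <= 16; t++)
            check_cpu_itimer(&it, t);
        return 0;
    }

On average the modeled timer fires every 2.5 ticks (10 ms), alternating between 3-tick and 2-tick periods, which is exactly the effect the error counter is meant to achieve.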
-rw-r--r--  arch/ia64/include/asm/cputime.h    |   1
-rw-r--r--  arch/powerpc/include/asm/cputime.h |  13
-rw-r--r--  arch/powerpc/kernel/time.c         |   4
-rw-r--r--  arch/s390/include/asm/cputime.h    |   1
-rw-r--r--  include/asm-generic/cputime.h      |   1
-rw-r--r--  include/linux/sched.h              |  16
-rw-r--r--  kernel/fork.c                      |   9
-rw-r--r--  kernel/itimer.c                    | 164
-rw-r--r--  kernel/posix-cpu-timers.c          | 150
-rw-r--r--  kernel/sched.c                     |   9
10 files changed, 208 insertions(+), 160 deletions(-)
diff --git a/arch/ia64/include/asm/cputime.h b/arch/ia64/include/asm/cputime.h
index d20b998cb91d..7fa8a8594660 100644
--- a/arch/ia64/include/asm/cputime.h
+++ b/arch/ia64/include/asm/cputime.h
@@ -30,6 +30,7 @@ typedef u64 cputime_t;
 typedef u64 cputime64_t;
 
 #define cputime_zero            ((cputime_t)0)
+#define cputime_one_jiffy       jiffies_to_cputime(1)
 #define cputime_max             ((~((cputime_t)0) >> 1) - 1)
 #define cputime_add(__a, __b)   ((__a) + (__b))
 #define cputime_sub(__a, __b)   ((__a) - (__b))
diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h
index f42e623030ee..fa19f3fe05ff 100644
--- a/arch/powerpc/include/asm/cputime.h
+++ b/arch/powerpc/include/asm/cputime.h
@@ -18,6 +18,9 @@
 
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
 #include <asm-generic/cputime.h>
+#ifdef __KERNEL__
+static inline void setup_cputime_one_jiffy(void) { }
+#endif
 #else
 
 #include <linux/types.h>
@@ -49,6 +52,11 @@ typedef u64 cputime64_t;
 #ifdef __KERNEL__
 
 /*
+ * One jiffy in timebase units computed during initialization
+ */
+extern cputime_t cputime_one_jiffy;
+
+/*
  * Convert cputime <-> jiffies
  */
 extern u64 __cputime_jiffies_factor;
@@ -89,6 +97,11 @@ static inline cputime_t jiffies_to_cputime(const unsigned long jif)
     return ct;
 }
 
+static inline void setup_cputime_one_jiffy(void)
+{
+    cputime_one_jiffy = jiffies_to_cputime(1);
+}
+
 static inline cputime64_t jiffies64_to_cputime64(const u64 jif)
 {
     cputime_t ct;
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index a508388fb87c..5b1657540a7d 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -193,6 +193,8 @@ EXPORT_SYMBOL(__cputime_clockt_factor);
 DEFINE_PER_CPU(unsigned long, cputime_last_delta);
 DEFINE_PER_CPU(unsigned long, cputime_scaled_last_delta);
 
+cputime_t cputime_one_jiffy;
+
 static void calc_cputime_factors(void)
 {
     struct div_result res;
@@ -500,6 +502,7 @@ static int __init iSeries_tb_recal(void)
             tb_to_xs = divres.result_low;
             vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
             vdso_data->tb_to_xs = tb_to_xs;
+            setup_cputime_one_jiffy();
         }
         else {
             printk( "Titan recalibrate: FAILED (difference > 4 percent)\n"
@@ -950,6 +953,7 @@ void __init time_init(void)
     tb_ticks_per_usec = ppc_tb_freq / 1000000;
     tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
     calc_cputime_factors();
+    setup_cputime_one_jiffy();
 
     /*
      * Calculate the length of each tick in ns.  It will not be
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index 7a3817a656df..24b1244aadb9 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -42,6 +42,7 @@ __div(unsigned long long n, unsigned int base)
 #endif /* __s390x__ */
 
 #define cputime_zero            (0ULL)
+#define cputime_one_jiffy       jiffies_to_cputime(1)
 #define cputime_max             ((~0UL >> 1) - 1)
 #define cputime_add(__a, __b)   ((__a) + (__b))
 #define cputime_sub(__a, __b)   ((__a) - (__b))
diff --git a/include/asm-generic/cputime.h b/include/asm-generic/cputime.h
index 1c1fa422d18a..ca0f239f0e13 100644
--- a/include/asm-generic/cputime.h
+++ b/include/asm-generic/cputime.h
@@ -7,6 +7,7 @@
 typedef unsigned long cputime_t;
 
 #define cputime_zero            (0UL)
+#define cputime_one_jiffy       jiffies_to_cputime(1)
 #define cputime_max             ((~0UL >> 1) - 1)
 #define cputime_add(__a, __b)   ((__a) + (__b))
 #define cputime_sub(__a, __b)   ((__a) - (__b))
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3ab08e4bb6b8..a069e65e8bb7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -470,6 +470,13 @@ struct pacct_struct {
     unsigned long ac_minflt, ac_majflt;
 };
 
+struct cpu_itimer {
+    cputime_t expires;
+    cputime_t incr;
+    u32 error;
+    u32 incr_error;
+};
+
 /**
  * struct task_cputime - collected CPU time counts
  * @utime:      time spent in user mode, in &cputime_t units
@@ -564,9 +571,12 @@ struct signal_struct {
     struct pid *leader_pid;
     ktime_t it_real_incr;
 
-    /* ITIMER_PROF and ITIMER_VIRTUAL timers for the process */
-    cputime_t it_prof_expires, it_virt_expires;
-    cputime_t it_prof_incr, it_virt_incr;
+    /*
+     * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
+     * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these
+     * values are defined to 0 and 1 respectively
+     */
+    struct cpu_itimer it[2];
 
     /*
      * Thread group totals for process CPU timers.
diff --git a/kernel/fork.c b/kernel/fork.c
index 021e1138556e..14cf79f14237 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -62,6 +62,7 @@
 #include <linux/fs_struct.h>
 #include <linux/magic.h>
 #include <linux/perf_counter.h>
+#include <linux/posix-timers.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -790,10 +791,10 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig)
     thread_group_cputime_init(sig);
 
     /* Expiration times and increments. */
-    sig->it_virt_expires = cputime_zero;
-    sig->it_virt_incr = cputime_zero;
-    sig->it_prof_expires = cputime_zero;
-    sig->it_prof_incr = cputime_zero;
+    sig->it[CPUCLOCK_PROF].expires = cputime_zero;
+    sig->it[CPUCLOCK_PROF].incr = cputime_zero;
+    sig->it[CPUCLOCK_VIRT].expires = cputime_zero;
+    sig->it[CPUCLOCK_VIRT].incr = cputime_zero;
 
     /* Cached expiration times. */
     sig->cputime_expires.prof_exp = cputime_zero;
diff --git a/kernel/itimer.c b/kernel/itimer.c
index 58762f7077ec..8078a32d3b10 100644
--- a/kernel/itimer.c
+++ b/kernel/itimer.c
@@ -41,10 +41,43 @@ static struct timeval itimer_get_remtime(struct hrtimer *timer)
     return ktime_to_timeval(rem);
 }
 
+static void get_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
+                           struct itimerval *const value)
+{
+    cputime_t cval, cinterval;
+    struct cpu_itimer *it = &tsk->signal->it[clock_id];
+
+    spin_lock_irq(&tsk->sighand->siglock);
+
+    cval = it->expires;
+    cinterval = it->incr;
+    if (!cputime_eq(cval, cputime_zero)) {
+        struct task_cputime cputime;
+        cputime_t t;
+
+        thread_group_cputimer(tsk, &cputime);
+        if (clock_id == CPUCLOCK_PROF)
+            t = cputime_add(cputime.utime, cputime.stime);
+        else
+            /* CPUCLOCK_VIRT */
+            t = cputime.utime;
+
+        if (cputime_le(cval, t))
+            /* about to fire */
+            cval = cputime_one_jiffy;
+        else
+            cval = cputime_sub(cval, t);
+    }
+
+    spin_unlock_irq(&tsk->sighand->siglock);
+
+    cputime_to_timeval(cval, &value->it_value);
+    cputime_to_timeval(cinterval, &value->it_interval);
+}
+
 int do_getitimer(int which, struct itimerval *value)
 {
     struct task_struct *tsk = current;
-    cputime_t cinterval, cval;
 
     switch (which) {
     case ITIMER_REAL:
@@ -55,44 +88,10 @@ int do_getitimer(int which, struct itimerval *value)
         spin_unlock_irq(&tsk->sighand->siglock);
         break;
     case ITIMER_VIRTUAL:
-        spin_lock_irq(&tsk->sighand->siglock);
-        cval = tsk->signal->it_virt_expires;
-        cinterval = tsk->signal->it_virt_incr;
-        if (!cputime_eq(cval, cputime_zero)) {
-            struct task_cputime cputime;
-            cputime_t utime;
-
-            thread_group_cputimer(tsk, &cputime);
-            utime = cputime.utime;
-            if (cputime_le(cval, utime)) { /* about to fire */
-                cval = jiffies_to_cputime(1);
-            } else {
-                cval = cputime_sub(cval, utime);
-            }
-        }
-        spin_unlock_irq(&tsk->sighand->siglock);
-        cputime_to_timeval(cval, &value->it_value);
-        cputime_to_timeval(cinterval, &value->it_interval);
+        get_cpu_itimer(tsk, CPUCLOCK_VIRT, value);
         break;
     case ITIMER_PROF:
-        spin_lock_irq(&tsk->sighand->siglock);
-        cval = tsk->signal->it_prof_expires;
-        cinterval = tsk->signal->it_prof_incr;
-        if (!cputime_eq(cval, cputime_zero)) {
-            struct task_cputime times;
-            cputime_t ptime;
-
-            thread_group_cputimer(tsk, &times);
-            ptime = cputime_add(times.utime, times.stime);
-            if (cputime_le(cval, ptime)) { /* about to fire */
-                cval = jiffies_to_cputime(1);
-            } else {
-                cval = cputime_sub(cval, ptime);
-            }
-        }
-        spin_unlock_irq(&tsk->sighand->siglock);
-        cputime_to_timeval(cval, &value->it_value);
-        cputime_to_timeval(cinterval, &value->it_interval);
+        get_cpu_itimer(tsk, CPUCLOCK_PROF, value);
         break;
     default:
         return(-EINVAL);
@@ -128,6 +127,54 @@ enum hrtimer_restart it_real_fn(struct hrtimer *timer)
     return HRTIMER_NORESTART;
 }
 
+static inline u32 cputime_sub_ns(cputime_t ct, s64 real_ns)
+{
+    struct timespec ts;
+    s64 cpu_ns;
+
+    cputime_to_timespec(ct, &ts);
+    cpu_ns = timespec_to_ns(&ts);
+
+    return (cpu_ns <= real_ns) ? 0 : cpu_ns - real_ns;
+}
+
+static void set_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
+                           const struct itimerval *const value,
+                           struct itimerval *const ovalue)
+{
+    cputime_t cval, nval, cinterval, ninterval;
+    s64 ns_ninterval, ns_nval;
+    struct cpu_itimer *it = &tsk->signal->it[clock_id];
+
+    nval = timeval_to_cputime(&value->it_value);
+    ns_nval = timeval_to_ns(&value->it_value);
+    ninterval = timeval_to_cputime(&value->it_interval);
+    ns_ninterval = timeval_to_ns(&value->it_interval);
+
+    it->incr_error = cputime_sub_ns(ninterval, ns_ninterval);
+    it->error = cputime_sub_ns(nval, ns_nval);
+
+    spin_lock_irq(&tsk->sighand->siglock);
+
+    cval = it->expires;
+    cinterval = it->incr;
+    if (!cputime_eq(cval, cputime_zero) ||
+        !cputime_eq(nval, cputime_zero)) {
+        if (cputime_gt(nval, cputime_zero))
+            nval = cputime_add(nval, cputime_one_jiffy);
+        set_process_cpu_timer(tsk, clock_id, &nval, &cval);
+    }
+    it->expires = nval;
+    it->incr = ninterval;
+
+    spin_unlock_irq(&tsk->sighand->siglock);
+
+    if (ovalue) {
+        cputime_to_timeval(cval, &ovalue->it_value);
+        cputime_to_timeval(cinterval, &ovalue->it_interval);
+    }
+}
+
 /*
  * Returns true if the timeval is in canonical form
  */
@@ -139,7 +186,6 @@ int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue)
     struct task_struct *tsk = current;
     struct hrtimer *timer;
     ktime_t expires;
-    cputime_t cval, cinterval, nval, ninterval;
 
     /*
      * Validate the timevals in value.
@@ -174,48 +220,10 @@ again:
         spin_unlock_irq(&tsk->sighand->siglock);
         break;
     case ITIMER_VIRTUAL:
-        nval = timeval_to_cputime(&value->it_value);
-        ninterval = timeval_to_cputime(&value->it_interval);
-        spin_lock_irq(&tsk->sighand->siglock);
-        cval = tsk->signal->it_virt_expires;
-        cinterval = tsk->signal->it_virt_incr;
-        if (!cputime_eq(cval, cputime_zero) ||
-            !cputime_eq(nval, cputime_zero)) {
-            if (cputime_gt(nval, cputime_zero))
-                nval = cputime_add(nval,
-                                   jiffies_to_cputime(1));
-            set_process_cpu_timer(tsk, CPUCLOCK_VIRT,
-                                  &nval, &cval);
-        }
-        tsk->signal->it_virt_expires = nval;
-        tsk->signal->it_virt_incr = ninterval;
-        spin_unlock_irq(&tsk->sighand->siglock);
-        if (ovalue) {
-            cputime_to_timeval(cval, &ovalue->it_value);
-            cputime_to_timeval(cinterval, &ovalue->it_interval);
-        }
+        set_cpu_itimer(tsk, CPUCLOCK_VIRT, value, ovalue);
         break;
     case ITIMER_PROF:
-        nval = timeval_to_cputime(&value->it_value);
-        ninterval = timeval_to_cputime(&value->it_interval);
-        spin_lock_irq(&tsk->sighand->siglock);
-        cval = tsk->signal->it_prof_expires;
-        cinterval = tsk->signal->it_prof_incr;
-        if (!cputime_eq(cval, cputime_zero) ||
-            !cputime_eq(nval, cputime_zero)) {
-            if (cputime_gt(nval, cputime_zero))
-                nval = cputime_add(nval,
-                                   jiffies_to_cputime(1));
-            set_process_cpu_timer(tsk, CPUCLOCK_PROF,
-                                  &nval, &cval);
-        }
-        tsk->signal->it_prof_expires = nval;
-        tsk->signal->it_prof_incr = ninterval;
-        spin_unlock_irq(&tsk->sighand->siglock);
-        if (ovalue) {
-            cputime_to_timeval(cval, &ovalue->it_value);
-            cputime_to_timeval(cinterval, &ovalue->it_interval);
-        }
+        set_cpu_itimer(tsk, CPUCLOCK_PROF, value, ovalue);
         break;
     default:
         return -EINVAL;
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index e33a21cb9407..12161f74744e 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -14,11 +14,11 @@
  */
 void update_rlimit_cpu(unsigned long rlim_new)
 {
-    cputime_t cputime;
+    cputime_t cputime = secs_to_cputime(rlim_new);
+    struct signal_struct *const sig = current->signal;
 
-    cputime = secs_to_cputime(rlim_new);
-    if (cputime_eq(current->signal->it_prof_expires, cputime_zero) ||
-        cputime_gt(current->signal->it_prof_expires, cputime)) {
+    if (cputime_eq(sig->it[CPUCLOCK_PROF].expires, cputime_zero) ||
+        cputime_gt(sig->it[CPUCLOCK_PROF].expires, cputime)) {
         spin_lock_irq(&current->sighand->siglock);
         set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
         spin_unlock_irq(&current->sighand->siglock);
@@ -542,6 +542,17 @@ static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
                        now);
 }
 
+static inline int expires_gt(cputime_t expires, cputime_t new_exp)
+{
+    return cputime_eq(expires, cputime_zero) ||
+           cputime_gt(expires, new_exp);
+}
+
+static inline int expires_le(cputime_t expires, cputime_t new_exp)
+{
+    return !cputime_eq(expires, cputime_zero) &&
+           cputime_le(expires, new_exp);
+}
 /*
  * Insert the timer on the appropriate list before any timers that
  * expire later.  This must be called with the tasklist_lock held
@@ -586,34 +597,32 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
      */
 
     if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
+        union cpu_time_count *exp = &nt->expires;
+
         switch (CPUCLOCK_WHICH(timer->it_clock)) {
         default:
             BUG();
         case CPUCLOCK_PROF:
-            if (cputime_eq(p->cputime_expires.prof_exp,
-                           cputime_zero) ||
-                cputime_gt(p->cputime_expires.prof_exp,
-                           nt->expires.cpu))
-                p->cputime_expires.prof_exp =
-                    nt->expires.cpu;
+            if (expires_gt(p->cputime_expires.prof_exp,
+                           exp->cpu))
+                p->cputime_expires.prof_exp = exp->cpu;
             break;
         case CPUCLOCK_VIRT:
-            if (cputime_eq(p->cputime_expires.virt_exp,
-                           cputime_zero) ||
-                cputime_gt(p->cputime_expires.virt_exp,
-                           nt->expires.cpu))
-                p->cputime_expires.virt_exp =
-                    nt->expires.cpu;
+            if (expires_gt(p->cputime_expires.virt_exp,
+                           exp->cpu))
+                p->cputime_expires.virt_exp = exp->cpu;
             break;
         case CPUCLOCK_SCHED:
             if (p->cputime_expires.sched_exp == 0 ||
-                p->cputime_expires.sched_exp >
-                        nt->expires.sched)
+                p->cputime_expires.sched_exp > exp->sched)
                 p->cputime_expires.sched_exp =
-                    nt->expires.sched;
+                    exp->sched;
             break;
         }
     } else {
+        struct signal_struct *const sig = p->signal;
+        union cpu_time_count *exp = &timer->it.cpu.expires;
+
         /*
          * For a process timer, set the cached expiration time.
          */
@@ -621,30 +630,23 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
         default:
             BUG();
         case CPUCLOCK_VIRT:
-            if (!cputime_eq(p->signal->it_virt_expires,
-                            cputime_zero) &&
-                cputime_lt(p->signal->it_virt_expires,
-                           timer->it.cpu.expires.cpu))
+            if (expires_le(sig->it[CPUCLOCK_VIRT].expires,
+                           exp->cpu))
                 break;
-            p->signal->cputime_expires.virt_exp =
-                timer->it.cpu.expires.cpu;
+            sig->cputime_expires.virt_exp = exp->cpu;
             break;
         case CPUCLOCK_PROF:
-            if (!cputime_eq(p->signal->it_prof_expires,
-                            cputime_zero) &&
-                cputime_lt(p->signal->it_prof_expires,
-                           timer->it.cpu.expires.cpu))
+            if (expires_le(sig->it[CPUCLOCK_PROF].expires,
+                           exp->cpu))
                 break;
-            i = p->signal->rlim[RLIMIT_CPU].rlim_cur;
+            i = sig->rlim[RLIMIT_CPU].rlim_cur;
             if (i != RLIM_INFINITY &&
-                i <= cputime_to_secs(timer->it.cpu.expires.cpu))
+                i <= cputime_to_secs(exp->cpu))
                 break;
-            p->signal->cputime_expires.prof_exp =
-                timer->it.cpu.expires.cpu;
+            sig->cputime_expires.prof_exp = exp->cpu;
             break;
         case CPUCLOCK_SCHED:
-            p->signal->cputime_expires.sched_exp =
-                timer->it.cpu.expires.sched;
+            sig->cputime_expires.sched_exp = exp->sched;
             break;
         }
     }
@@ -1071,6 +1073,36 @@ static void stop_process_timers(struct task_struct *tsk)
     spin_unlock_irqrestore(&cputimer->lock, flags);
 }
 
+static u32 onecputick;
+
+static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
+                             cputime_t *expires, cputime_t cur_time, int signo)
+{
+    if (cputime_eq(it->expires, cputime_zero))
+        return;
+
+    if (cputime_ge(cur_time, it->expires)) {
+        if (!cputime_eq(it->incr, cputime_zero)) {
+            it->expires = cputime_add(it->expires, it->incr);
+            it->error += it->incr_error;
+            if (it->error >= onecputick) {
+                it->expires = cputime_sub(it->expires,
+                                          cputime_one_jiffy);
+                it->error -= onecputick;
+            }
+        } else
+            it->expires = cputime_zero;
+
+        __group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
+    }
+
+    if (!cputime_eq(it->expires, cputime_zero) &&
+        (cputime_eq(*expires, cputime_zero) ||
+         cputime_lt(it->expires, *expires))) {
+        *expires = it->expires;
+    }
+}
+
 /*
  * Check for any per-thread CPU timers that have fired and move them
  * off the tsk->*_timers list onto the firing list.  Per-thread timers
@@ -1090,10 +1122,10 @@ static void check_process_timers(struct task_struct *tsk,
      * Don't sample the current process CPU clocks if there are no timers.
      */
     if (list_empty(&timers[CPUCLOCK_PROF]) &&
-        cputime_eq(sig->it_prof_expires, cputime_zero) &&
+        cputime_eq(sig->it[CPUCLOCK_PROF].expires, cputime_zero) &&
         sig->rlim[RLIMIT_CPU].rlim_cur == RLIM_INFINITY &&
         list_empty(&timers[CPUCLOCK_VIRT]) &&
-        cputime_eq(sig->it_virt_expires, cputime_zero) &&
+        cputime_eq(sig->it[CPUCLOCK_VIRT].expires, cputime_zero) &&
         list_empty(&timers[CPUCLOCK_SCHED])) {
         stop_process_timers(tsk);
         return;
@@ -1153,38 +1185,11 @@ static void check_process_timers(struct task_struct *tsk,
     /*
      * Check for the special case process timers.
      */
-    if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
-        if (cputime_ge(ptime, sig->it_prof_expires)) {
-            /* ITIMER_PROF fires and reloads. */
-            sig->it_prof_expires = sig->it_prof_incr;
-            if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
-                sig->it_prof_expires = cputime_add(
-                    sig->it_prof_expires, ptime);
-            }
-            __group_send_sig_info(SIGPROF, SEND_SIG_PRIV, tsk);
-        }
-        if (!cputime_eq(sig->it_prof_expires, cputime_zero) &&
-            (cputime_eq(prof_expires, cputime_zero) ||
-             cputime_lt(sig->it_prof_expires, prof_expires))) {
-            prof_expires = sig->it_prof_expires;
-        }
-    }
-    if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
-        if (cputime_ge(utime, sig->it_virt_expires)) {
-            /* ITIMER_VIRTUAL fires and reloads. */
-            sig->it_virt_expires = sig->it_virt_incr;
-            if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
-                sig->it_virt_expires = cputime_add(
-                    sig->it_virt_expires, utime);
-            }
-            __group_send_sig_info(SIGVTALRM, SEND_SIG_PRIV, tsk);
-        }
-        if (!cputime_eq(sig->it_virt_expires, cputime_zero) &&
-            (cputime_eq(virt_expires, cputime_zero) ||
-             cputime_lt(sig->it_virt_expires, virt_expires))) {
-            virt_expires = sig->it_virt_expires;
-        }
-    }
+    check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF], &prof_expires, ptime,
+                     SIGPROF);
+    check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
+                     SIGVTALRM);
+
     if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
         unsigned long psecs = cputime_to_secs(ptime);
         cputime_t x;
@@ -1457,7 +1462,7 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
     if (!cputime_eq(*oldval, cputime_zero)) {
         if (cputime_le(*oldval, now.cpu)) {
             /* Just about to fire. */
-            *oldval = jiffies_to_cputime(1);
+            *oldval = cputime_one_jiffy;
         } else {
             *oldval = cputime_sub(*oldval, now.cpu);
         }
@@ -1703,10 +1708,15 @@ static __init int init_posix_cpu_timers(void)
         .nsleep = thread_cpu_nsleep,
         .nsleep_restart = thread_cpu_nsleep_restart,
     };
+    struct timespec ts;
 
     register_posix_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
     register_posix_clock(CLOCK_THREAD_CPUTIME_ID, &thread);
 
+    cputime_to_timespec(cputime_one_jiffy, &ts);
+    onecputick = ts.tv_nsec;
+    WARN_ON(ts.tv_sec != 0);
+
     return 0;
 }
 __initcall(init_posix_cpu_timers);
diff --git a/kernel/sched.c b/kernel/sched.c
index 1b59e265273b..8f977d5cc515 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5031,17 +5031,16 @@ void account_idle_time(cputime_t cputime)
  */
 void account_process_tick(struct task_struct *p, int user_tick)
 {
-    cputime_t one_jiffy = jiffies_to_cputime(1);
-    cputime_t one_jiffy_scaled = cputime_to_scaled(one_jiffy);
+    cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
     struct rq *rq = this_rq();
 
     if (user_tick)
-        account_user_time(p, one_jiffy, one_jiffy_scaled);
+        account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
     else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
-        account_system_time(p, HARDIRQ_OFFSET, one_jiffy,
-                            one_jiffy_scaled);
+        account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
+                            one_jiffy_scaled);
     else
-        account_idle_time(one_jiffy);
+        account_idle_time(cputime_one_jiffy);
 }
 
 /*