| author | Frederic Weisbecker <fweisbec@gmail.com> | 2013-06-27 20:06:42 -0400 |
|---|---|---|
| committer | Frederic Weisbecker <fweisbec@gmail.com> | 2013-07-03 10:16:20 -0400 |
| commit | 55ccb616a6e42052edb37e9c4f82cf8854a59429 (patch) | |
| tree | 9a01858cb1aa6c49dc3baaff5bd513d068b4e22e /kernel/posix-cpu-timers.c | |
| parent | 8bb495e3f02401ee6f76d1b1d77f3ac9f079e376 (diff) | |
posix_cpu_timer: consolidate expiry time type
The posix cpu timer expiry time is stored in a union of two types: a
64-bit field if we rely on precise scheduler accounting, or a cputime_t
if we rely on jiffies.

This results in a good deal of duplicated code and special cases to
handle the two types.

Just unify this into a single 64-bit field. A cputime_t value can always
fit into it.
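For reference, the union being consolidated is declared outside this file (in include/linux/posix-timers.h), so it does not appear in the diff below. Here is a minimal sketch of the before and after shapes, together with the conversion helpers the new code calls; the helper bodies are an assumption inferred from the `__force` cast visible in sample_to_timespec() below:

```c
/* Before: expiry time dispatched on two member types. */
union cpu_time_count {
	cputime_t cpu;			/* jiffies-based accounting */
	unsigned long long sched;	/* precise nanosecond accounting */
};

/* After: one 64-bit scalar. A cputime_t always fits, so the
 * conversions are presumably plain casts (__force quiets sparse). */
static inline unsigned long long cputime_to_expires(cputime_t expires)
{
	return (__force unsigned long long)expires;
}

static inline cputime_t expires_to_cputime(unsigned long long expires)
{
	return (__force cputime_t)expires;
}
```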
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Stanislaw Gruszka <sgruszka@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@gmail.com>
Cc: Olivier Langlois <olivier@trillion01.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'kernel/posix-cpu-timers.c')
| -rw-r--r-- | kernel/posix-cpu-timers.c | 266 |

1 file changed, 106 insertions(+), 160 deletions(-)
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 42670e9b44e0..c3c4ea1225a4 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -51,59 +51,28 @@ static int check_clock(const clockid_t which_clock)
 	return error;
 }
 
-static inline union cpu_time_count
+static inline unsigned long long
 timespec_to_sample(const clockid_t which_clock, const struct timespec *tp)
 {
-	union cpu_time_count ret;
-	ret.sched = 0;		/* high half always zero when .cpu used */
+	unsigned long long ret;
+
+	ret = 0;		/* high half always zero when .cpu used */
 	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
-		ret.sched = (unsigned long long)tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
+		ret = (unsigned long long)tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
 	} else {
-		ret.cpu = timespec_to_cputime(tp);
+		ret = cputime_to_expires(timespec_to_cputime(tp));
 	}
 	return ret;
 }
 
 static void sample_to_timespec(const clockid_t which_clock,
-			       union cpu_time_count cpu,
+			       unsigned long long expires,
 			       struct timespec *tp)
 {
 	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED)
-		*tp = ns_to_timespec(cpu.sched);
+		*tp = ns_to_timespec(expires);
 	else
-		cputime_to_timespec(cpu.cpu, tp);
-}
-
-static inline int cpu_time_before(const clockid_t which_clock,
-				  union cpu_time_count now,
-				  union cpu_time_count then)
-{
-	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
-		return now.sched < then.sched;
-	} else {
-		return now.cpu < then.cpu;
-	}
-}
-static inline void cpu_time_add(const clockid_t which_clock,
-				union cpu_time_count *acc,
-				union cpu_time_count val)
-{
-	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
-		acc->sched += val.sched;
-	} else {
-		acc->cpu += val.cpu;
-	}
-}
-static inline union cpu_time_count cpu_time_sub(const clockid_t which_clock,
-						union cpu_time_count a,
-						union cpu_time_count b)
-{
-	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
-		a.sched -= b.sched;
-	} else {
-		a.cpu -= b.cpu;
-	}
-	return a;
+		cputime_to_timespec((__force cputime_t)expires, tp);
 }
 
 /*
@@ -111,47 +80,31 @@ static inline union cpu_time_count cpu_time_sub(const clockid_t which_clock,
  * given the current clock sample.
  */
 static void bump_cpu_timer(struct k_itimer *timer,
-			   union cpu_time_count now)
+			   unsigned long long now)
 {
 	int i;
+	unsigned long long delta, incr;
 
-	if (timer->it.cpu.incr.sched == 0)
+	if (timer->it.cpu.incr == 0)
 		return;
 
-	if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
-		unsigned long long delta, incr;
-
-		if (now.sched < timer->it.cpu.expires.sched)
-			return;
-		incr = timer->it.cpu.incr.sched;
-		delta = now.sched + incr - timer->it.cpu.expires.sched;
-		/* Don't use (incr*2 < delta), incr*2 might overflow. */
-		for (i = 0; incr < delta - incr; i++)
-			incr = incr << 1;
-		for (; i >= 0; incr >>= 1, i--) {
-			if (delta < incr)
-				continue;
-			timer->it.cpu.expires.sched += incr;
-			timer->it_overrun += 1 << i;
-			delta -= incr;
-		}
-	} else {
-		cputime_t delta, incr;
-
-		if (now.cpu < timer->it.cpu.expires.cpu)
-			return;
-		incr = timer->it.cpu.incr.cpu;
-		delta = now.cpu + incr - timer->it.cpu.expires.cpu;
-		/* Don't use (incr*2 < delta), incr*2 might overflow. */
-		for (i = 0; incr < delta - incr; i++)
-			incr += incr;
-		for (; i >= 0; incr = incr >> 1, i--) {
-			if (delta < incr)
-				continue;
-			timer->it.cpu.expires.cpu += incr;
-			timer->it_overrun += 1 << i;
-			delta -= incr;
-		}
+	if (now < timer->it.cpu.expires)
+		return;
+
+	incr = timer->it.cpu.incr;
+	delta = now + incr - timer->it.cpu.expires;
+
+	/* Don't use (incr*2 < delta), incr*2 might overflow. */
+	for (i = 0; incr < delta - incr; i++)
+		incr = incr << 1;
+
+	for (; i >= 0; incr >>= 1, i--) {
+		if (delta < incr)
+			continue;
+
+		timer->it.cpu.expires += incr;
+		timer->it_overrun += 1 << i;
+		delta -= incr;
 	}
 }
 
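Worth noting: the consolidation leaves the overrun algorithm itself intact. It advances the expiry past the current time in O(log(delta/incr)) steps by first finding the largest power-of-two multiple of the increment that fits in the elapsed delta, then walking back down; the `incr < delta - incr` test is the overflow-safe spelling of `incr * 2 < delta`. A standalone userspace sketch of the same loop (hypothetical names, plain u64 standing in for the kernel fields):

```c
#include <stdio.h>

typedef unsigned long long u64;

/* Advance *expires past now in whole periods of incr, returning how
 * many periods were skipped. Mirrors bump_cpu_timer(): incr is grown
 * until one more doubling would meet or exceed delta, so incr * 2 is
 * never computed directly and cannot overflow the comparison. */
static int bump(u64 *expires, u64 incr, u64 now)
{
	u64 delta = now + incr - *expires;
	int i, overruns = 0;

	/* Find the largest incr << i that still fits inside delta. */
	for (i = 0; incr < delta - incr; i++)
		incr <<= 1;

	/* Greedily consume delta with decreasing power-of-two chunks. */
	for (; i >= 0; incr >>= 1, i--) {
		if (delta < incr)
			continue;
		*expires += incr;
		overruns += 1 << i;
		delta -= incr;
	}
	return overruns;
}

int main(void)
{
	u64 expires = 100;

	/* Period 7, clock at 1000: 129 periods pass, new expiry is 1003. */
	printf("overruns=%d expires=%llu\n",
	       bump(&expires, 7, 1000), expires);
	return 0;
}
```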
@@ -170,21 +123,21 @@ static inline int task_cputime_zero(const struct task_cputime *cputime)
 	return 0;
 }
 
-static inline cputime_t prof_ticks(struct task_struct *p)
+static inline unsigned long long prof_ticks(struct task_struct *p)
 {
 	cputime_t utime, stime;
 
 	task_cputime(p, &utime, &stime);
 
-	return utime + stime;
+	return cputime_to_expires(utime + stime);
 }
-static inline cputime_t virt_ticks(struct task_struct *p)
+static inline unsigned long long virt_ticks(struct task_struct *p)
 {
 	cputime_t utime;
 
 	task_cputime(p, &utime, NULL);
 
-	return utime;
+	return cputime_to_expires(utime);
 }
 
 static int
@@ -225,19 +178,19 @@ posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp)
  * Sample a per-thread clock for the given task.
  */
 static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
-			    union cpu_time_count *cpu)
+			    unsigned long long *sample)
 {
 	switch (CPUCLOCK_WHICH(which_clock)) {
 	default:
 		return -EINVAL;
 	case CPUCLOCK_PROF:
-		cpu->cpu = prof_ticks(p);
+		*sample = prof_ticks(p);
 		break;
 	case CPUCLOCK_VIRT:
-		cpu->cpu = virt_ticks(p);
+		*sample = virt_ticks(p);
 		break;
 	case CPUCLOCK_SCHED:
-		cpu->sched = task_sched_runtime(p);
+		*sample = task_sched_runtime(p);
 		break;
 	}
 	return 0;
@@ -284,7 +237,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
  */
 static int cpu_clock_sample_group(const clockid_t which_clock,
 				  struct task_struct *p,
-				  union cpu_time_count *cpu)
+				  unsigned long long *sample)
 {
 	struct task_cputime cputime;
 
@@ -293,15 +246,15 @@ static int cpu_clock_sample_group(const clockid_t which_clock,
 		return -EINVAL;
 	case CPUCLOCK_PROF:
 		thread_group_cputime(p, &cputime);
-		cpu->cpu = cputime.utime + cputime.stime;
+		*sample = cputime_to_expires(cputime.utime + cputime.stime);
 		break;
 	case CPUCLOCK_VIRT:
 		thread_group_cputime(p, &cputime);
-		cpu->cpu = cputime.utime;
+		*sample = cputime_to_expires(cputime.utime);
 		break;
 	case CPUCLOCK_SCHED:
 		thread_group_cputime(p, &cputime);
-		cpu->sched = cputime.sum_exec_runtime;
+		*sample = cputime.sum_exec_runtime;
 		break;
 	}
 	return 0;
@@ -312,7 +265,7 @@ static int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
 {
 	const pid_t pid = CPUCLOCK_PID(which_clock);
 	int error = -EINVAL;
-	union cpu_time_count rtn;
+	unsigned long long rtn;
 
 	if (pid == 0) {
 		/*
@@ -461,30 +414,30 @@ static void cleanup_timers(struct list_head *head,
 
 	list_for_each_entry_safe(timer, next, head, entry) {
 		list_del_init(&timer->entry);
-		if (timer->expires.cpu < ptime) {
-			timer->expires.cpu = 0;
+		if (timer->expires < cputime_to_expires(ptime)) {
+			timer->expires = 0;
 		} else {
-			timer->expires.cpu -= ptime;
+			timer->expires -= cputime_to_expires(ptime);
 		}
 	}
 
 	++head;
 	list_for_each_entry_safe(timer, next, head, entry) {
 		list_del_init(&timer->entry);
-		if (timer->expires.cpu < utime) {
-			timer->expires.cpu = 0;
+		if (timer->expires < cputime_to_expires(utime)) {
+			timer->expires = 0;
 		} else {
-			timer->expires.cpu -= utime;
+			timer->expires -= cputime_to_expires(utime);
 		}
 	}
 
 	++head;
 	list_for_each_entry_safe(timer, next, head, entry) {
 		list_del_init(&timer->entry);
-		if (timer->expires.sched < sum_exec_runtime) {
-			timer->expires.sched = 0;
+		if (timer->expires < sum_exec_runtime) {
+			timer->expires = 0;
 		} else {
-			timer->expires.sched -= sum_exec_runtime;
+			timer->expires -= sum_exec_runtime;
 		}
 	}
 }
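With the union gone, each of the three passes above becomes the same saturating rebase applied to a different clock's timer list; only the elapsed-time argument differs. The pattern, as a hypothetical one-liner:

```c
/* Rebase an absolute expiry against time already consumed by the
 * exiting task; anything already in the past clamps to 0. */
static inline unsigned long long rebase_expires(unsigned long long expires,
						unsigned long long elapsed)
{
	return expires < elapsed ? 0 : expires - elapsed;
}
```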
@@ -516,7 +469,7 @@ void posix_cpu_timers_exit_group(struct task_struct *tsk)
 			   tsk->se.sum_exec_runtime + sig->sum_sched_runtime);
 }
 
-static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
+static void clear_dead_task(struct k_itimer *timer, unsigned long long now)
 {
 	/*
 	 * That's all for this thread or process.
@@ -524,9 +477,7 @@ static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
 	 */
 	put_task_struct(timer->it.cpu.task);
 	timer->it.cpu.task = NULL;
-	timer->it.cpu.expires = cpu_time_sub(timer->it_clock,
-					     timer->it.cpu.expires,
-					     now);
+	timer->it.cpu.expires -= now;
 }
 
 static inline int expires_gt(cputime_t expires, cputime_t new_exp)
@@ -558,14 +509,14 @@ static void arm_timer(struct k_itimer *timer)
 
 	listpos = head;
 	list_for_each_entry(next, head, entry) {
-		if (cpu_time_before(timer->it_clock, nt->expires, next->expires))
+		if (nt->expires < next->expires)
 			break;
 		listpos = &next->entry;
 	}
 	list_add(&nt->entry, listpos);
 
 	if (listpos == head) {
-		union cpu_time_count *exp = &nt->expires;
+		unsigned long long exp = nt->expires;
 
 		/*
 		 * We are the new earliest-expiring POSIX 1.b timer, hence
@@ -576,17 +527,17 @@ static void arm_timer(struct k_itimer *timer)
 
 		switch (CPUCLOCK_WHICH(timer->it_clock)) {
 		case CPUCLOCK_PROF:
-			if (expires_gt(cputime_expires->prof_exp, exp->cpu))
-				cputime_expires->prof_exp = exp->cpu;
+			if (expires_gt(cputime_expires->prof_exp, expires_to_cputime(exp)))
+				cputime_expires->prof_exp = expires_to_cputime(exp);
 			break;
 		case CPUCLOCK_VIRT:
-			if (expires_gt(cputime_expires->virt_exp, exp->cpu))
-				cputime_expires->virt_exp = exp->cpu;
+			if (expires_gt(cputime_expires->virt_exp, expires_to_cputime(exp)))
+				cputime_expires->virt_exp = expires_to_cputime(exp);
 			break;
 		case CPUCLOCK_SCHED:
 			if (cputime_expires->sched_exp == 0 ||
-			    cputime_expires->sched_exp > exp->sched)
-				cputime_expires->sched_exp = exp->sched;
+			    cputime_expires->sched_exp > exp)
+				cputime_expires->sched_exp = exp;
 			break;
 		}
 	}
@@ -601,20 +552,20 @@ static void cpu_timer_fire(struct k_itimer *timer)
 		/*
 		 * User don't want any signal.
 		 */
-		timer->it.cpu.expires.sched = 0;
+		timer->it.cpu.expires = 0;
 	} else if (unlikely(timer->sigq == NULL)) {
 		/*
 		 * This a special case for clock_nanosleep,
 		 * not a normal timer from sys_timer_create.
 		 */
 		wake_up_process(timer->it_process);
-		timer->it.cpu.expires.sched = 0;
-	} else if (timer->it.cpu.incr.sched == 0) {
+		timer->it.cpu.expires = 0;
+	} else if (timer->it.cpu.incr == 0) {
 		/*
 		 * One-shot timer.  Clear it as soon as it's fired.
 		 */
 		posix_timer_event(timer, 0);
-		timer->it.cpu.expires.sched = 0;
+		timer->it.cpu.expires = 0;
 	} else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
 		/*
 		 * The signal did not get queued because the signal
@@ -632,7 +583,7 @@ static void cpu_timer_fire(struct k_itimer *timer)
  */
 static int cpu_timer_sample_group(const clockid_t which_clock,
 				  struct task_struct *p,
-				  union cpu_time_count *cpu)
+				  unsigned long long *sample)
 {
 	struct task_cputime cputime;
 
@@ -641,13 +592,13 @@ static int cpu_timer_sample_group(const clockid_t which_clock,
 	default:
 		return -EINVAL;
 	case CPUCLOCK_PROF:
-		cpu->cpu = cputime.utime + cputime.stime;
+		*sample = cputime_to_expires(cputime.utime + cputime.stime);
 		break;
 	case CPUCLOCK_VIRT:
-		cpu->cpu = cputime.utime;
+		*sample = cputime_to_expires(cputime.utime);
 		break;
 	case CPUCLOCK_SCHED:
-		cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p);
+		*sample = cputime.sum_exec_runtime + task_delta_exec(p);
 		break;
 	}
 	return 0;
@@ -694,7 +645,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 			       struct itimerspec *new, struct itimerspec *old)
 {
 	struct task_struct *p = timer->it.cpu.task;
-	union cpu_time_count old_expires, new_expires, old_incr, val;
+	unsigned long long old_expires, new_expires, old_incr, val;
 	int ret;
 
 	if (unlikely(p == NULL)) {
@@ -749,7 +700,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 	}
 
 	if (old) {
-		if (old_expires.sched == 0) {
+		if (old_expires == 0) {
 			old->it_value.tv_sec = 0;
 			old->it_value.tv_nsec = 0;
 		} else {
@@ -764,11 +715,8 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 			 * new setting.
 			 */
 			bump_cpu_timer(timer, val);
-			if (cpu_time_before(timer->it_clock, val,
-					    timer->it.cpu.expires)) {
-				old_expires = cpu_time_sub(
-					timer->it_clock,
-					timer->it.cpu.expires, val);
+			if (val < timer->it.cpu.expires) {
+				old_expires = timer->it.cpu.expires - val;
 				sample_to_timespec(timer->it_clock,
 						   old_expires,
 						   &old->it_value);
@@ -791,8 +739,8 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 		goto out;
 	}
 
-	if (new_expires.sched != 0 && !(flags & TIMER_ABSTIME)) {
-		cpu_time_add(timer->it_clock, &new_expires, val);
+	if (new_expires != 0 && !(flags & TIMER_ABSTIME)) {
+		new_expires += val;
 	}
 
 	/*
@@ -801,8 +749,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 	 * arm the timer (we'll just fake it for timer_gettime).
 	 */
 	timer->it.cpu.expires = new_expires;
-	if (new_expires.sched != 0 &&
-	    cpu_time_before(timer->it_clock, val, new_expires)) {
+	if (new_expires != 0 && val < new_expires) {
 		arm_timer(timer);
 	}
 
@@ -826,8 +773,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 	timer->it_overrun_last = 0;
 	timer->it_overrun = -1;
 
-	if (new_expires.sched != 0 &&
-	    !cpu_time_before(timer->it_clock, val, new_expires)) {
+	if (new_expires != 0 && !(val < new_expires)) {
 		/*
 		 * The designated time already passed, so we notify
 		 * immediately, even if the thread never runs to
@@ -849,7 +795,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 
 static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
 {
-	union cpu_time_count now;
+	unsigned long long now;
 	struct task_struct *p = timer->it.cpu.task;
 	int clear_dead;
 
@@ -859,7 +805,7 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
 	sample_to_timespec(timer->it_clock,
 			   timer->it.cpu.incr, &itp->it_interval);
 
-	if (timer->it.cpu.expires.sched == 0) {	/* Timer not armed at all.  */
+	if (timer->it.cpu.expires == 0) {	/* Timer not armed at all.  */
 		itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
 		return;
 	}
@@ -891,7 +837,7 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
 			 */
 			put_task_struct(p);
 			timer->it.cpu.task = NULL;
-			timer->it.cpu.expires.sched = 0;
+			timer->it.cpu.expires = 0;
 			read_unlock(&tasklist_lock);
 			goto dead;
 		} else {
@@ -912,10 +858,9 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
 		goto dead;
 	}
 
-	if (cpu_time_before(timer->it_clock, now, timer->it.cpu.expires)) {
+	if (now < timer->it.cpu.expires) {
 		sample_to_timespec(timer->it_clock,
-				   cpu_time_sub(timer->it_clock,
-						timer->it.cpu.expires, now),
+				   timer->it.cpu.expires - now,
 				   &itp->it_value);
 	} else {
 		/*
@@ -946,8 +891,8 @@ static void check_thread_timers(struct task_struct *tsk,
 		struct cpu_timer_list *t = list_first_entry(timers,
 						      struct cpu_timer_list,
 						      entry);
-		if (!--maxfire || prof_ticks(tsk) < t->expires.cpu) {
-			tsk->cputime_expires.prof_exp = t->expires.cpu;
+		if (!--maxfire || prof_ticks(tsk) < t->expires) {
+			tsk->cputime_expires.prof_exp = expires_to_cputime(t->expires);
 			break;
 		}
 		t->firing = 1;
@@ -961,8 +906,8 @@ static void check_thread_timers(struct task_struct *tsk,
 		struct cpu_timer_list *t = list_first_entry(timers,
 						      struct cpu_timer_list,
 						      entry);
-		if (!--maxfire || virt_ticks(tsk) < t->expires.cpu) {
-			tsk->cputime_expires.virt_exp = t->expires.cpu;
+		if (!--maxfire || virt_ticks(tsk) < t->expires) {
+			tsk->cputime_expires.virt_exp = expires_to_cputime(t->expires);
 			break;
 		}
 		t->firing = 1;
@@ -976,8 +921,8 @@ static void check_thread_timers(struct task_struct *tsk,
 		struct cpu_timer_list *t = list_first_entry(timers,
 						      struct cpu_timer_list,
 						      entry);
-		if (!--maxfire || tsk->se.sum_exec_runtime < t->expires.sched) {
-			tsk->cputime_expires.sched_exp = t->expires.sched;
+		if (!--maxfire || tsk->se.sum_exec_runtime < t->expires) {
+			tsk->cputime_expires.sched_exp = t->expires;
 			break;
 		}
 		t->firing = 1;
@@ -1030,7 +975,8 @@ static void stop_process_timers(struct signal_struct *sig)
 static u32 onecputick;
 
 static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
-			     cputime_t *expires, cputime_t cur_time, int signo)
+			     unsigned long long *expires,
+			     unsigned long long cur_time, int signo)
 {
 	if (!it->expires)
 		return;
@@ -1068,7 +1014,7 @@ static void check_process_timers(struct task_struct *tsk,
 {
 	int maxfire;
 	struct signal_struct *const sig = tsk->signal;
-	cputime_t utime, ptime, virt_expires, prof_expires;
+	unsigned long long utime, ptime, virt_expires, prof_expires;
 	unsigned long long sum_sched_runtime, sched_expires;
 	struct list_head *timers = sig->cpu_timers;
 	struct task_cputime cputime;
@@ -1078,8 +1024,8 @@ static void check_process_timers(struct task_struct *tsk,
 	 * Collect the current process totals.
 	 */
 	thread_group_cputimer(tsk, &cputime);
-	utime = cputime.utime;
-	ptime = utime + cputime.stime;
+	utime = cputime_to_expires(cputime.utime);
+	ptime = utime + cputime_to_expires(cputime.stime);
 	sum_sched_runtime = cputime.sum_exec_runtime;
 	maxfire = 20;
 	prof_expires = 0;
@@ -1087,8 +1033,8 @@ static void check_process_timers(struct task_struct *tsk,
 		struct cpu_timer_list *tl = list_first_entry(timers,
 						      struct cpu_timer_list,
 						      entry);
-		if (!--maxfire || ptime < tl->expires.cpu) {
-			prof_expires = tl->expires.cpu;
+		if (!--maxfire || ptime < tl->expires) {
+			prof_expires = tl->expires;
 			break;
 		}
 		tl->firing = 1;
@@ -1102,8 +1048,8 @@ static void check_process_timers(struct task_struct *tsk,
 		struct cpu_timer_list *tl = list_first_entry(timers,
 						      struct cpu_timer_list,
 						      entry);
-		if (!--maxfire || utime < tl->expires.cpu) {
-			virt_expires = tl->expires.cpu;
+		if (!--maxfire || utime < tl->expires) {
+			virt_expires = tl->expires;
 			break;
 		}
 		tl->firing = 1;
@@ -1117,8 +1063,8 @@ static void check_process_timers(struct task_struct *tsk,
 		struct cpu_timer_list *tl = list_first_entry(timers,
 						      struct cpu_timer_list,
 						      entry);
-		if (!--maxfire || sum_sched_runtime < tl->expires.sched) {
-			sched_expires = tl->expires.sched;
+		if (!--maxfire || sum_sched_runtime < tl->expires) {
+			sched_expires = tl->expires;
 			break;
 		}
 		tl->firing = 1;
@@ -1162,8 +1108,8 @@ static void check_process_timers(struct task_struct *tsk,
 		}
 	}
 
-	sig->cputime_expires.prof_exp = prof_expires;
-	sig->cputime_expires.virt_exp = virt_expires;
+	sig->cputime_expires.prof_exp = expires_to_cputime(prof_expires);
+	sig->cputime_expires.virt_exp = expires_to_cputime(virt_expires);
 	sig->cputime_expires.sched_exp = sched_expires;
 	if (task_cputime_zero(&sig->cputime_expires))
 		stop_process_timers(sig);
@@ -1176,7 +1122,7 @@ static void check_process_timers(struct task_struct *tsk,
 void posix_cpu_timer_schedule(struct k_itimer *timer)
 {
 	struct task_struct *p = timer->it.cpu.task;
-	union cpu_time_count now;
+	unsigned long long now;
 
 	if (unlikely(p == NULL))
 		/*
@@ -1205,7 +1151,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
 		 */
 		put_task_struct(p);
 		timer->it.cpu.task = p = NULL;
-		timer->it.cpu.expires.sched = 0;
+		timer->it.cpu.expires = 0;
 		goto out_unlock;
 	} else if (unlikely(p->exit_state) && thread_group_empty(p)) {
 		/*
@@ -1387,7 +1333,7 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
 			   cputime_t *newval, cputime_t *oldval)
 {
-	union cpu_time_count now;
+	unsigned long long now;
 
 	BUG_ON(clock_idx == CPUCLOCK_SCHED);
 	cpu_timer_sample_group(clock_idx, tsk, &now);
@@ -1399,17 +1345,17 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
 	 * it to be absolute.
 	 */
 	if (*oldval) {
-		if (*oldval <= now.cpu) {
+		if (*oldval <= now) {
 			/* Just about to fire. */
 			*oldval = cputime_one_jiffy;
 		} else {
-			*oldval -= now.cpu;
+			*oldval -= now;
 		}
 	}
 
 	if (!*newval)
 		goto out;
-	*newval += now.cpu;
+	*newval += now;
 }
 
 /*
@@ -1459,7 +1405,7 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
 	}
 
 	while (!signal_pending(current)) {
-		if (timer.it.cpu.expires.sched == 0) {
+		if (timer.it.cpu.expires == 0) {
 			/*
 			 * Our timer fired and was reset, below
 			 * deletion can not fail.