author      Stanislaw Gruszka <sgruszka@redhat.com>    2010-03-11 17:04:37 -0500
committer   Thomas Gleixner <tglx@linutronix.de>       2010-03-12 16:40:39 -0500
commit      f55db609042faecd5e518ce372b87f846659b32e (patch)
tree        ca544704af0825aad53d39fa517a945ba89e59ad /kernel/posix-cpu-timers.c
parent      522dba7134d6b2e5821d3457f7941ec34f668e6d (diff)
cpu-timers: Simplify RLIMIT_CPU handling
Always set the signal->cputime_expires expiration cache when setting a
new itimer, a POSIX 1.b timer, or RLIMIT_CPU. Since the prof_exp
expiration cache is already initialized during fork(), this lets us
remove the "RLIMIT_CPU != inf" check from fastpath_timer_check() and do
some other cleanups.
Checked for regressions using the test cases from:
http://marc.info/?l=linux-kernel&m=123749066504641&w=4
http://marc.info/?l=linux-kernel&m=123811277916642&w=2
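For reference, a minimal userspace check in the same spirit as the linked test cases (an illustrative sketch written for this page, not one of the programs above): lower the soft RLIMIT_CPU, burn CPU time, and confirm that SIGXCPU arrives once the limit is consumed.

/* rlimit_cpu_check.c - hypothetical smoke test, not from the patch series.
 * Build: gcc -O2 -o rlimit_cpu_check rlimit_cpu_check.c
 */
#include <signal.h>
#include <stdio.h>
#include <sys/resource.h>
#include <time.h>

static volatile sig_atomic_t got_xcpu;

static void xcpu_handler(int sig)
{
        (void)sig;
        got_xcpu = 1;
}

int main(void)
{
        struct rlimit rl = { .rlim_cur = 1, .rlim_max = 2 };    /* soft 1s, hard 2s of CPU time */

        if (signal(SIGXCPU, xcpu_handler) == SIG_ERR || setrlimit(RLIMIT_CPU, &rl)) {
                perror("setup");
                return 1;
        }

        /* Spin until the kernel delivers SIGXCPU for crossing the soft limit. */
        while (!got_xcpu)
                ;

        printf("SIGXCPU after ~%ld CPU seconds\n", (long)(clock() / CLOCKS_PER_SEC));
        return 0;
}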
Signed-off-by: Stanislaw Gruszka <sgruszka@redhat.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/posix-cpu-timers.c')
-rw-r--r--   kernel/posix-cpu-timers.c | 75
1 file changed, 27 insertions(+), 48 deletions(-)
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 1a22dfd42df9..d01e0a348e61 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -11,19 +11,18 @@
 #include <trace/events/timer.h>
 
 /*
- * Called after updating RLIMIT_CPU to set timer expiration if necessary.
+ * Called after updating RLIMIT_CPU to run cpu timer and update
+ * tsk->signal->cputime_expires expiration cache if necessary. Needs
+ * siglock protection since other code may update expiration cache as
+ * well.
  */
 void update_rlimit_cpu(unsigned long rlim_new)
 {
         cputime_t cputime = secs_to_cputime(rlim_new);
-        struct signal_struct *const sig = current->signal;
 
-        if (cputime_eq(sig->it[CPUCLOCK_PROF].expires, cputime_zero) ||
-            cputime_gt(sig->it[CPUCLOCK_PROF].expires, cputime)) {
-                spin_lock_irq(&current->sighand->siglock);
-                set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
-                spin_unlock_irq(&current->sighand->siglock);
-        }
+        spin_lock_irq(&current->sighand->siglock);
+        set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
+        spin_unlock_irq(&current->sighand->siglock);
 }
 
 static int check_clock(const clockid_t which_clock)
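The rewritten update_rlimit_cpu() no longer pre-checks the ITIMER_PROF expiry; it takes siglock and lets set_process_cpu_timer() decide whether the cached expiration needs to move. What makes the unconditional call safe is that the cache only ever tightens. A small userspace model of that rule (names such as cache_prof_expiry are illustrative stand-ins, not kernel functions, and plain seconds stand in for cputime_t):

#include <stdio.h>

/* 0 means "no expiry cached"; values model CPU seconds, not cputime_t. */
struct signal_model {
        unsigned long prof_exp;
};

/* Models the caching rule in set_process_cpu_timer(): keep the earliest expiry. */
static void cache_prof_expiry(struct signal_model *sig, unsigned long newval)
{
        if (sig->prof_exp == 0 || sig->prof_exp > newval)
                sig->prof_exp = newval;
}

/* Models the simplified update_rlimit_cpu(): no pre-check, call unconditionally. */
static void update_rlimit_cpu_model(struct signal_model *sig, unsigned long rlim_new)
{
        cache_prof_expiry(sig, rlim_new);
}

int main(void)
{
        struct signal_model sig = { 0 };

        update_rlimit_cpu_model(&sig, 10);      /* RLIMIT_CPU = 10s -> cache 10 */
        update_rlimit_cpu_model(&sig, 30);      /* looser limit   -> cache stays 10 */
        update_rlimit_cpu_model(&sig, 5);       /* tighter limit  -> cache drops to 5 */
        printf("prof_exp = %lu\n", sig.prof_exp);       /* prints 5 */
        return 0;
}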
@@ -564,7 +563,6 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
         struct list_head *head, *listpos;
         struct cpu_timer_list *const nt = &timer->it.cpu;
         struct cpu_timer_list *next;
-        unsigned long i;
 
         head = (CPUCLOCK_PERTHREAD(timer->it_clock) ?
                 p->cpu_timers : p->signal->cpu_timers);
@@ -630,20 +628,11 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
                 default:
                         BUG();
                 case CPUCLOCK_VIRT:
-                        if (expires_le(sig->it[CPUCLOCK_VIRT].expires,
-                                       exp->cpu))
-                                break;
-                        sig->cputime_expires.virt_exp = exp->cpu;
-                        break;
+                        if (expires_gt(sig->cputime_expires.virt_exp, exp->cpu))
+                                sig->cputime_expires.virt_exp = exp->cpu;
                 case CPUCLOCK_PROF:
-                        if (expires_le(sig->it[CPUCLOCK_PROF].expires,
-                                       exp->cpu))
-                                break;
-                        i = sig->rlim[RLIMIT_CPU].rlim_cur;
-                        if (i != RLIM_INFINITY &&
-                            i <= cputime_to_secs(exp->cpu))
-                                break;
-                        sig->cputime_expires.prof_exp = exp->cpu;
+                        if (expires_gt(sig->cputime_expires.prof_exp, exp->cpu))
+                                sig->cputime_expires.prof_exp = exp->cpu;
                         break;
                 case CPUCLOCK_SCHED:
                         sig->cputime_expires.sched_exp = exp->sched;
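Both new branches lean on the expires_gt() helper used elsewhere in this file; presumably it treats a zero cputime as "nothing cached yet", so an empty cache always accepts the new expiry. A sketch of that convention (an assumption about the helper, not a quote from the tree):

/* Sketch only: cputime_zero means "no expiry cached", so it always loses. */
static inline int expires_gt(cputime_t expires, cputime_t new_exp)
{
        return cputime_eq(expires, cputime_zero) ||
               cputime_gt(expires, new_exp);
}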
@@ -1386,7 +1375,7 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
                         return 1;
         }
 
-        return sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY;
+        return 0;
 }
 
 /*
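With RLIMIT_CPU folded into the prof_exp cache at setrlimit()/fork() time, fastpath_timer_check() no longer needs the rlimit special case: an empty expiration cache now really means there is nothing to check. A rough userspace model of that invariant (simplified fields, not the kernel's task_cputime machinery):

/* Userspace model of the simplified fastpath: all caches empty -> return 0. */
struct expires_cache {
        unsigned long prof_exp;         /* 0 == not set */
        unsigned long virt_exp;
        unsigned long sched_exp;
};

static int fastpath_timer_check_model(const struct expires_cache *c,
                                      unsigned long prof, unsigned long virt,
                                      unsigned long sched)
{
        if (c->prof_exp && prof >= c->prof_exp)
                return 1;
        if (c->virt_exp && virt >= c->virt_exp)
                return 1;
        if (c->sched_exp && sched >= c->sched_exp)
                return 1;
        return 0;       /* previously: sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY */
}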
@@ -1452,21 +1441,23 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 }
 
 /*
- * Set one of the process-wide special case CPU timers.
+ * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
  * The tsk->sighand->siglock must be held by the caller.
- * The *newval argument is relative and we update it to be absolute, *oldval
- * is absolute and we update it to be relative.
  */
 void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
                            cputime_t *newval, cputime_t *oldval)
 {
         union cpu_time_count now;
-        struct list_head *head;
 
         BUG_ON(clock_idx == CPUCLOCK_SCHED);
         cpu_timer_sample_group(clock_idx, tsk, &now);
 
         if (oldval) {
+                /*
+                 * We are setting itimer. The *oldval is absolute and we update
+                 * it to be relative, *newval argument is relative and we update
+                 * it to be absolute.
+                 */
                 if (!cputime_eq(*oldval, cputime_zero)) {
                         if (cputime_le(*oldval, now.cpu)) {
                                 /* Just about to fire. */
@@ -1479,33 +1470,21 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
                 if (cputime_eq(*newval, cputime_zero))
                         return;
                 *newval = cputime_add(*newval, now.cpu);
-
-                /*
-                 * If the RLIMIT_CPU timer will expire before the
-                 * ITIMER_PROF timer, we have nothing else to do.
-                 */
-                if (tsk->signal->rlim[RLIMIT_CPU].rlim_cur
-                    < cputime_to_secs(*newval))
-                        return;
         }
 
         /*
-         * Check whether there are any process timers already set to fire
-         * before this one. If so, we don't have anything more to do.
+         * Update expiration cache if we are the earliest timer, or eventually
+         * RLIMIT_CPU limit is earlier than prof_exp cpu timer expire.
          */
-        head = &tsk->signal->cpu_timers[clock_idx];
-        if (list_empty(head) ||
-            cputime_ge(list_first_entry(head,
-                                struct cpu_timer_list, entry)->expires.cpu,
-                       *newval)) {
-                switch (clock_idx) {
-                case CPUCLOCK_PROF:
+        switch (clock_idx) {
+        case CPUCLOCK_PROF:
+                if (expires_gt(tsk->signal->cputime_expires.prof_exp, *newval))
                         tsk->signal->cputime_expires.prof_exp = *newval;
-                        break;
-                case CPUCLOCK_VIRT:
+                break;
+        case CPUCLOCK_VIRT:
+                if (expires_gt(tsk->signal->cputime_expires.virt_exp, *newval))
                         tsk->signal->cputime_expires.virt_exp = *newval;
-                        break;
-                }
+                break;
         }
 }
 
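The relocated comment describes the *oldval / *newval convention that the itimer path relies on: the caller passes a relative new value and gets the previous setting back as time remaining. A userspace model of that flow (function name, plain "CPU seconds" and the sample values are illustrative, not kernel code):

#include <stdio.h>

static void set_process_cpu_timer_model(unsigned long now, unsigned long *cache_exp,
                                        unsigned long *newval, unsigned long *oldval)
{
        if (oldval) {
                if (*oldval != 0) {
                        /* Report time remaining; clamp to 1 when just about to fire. */
                        *oldval = (*oldval <= now) ? 1 : *oldval - now;
                }
                if (*newval == 0)
                        return;         /* disarming: nothing to cache */
                *newval += now;         /* relative -> absolute */
        }
        if (*cache_exp == 0 || *cache_exp > *newval)
                *cache_exp = *newval;   /* keep the earliest cached expiry */
}

int main(void)
{
        unsigned long prof_exp = 0;     /* models signal->cputime_expires.prof_exp */
        unsigned long now = 40;         /* CPU time consumed so far */
        unsigned long newval = 10;      /* new itimer: fire 10s of CPU time from now */
        unsigned long oldval = 45;      /* previous absolute expiry */

        set_process_cpu_timer_model(now, &prof_exp, &newval, &oldval);
        /* prints: oldval=5 newval=50 prof_exp=50 */
        printf("oldval=%lu newval=%lu prof_exp=%lu\n", oldval, newval, prof_exp);
        return 0;
}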