author     Trond Myklebust <Trond.Myklebust@netapp.com>   2005-10-27 19:12:49 -0400
committer  Trond Myklebust <Trond.Myklebust@netapp.com>   2005-10-27 19:12:49 -0400
commit     4c2cb58c552a34744979a99ccf01762d5eb7e288 (patch)
tree       fd35360eb0cb08b07f3a5f4bdf1ebd90a769311d /kernel/posix-cpu-timers.c
parent     34123da66e613602de5a886b05c875b6a91b8ed2 (diff)
parent     72ab373a5688a78cbdaf3bf96012e597d5399bb7 (diff)
Merge /home/trondmy/scm/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'kernel/posix-cpu-timers.c')
 -rw-r--r--  kernel/posix-cpu-timers.c  110
 1 file changed, 63 insertions(+), 47 deletions(-)
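The first hunks below touch bump_cpu_timer(), which advances an expired timer past the current clock sample and accounts the overrun in O(log n) steps by repeatedly doubling and then halving the increment. As a reading aid only, here is a minimal standalone sketch of that doubling/halving accounting in plain C; the names (bump) and the plain unsigned long long arithmetic are illustrative stand-ins, not the kernel's union cpu_time_count API.

    #include <stdio.h>

    /*
     * Sketch of the overrun accounting pattern used by bump_cpu_timer():
     * advance `*expires` past `now` in whole multiples of `incr` (assumed
     * non-zero), counting in `*overrun` how many periods were skipped,
     * without looping once per period.
     */
    static void bump(unsigned long long *expires, unsigned long long incr,
                     unsigned long long now, unsigned long long *overrun)
    {
        unsigned long long delta, step;
        int i;

        if (now < *expires)
            return;                         /* timer has not expired yet */

        delta = now - *expires + incr;
        step = incr;

        /* Double the step until it covers at least half of delta ... */
        for (i = 0; step < delta - step; i++)
            step <<= 1;

        /* ... then walk back down, taking every step that still fits. */
        for (; i >= 0; step >>= 1, i--) {
            if (delta < step)
                continue;
            *expires += step;
            *overrun += 1ULL << i;          /* 2^i periods skipped at once */
            delta -= step;
        }
    }

    int main(void)
    {
        unsigned long long expires = 100, overrun = 0;

        bump(&expires, 7, 1000, &overrun);  /* 7-unit period, clock now at 1000 */
        printf("expires=%llu overrun=%llu\n", expires, overrun);
        return 0;
    }

With a 7-unit period starting at 100 and the clock at 1000, this lands on 1003 after counting 129 elapsed periods, the same result as stepping one period at a time but in logarithmically many iterations.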
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index b3f3edc475de..bf374fceb39c 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -91,7 +91,7 @@ static inline union cpu_time_count cpu_time_sub(clockid_t which_clock,
  * Update expiry time from increment, and increase overrun count,
  * given the current clock sample.
  */
-static inline void bump_cpu_timer(struct k_itimer *timer,
+static void bump_cpu_timer(struct k_itimer *timer,
                                   union cpu_time_count now)
 {
         int i;
@@ -110,7 +110,7 @@ static inline void bump_cpu_timer(struct k_itimer *timer,
                 for (i = 0; incr < delta - incr; i++)
                         incr = incr << 1;
                 for (; i >= 0; incr >>= 1, i--) {
-                        if (delta <= incr)
+                        if (delta < incr)
                                 continue;
                         timer->it.cpu.expires.sched += incr;
                         timer->it_overrun += 1 << i;
@@ -128,7 +128,7 @@ static inline void bump_cpu_timer(struct k_itimer *timer,
                 for (i = 0; cputime_lt(incr, cputime_sub(delta, incr)); i++)
                         incr = cputime_add(incr, incr);
                 for (; i >= 0; incr = cputime_halve(incr), i--) {
-                        if (cputime_le(delta, incr))
+                        if (cputime_lt(delta, incr))
                                 continue;
                         timer->it.cpu.expires.cpu =
                                 cputime_add(timer->it.cpu.expires.cpu, incr);
@@ -380,28 +380,31 @@ int posix_cpu_timer_create(struct k_itimer *new_timer)
 int posix_cpu_timer_del(struct k_itimer *timer)
 {
         struct task_struct *p = timer->it.cpu.task;
+        int ret = 0;
 
-        if (timer->it.cpu.firing)
-                return TIMER_RETRY;
-
-        if (unlikely(p == NULL))
-                return 0;
+        if (likely(p != NULL)) {
+                read_lock(&tasklist_lock);
+                if (unlikely(p->signal == NULL)) {
+                        /*
+                         * We raced with the reaping of the task.
+                         * The deletion should have cleared us off the list.
+                         */
+                        BUG_ON(!list_empty(&timer->it.cpu.entry));
+                } else {
+                        spin_lock(&p->sighand->siglock);
+                        if (timer->it.cpu.firing)
+                                ret = TIMER_RETRY;
+                        else
+                                list_del(&timer->it.cpu.entry);
+                        spin_unlock(&p->sighand->siglock);
+                }
+                read_unlock(&tasklist_lock);
 
-        spin_lock(&p->sighand->siglock);
-        if (!list_empty(&timer->it.cpu.entry)) {
-                /*
-                 * Take us off the task's timer list.  We don't need to
-                 * take tasklist_lock and check for the task being reaped.
-                 * If it was reaped, it already called posix_cpu_timers_exit
-                 * and posix_cpu_timers_exit_group to clear all the timers
-                 * that pointed to it.
-                 */
-                list_del(&timer->it.cpu.entry);
-                put_task_struct(p);
+                if (!ret)
+                        put_task_struct(p);
         }
-        spin_unlock(&p->sighand->siglock);
 
-        return 0;
+        return ret;
 }
 
 /*
@@ -418,8 +421,6 @@ static void cleanup_timers(struct list_head *head,
         cputime_t ptime = cputime_add(utime, stime);
 
         list_for_each_entry_safe(timer, next, head, entry) {
-                put_task_struct(timer->task);
-                timer->task = NULL;
                 list_del_init(&timer->entry);
                 if (cputime_lt(timer->expires.cpu, ptime)) {
                         timer->expires.cpu = cputime_zero;
@@ -431,8 +432,6 @@ static void cleanup_timers(struct list_head *head,
 
         ++head;
         list_for_each_entry_safe(timer, next, head, entry) {
-                put_task_struct(timer->task);
-                timer->task = NULL;
                 list_del_init(&timer->entry);
                 if (cputime_lt(timer->expires.cpu, utime)) {
                         timer->expires.cpu = cputime_zero;
@@ -444,8 +443,6 @@ static void cleanup_timers(struct list_head *head,
 
         ++head;
         list_for_each_entry_safe(timer, next, head, entry) {
-                put_task_struct(timer->task);
-                timer->task = NULL;
                 list_del_init(&timer->entry);
                 if (timer->expires.sched < sched_time) {
                         timer->expires.sched = 0;
@@ -489,6 +486,9 @@ static void process_timer_rebalance(struct task_struct *p,
         struct task_struct *t = p;
         unsigned int nthreads = atomic_read(&p->signal->live);
 
+        if (!nthreads)
+                return;
+
         switch (clock_idx) {
         default:
                 BUG();
@@ -497,7 +497,7 @@ static void process_timer_rebalance(struct task_struct *p,
                 left = cputime_div(cputime_sub(expires.cpu, val.cpu),
                                    nthreads);
                 do {
-                        if (!unlikely(t->exit_state)) {
+                        if (!unlikely(t->flags & PF_EXITING)) {
                                 ticks = cputime_add(prof_ticks(t), left);
                                 if (cputime_eq(t->it_prof_expires,
                                                cputime_zero) ||
@@ -512,7 +512,7 @@ static void process_timer_rebalance(struct task_struct *p,
                 left = cputime_div(cputime_sub(expires.cpu, val.cpu),
                                    nthreads);
                 do {
-                        if (!unlikely(t->exit_state)) {
+                        if (!unlikely(t->flags & PF_EXITING)) {
                                 ticks = cputime_add(virt_ticks(t), left);
                                 if (cputime_eq(t->it_virt_expires,
                                                cputime_zero) ||
@@ -527,7 +527,7 @@ static void process_timer_rebalance(struct task_struct *p,
                 nsleft = expires.sched - val.sched;
                 do_div(nsleft, nthreads);
                 do {
-                        if (!unlikely(t->exit_state)) {
+                        if (!unlikely(t->flags & PF_EXITING)) {
                                 ns = t->sched_time + nsleft;
                                 if (t->it_sched_expires == 0 ||
                                     t->it_sched_expires > ns) {
@@ -566,6 +566,9 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
         struct cpu_timer_list *next;
         unsigned long i;
 
+        if (CPUCLOCK_PERTHREAD(timer->it_clock) && (p->flags & PF_EXITING))
+                return;
+
         head = (CPUCLOCK_PERTHREAD(timer->it_clock) ?
                 p->cpu_timers : p->signal->cpu_timers);
         head += CPUCLOCK_WHICH(timer->it_clock);
@@ -576,17 +579,15 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
         listpos = head;
         if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
                 list_for_each_entry(next, head, entry) {
-                        if (next->expires.sched > nt->expires.sched) {
-                                listpos = &next->entry;
+                        if (next->expires.sched > nt->expires.sched)
                                 break;
-                        }
+                        listpos = &next->entry;
                 }
         } else {
                 list_for_each_entry(next, head, entry) {
-                        if (cputime_gt(next->expires.cpu, nt->expires.cpu)) {
-                                listpos = &next->entry;
+                        if (cputime_gt(next->expires.cpu, nt->expires.cpu))
                                 break;
-                        }
+                        listpos = &next->entry;
                 }
         }
         list_add(&nt->entry, listpos);
@@ -730,9 +731,15 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
          * Disarm any old timer after extracting its expiry time.
          */
         BUG_ON(!irqs_disabled());
+
+        ret = 0;
         spin_lock(&p->sighand->siglock);
         old_expires = timer->it.cpu.expires;
-        list_del_init(&timer->it.cpu.entry);
+        if (unlikely(timer->it.cpu.firing)) {
+                timer->it.cpu.firing = -1;
+                ret = TIMER_RETRY;
+        } else
+                list_del_init(&timer->it.cpu.entry);
         spin_unlock(&p->sighand->siglock);
 
         /*
@@ -780,7 +787,7 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
                 }
         }
 
-        if (unlikely(timer->it.cpu.firing)) {
+        if (unlikely(ret)) {
                 /*
                  * We are colliding with the timer actually firing.
                  * Punt after filling in the timer's old value, and
@@ -788,8 +795,6 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
                  * it as an overrun (thanks to bump_cpu_timer above).
                  */
                 read_unlock(&tasklist_lock);
-                timer->it.cpu.firing = -1;
-                ret = TIMER_RETRY;
                 goto out;
         }
 
@@ -955,14 +960,16 @@ void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
 static void check_thread_timers(struct task_struct *tsk,
                                 struct list_head *firing)
 {
+        int maxfire;
         struct list_head *timers = tsk->cpu_timers;
 
+        maxfire = 20;
         tsk->it_prof_expires = cputime_zero;
         while (!list_empty(timers)) {
                 struct cpu_timer_list *t = list_entry(timers->next,
                                                       struct cpu_timer_list,
                                                       entry);
-                if (cputime_lt(prof_ticks(tsk), t->expires.cpu)) {
+                if (!--maxfire || cputime_lt(prof_ticks(tsk), t->expires.cpu)) {
                         tsk->it_prof_expires = t->expires.cpu;
                         break;
                 }
@@ -971,12 +978,13 @@ static void check_thread_timers(struct task_struct *tsk,
         }
 
         ++timers;
+        maxfire = 20;
         tsk->it_virt_expires = cputime_zero;
         while (!list_empty(timers)) {
                 struct cpu_timer_list *t = list_entry(timers->next,
                                                       struct cpu_timer_list,
                                                       entry);
-                if (cputime_lt(virt_ticks(tsk), t->expires.cpu)) {
+                if (!--maxfire || cputime_lt(virt_ticks(tsk), t->expires.cpu)) {
                         tsk->it_virt_expires = t->expires.cpu;
                         break;
                 }
@@ -985,12 +993,13 @@ static void check_thread_timers(struct task_struct *tsk,
         }
 
         ++timers;
+        maxfire = 20;
         tsk->it_sched_expires = 0;
         while (!list_empty(timers)) {
                 struct cpu_timer_list *t = list_entry(timers->next,
                                                       struct cpu_timer_list,
                                                       entry);
-                if (tsk->sched_time < t->expires.sched) {
+                if (!--maxfire || tsk->sched_time < t->expires.sched) {
                         tsk->it_sched_expires = t->expires.sched;
                         break;
                 }
@@ -1007,6 +1016,7 @@ static void check_thread_timers(struct task_struct *tsk,
 static void check_process_timers(struct task_struct *tsk,
                                  struct list_head *firing)
 {
+        int maxfire;
         struct signal_struct *const sig = tsk->signal;
         cputime_t utime, stime, ptime, virt_expires, prof_expires;
         unsigned long long sched_time, sched_expires;
@@ -1039,12 +1049,13 @@ static void check_process_timers(struct task_struct *tsk,
         } while (t != tsk);
         ptime = cputime_add(utime, stime);
 
+        maxfire = 20;
         prof_expires = cputime_zero;
         while (!list_empty(timers)) {
                 struct cpu_timer_list *t = list_entry(timers->next,
                                                       struct cpu_timer_list,
                                                       entry);
-                if (cputime_lt(ptime, t->expires.cpu)) {
+                if (!--maxfire || cputime_lt(ptime, t->expires.cpu)) {
                         prof_expires = t->expires.cpu;
                         break;
                 }
@@ -1053,12 +1064,13 @@ static void check_process_timers(struct task_struct *tsk,
         }
 
         ++timers;
+        maxfire = 20;
         virt_expires = cputime_zero;
         while (!list_empty(timers)) {
                 struct cpu_timer_list *t = list_entry(timers->next,
                                                       struct cpu_timer_list,
                                                       entry);
-                if (cputime_lt(utime, t->expires.cpu)) {
+                if (!--maxfire || cputime_lt(utime, t->expires.cpu)) {
                         virt_expires = t->expires.cpu;
                         break;
                 }
@@ -1067,12 +1079,13 @@ static void check_process_timers(struct task_struct *tsk,
         }
 
         ++timers;
+        maxfire = 20;
         sched_expires = 0;
         while (!list_empty(timers)) {
                 struct cpu_timer_list *t = list_entry(timers->next,
                                                       struct cpu_timer_list,
                                                       entry);
-                if (sched_time < t->expires.sched) {
+                if (!--maxfire || sched_time < t->expires.sched) {
                         sched_expires = t->expires.sched;
                         break;
                 }
@@ -1155,6 +1168,9 @@ static void check_process_timers(struct task_struct *tsk,
                 unsigned long long sched_left, sched;
                 const unsigned int nthreads = atomic_read(&sig->live);
 
+                if (!nthreads)
+                        return;
+
                 prof_left = cputime_sub(prof_expires, utime);
                 prof_left = cputime_sub(prof_left, stime);
                 prof_left = cputime_div(prof_left, nthreads);
@@ -1191,7 +1207,7 @@ static void check_process_timers(struct task_struct *tsk,
 
                 do {
                         t = next_thread(t);
-                } while (unlikely(t->exit_state));
+                } while (unlikely(t->flags & PF_EXITING));
         } while (t != tsk);
         }
 }