Diffstat (limited to 'kernel/posix-cpu-timers.c')
-rw-r--r--   kernel/posix-cpu-timers.c   119
1 file changed, 63 insertions(+), 56 deletions(-)
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 7a51a5597c33..383ba22f0b62 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -91,7 +91,7 @@ static inline union cpu_time_count cpu_time_sub(clockid_t which_clock,
  * Update expiry time from increment, and increase overrun count,
  * given the current clock sample.
  */
-static inline void bump_cpu_timer(struct k_itimer *timer,
+static void bump_cpu_timer(struct k_itimer *timer,
 				  union cpu_time_count now)
 {
 	int i;
@@ -110,7 +110,7 @@ static inline void bump_cpu_timer(struct k_itimer *timer,
 		for (i = 0; incr < delta - incr; i++)
 			incr = incr << 1;
 		for (; i >= 0; incr >>= 1, i--) {
-			if (delta <= incr)
+			if (delta < incr)
 				continue;
 			timer->it.cpu.expires.sched += incr;
 			timer->it_overrun += 1 << i;
@@ -128,7 +128,7 @@ static inline void bump_cpu_timer(struct k_itimer *timer,
 		for (i = 0; cputime_lt(incr, cputime_sub(delta, incr)); i++)
 			incr = cputime_add(incr, incr);
 		for (; i >= 0; incr = cputime_halve(incr), i--) {
-			if (cputime_le(delta, incr))
+			if (cputime_lt(delta, incr))
 				continue;
 			timer->it.cpu.expires.cpu =
 				cputime_add(timer->it.cpu.expires.cpu, incr);
@@ -380,14 +380,9 @@ int posix_cpu_timer_create(struct k_itimer *new_timer)
 int posix_cpu_timer_del(struct k_itimer *timer)
 {
 	struct task_struct *p = timer->it.cpu.task;
+	int ret = 0;
 
-	if (timer->it.cpu.firing)
-		return TIMER_RETRY;
-
-	if (unlikely(p == NULL))
-		return 0;
-
-	if (!list_empty(&timer->it.cpu.entry)) {
+	if (likely(p != NULL)) {
 		read_lock(&tasklist_lock);
 		if (unlikely(p->signal == NULL)) {
 			/*
@@ -396,18 +391,20 @@ int posix_cpu_timer_del(struct k_itimer *timer)
 			 */
 			BUG_ON(!list_empty(&timer->it.cpu.entry));
 		} else {
-			/*
-			 * Take us off the task's timer list.
-			 */
 			spin_lock(&p->sighand->siglock);
-			list_del(&timer->it.cpu.entry);
+			if (timer->it.cpu.firing)
+				ret = TIMER_RETRY;
+			else
+				list_del(&timer->it.cpu.entry);
 			spin_unlock(&p->sighand->siglock);
 		}
 		read_unlock(&tasklist_lock);
+
+		if (!ret)
+			put_task_struct(p);
 	}
-	put_task_struct(p);
 
-	return 0;
+	return ret;
 }
 
 /*
@@ -424,8 +421,6 @@ static void cleanup_timers(struct list_head *head,
 	cputime_t ptime = cputime_add(utime, stime);
 
 	list_for_each_entry_safe(timer, next, head, entry) {
-		put_task_struct(timer->task);
-		timer->task = NULL;
 		list_del_init(&timer->entry);
 		if (cputime_lt(timer->expires.cpu, ptime)) {
 			timer->expires.cpu = cputime_zero;
@@ -437,8 +432,6 @@ static void cleanup_timers(struct list_head *head,
 
 	++head;
 	list_for_each_entry_safe(timer, next, head, entry) {
-		put_task_struct(timer->task);
-		timer->task = NULL;
 		list_del_init(&timer->entry);
 		if (cputime_lt(timer->expires.cpu, utime)) {
 			timer->expires.cpu = cputime_zero;
@@ -450,8 +443,6 @@ static void cleanup_timers(struct list_head *head,
 
 	++head;
 	list_for_each_entry_safe(timer, next, head, entry) {
-		put_task_struct(timer->task);
-		timer->task = NULL;
 		list_del_init(&timer->entry);
 		if (timer->expires.sched < sched_time) {
 			timer->expires.sched = 0;
@@ -495,6 +486,9 @@ static void process_timer_rebalance(struct task_struct *p,
 	struct task_struct *t = p;
 	unsigned int nthreads = atomic_read(&p->signal->live);
 
+	if (!nthreads)
+		return;
+
 	switch (clock_idx) {
 	default:
 		BUG();
@@ -582,17 +576,15 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
 	listpos = head;
 	if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
 		list_for_each_entry(next, head, entry) {
-			if (next->expires.sched > nt->expires.sched) {
-				listpos = &next->entry;
+			if (next->expires.sched > nt->expires.sched)
 				break;
-			}
+			listpos = &next->entry;
 		}
 	} else {
 		list_for_each_entry(next, head, entry) {
-			if (cputime_gt(next->expires.cpu, nt->expires.cpu)) {
-				listpos = &next->entry;
+			if (cputime_gt(next->expires.cpu, nt->expires.cpu))
 				break;
-			}
+			listpos = &next->entry;
 		}
 	}
 	list_add(&nt->entry, listpos);
@@ -736,9 +728,15 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 	 * Disarm any old timer after extracting its expiry time.
 	 */
 	BUG_ON(!irqs_disabled());
+
+	ret = 0;
 	spin_lock(&p->sighand->siglock);
 	old_expires = timer->it.cpu.expires;
-	list_del_init(&timer->it.cpu.entry);
+	if (unlikely(timer->it.cpu.firing)) {
+		timer->it.cpu.firing = -1;
+		ret = TIMER_RETRY;
+	} else
+		list_del_init(&timer->it.cpu.entry);
 	spin_unlock(&p->sighand->siglock);
 
 	/*
@@ -786,7 +784,7 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 		}
 	}
 
-	if (unlikely(timer->it.cpu.firing)) {
+	if (unlikely(ret)) {
 		/*
 		 * We are colliding with the timer actually firing.
 		 * Punt after filling in the timer's old value, and
@@ -794,8 +792,6 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 		 * it as an overrun (thanks to bump_cpu_timer above).
 		 */
 		read_unlock(&tasklist_lock);
-		timer->it.cpu.firing = -1;
-		ret = TIMER_RETRY;
 		goto out;
 	}
 
@@ -961,14 +957,16 @@ void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
 static void check_thread_timers(struct task_struct *tsk,
 				struct list_head *firing)
 {
+	int maxfire;
 	struct list_head *timers = tsk->cpu_timers;
 
+	maxfire = 20;
 	tsk->it_prof_expires = cputime_zero;
 	while (!list_empty(timers)) {
 		struct cpu_timer_list *t = list_entry(timers->next,
						      struct cpu_timer_list,
						      entry);
-		if (cputime_lt(prof_ticks(tsk), t->expires.cpu)) {
+		if (!--maxfire || cputime_lt(prof_ticks(tsk), t->expires.cpu)) {
 			tsk->it_prof_expires = t->expires.cpu;
 			break;
 		}
@@ -977,12 +975,13 @@ static void check_thread_timers(struct task_struct *tsk,
 	}
 
 	++timers;
+	maxfire = 20;
 	tsk->it_virt_expires = cputime_zero;
 	while (!list_empty(timers)) {
 		struct cpu_timer_list *t = list_entry(timers->next,
						      struct cpu_timer_list,
						      entry);
-		if (cputime_lt(virt_ticks(tsk), t->expires.cpu)) {
+		if (!--maxfire || cputime_lt(virt_ticks(tsk), t->expires.cpu)) {
 			tsk->it_virt_expires = t->expires.cpu;
 			break;
 		}
@@ -991,12 +990,13 @@ static void check_thread_timers(struct task_struct *tsk,
 	}
 
 	++timers;
+	maxfire = 20;
 	tsk->it_sched_expires = 0;
 	while (!list_empty(timers)) {
 		struct cpu_timer_list *t = list_entry(timers->next,
						      struct cpu_timer_list,
						      entry);
-		if (tsk->sched_time < t->expires.sched) {
+		if (!--maxfire || tsk->sched_time < t->expires.sched) {
 			tsk->it_sched_expires = t->expires.sched;
 			break;
 		}
@@ -1013,6 +1013,7 @@ static void check_thread_timers(struct task_struct *tsk,
 static void check_process_timers(struct task_struct *tsk,
 				 struct list_head *firing)
 {
+	int maxfire;
 	struct signal_struct *const sig = tsk->signal;
 	cputime_t utime, stime, ptime, virt_expires, prof_expires;
 	unsigned long long sched_time, sched_expires;
@@ -1045,12 +1046,13 @@ static void check_process_timers(struct task_struct *tsk,
 	} while (t != tsk);
 	ptime = cputime_add(utime, stime);
 
+	maxfire = 20;
 	prof_expires = cputime_zero;
 	while (!list_empty(timers)) {
 		struct cpu_timer_list *t = list_entry(timers->next,
						      struct cpu_timer_list,
						      entry);
-		if (cputime_lt(ptime, t->expires.cpu)) {
+		if (!--maxfire || cputime_lt(ptime, t->expires.cpu)) {
 			prof_expires = t->expires.cpu;
 			break;
 		}
@@ -1059,12 +1061,13 @@ static void check_process_timers(struct task_struct *tsk,
 	}
 
 	++timers;
+	maxfire = 20;
 	virt_expires = cputime_zero;
 	while (!list_empty(timers)) {
 		struct cpu_timer_list *t = list_entry(timers->next,
						      struct cpu_timer_list,
						      entry);
-		if (cputime_lt(utime, t->expires.cpu)) {
+		if (!--maxfire || cputime_lt(utime, t->expires.cpu)) {
 			virt_expires = t->expires.cpu;
 			break;
 		}
@@ -1073,12 +1076,13 @@ static void check_process_timers(struct task_struct *tsk,
 	}
 
 	++timers;
+	maxfire = 20;
 	sched_expires = 0;
 	while (!list_empty(timers)) {
 		struct cpu_timer_list *t = list_entry(timers->next,
						      struct cpu_timer_list,
						      entry);
-		if (sched_time < t->expires.sched) {
+		if (!--maxfire || sched_time < t->expires.sched) {
 			sched_expires = t->expires.sched;
 			break;
 		}
@@ -1161,6 +1165,9 @@ static void check_process_timers(struct task_struct *tsk,
 		unsigned long long sched_left, sched;
 		const unsigned int nthreads = atomic_read(&sig->live);
 
+		if (!nthreads)
+			return;
+
 		prof_left = cputime_sub(prof_expires, utime);
 		prof_left = cputime_sub(prof_left, stime);
 		prof_left = cputime_div(prof_left, nthreads);
@@ -1286,30 +1293,30 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 
 #undef UNEXPIRED
 
-	BUG_ON(tsk->exit_state);
-
 	/*
 	 * Double-check with locks held.
 	 */
 	read_lock(&tasklist_lock);
-	spin_lock(&tsk->sighand->siglock);
+	if (likely(tsk->signal != NULL)) {
+		spin_lock(&tsk->sighand->siglock);
 
 	/*
 	 * Here we take off tsk->cpu_timers[N] and tsk->signal->cpu_timers[N]
 	 * all the timers that are firing, and put them on the firing list.
 	 */
 	check_thread_timers(tsk, &firing);
 	check_process_timers(tsk, &firing);
 
 	/*
 	 * We must release these locks before taking any timer's lock.
 	 * There is a potential race with timer deletion here, as the
 	 * siglock now protects our private firing list. We have set
 	 * the firing flag in each timer, so that a deletion attempt
 	 * that gets the timer lock before we do will give it up and
 	 * spin until we've taken care of that timer below.
 	 */
 	spin_unlock(&tsk->sighand->siglock);
+	}
 	read_unlock(&tasklist_lock);
 
 	/*