Diffstat (limited to 'kernel/posix-cpu-timers.c')
-rw-r--r--   kernel/posix-cpu-timers.c   | 63
1 file changed, 32 insertions(+), 31 deletions(-)
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index b15462b17a58..bf374fceb39c 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -91,7 +91,7 @@ static inline union cpu_time_count cpu_time_sub(clockid_t which_clock,
  * Update expiry time from increment, and increase overrun count,
  * given the current clock sample.
  */
-static inline void bump_cpu_timer(struct k_itimer *timer,
+static void bump_cpu_timer(struct k_itimer *timer,
                                   union cpu_time_count now)
 {
         int i;
@@ -110,7 +110,7 @@ static inline void bump_cpu_timer(struct k_itimer *timer,
                 for (i = 0; incr < delta - incr; i++)
                         incr = incr << 1;
                 for (; i >= 0; incr >>= 1, i--) {
-                        if (delta <= incr)
+                        if (delta < incr)
                                 continue;
                         timer->it.cpu.expires.sched += incr;
                         timer->it_overrun += 1 << i;
@@ -128,7 +128,7 @@ static inline void bump_cpu_timer(struct k_itimer *timer,
                 for (i = 0; cputime_lt(incr, cputime_sub(delta, incr)); i++)
                         incr = cputime_add(incr, incr);
                 for (; i >= 0; incr = cputime_halve(incr), i--) {
-                        if (cputime_le(delta, incr))
+                        if (cputime_lt(delta, incr))
                                 continue;
                         timer->it.cpu.expires.cpu =
                                 cputime_add(timer->it.cpu.expires.cpu, incr);
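The two hunks above tighten the comparison in bump_cpu_timer()'s doubling/halving loop, so that an elapsed interval that is an exact multiple of the increment still advances the expiry past the current sample. Below is a minimal user-space sketch of the same scheme, with plain uint64_t arithmetic standing in for the kernel's cputime_t/sched-clock types (bump(), expires and overrun are illustrative names, not the kernel API): it moves expires beyond now in whole incr-sized steps and counts those steps using only shifts, adds and compares, avoiding a 64-bit division.

#include <stdint.h>
#include <stdio.h>

/*
 * User-space sketch (not the kernel code) of the doubling/halving
 * scheme bump_cpu_timer() uses: advance *expires past now in whole
 * incr-sized periods and count how many periods were added, using
 * only add/subtract/compare/shift -- no 64-bit division.
 */
static void bump(uint64_t *expires, uint64_t *overrun,
                 uint64_t now, uint64_t incr)
{
        uint64_t delta;
        int i;

        if (now < *expires)
                return;                 /* not expired yet */

        delta = now + incr - *expires;

        /* Don't test (incr * 2 < delta): incr * 2 might overflow. */
        for (i = 0; incr < delta - incr; i++)
                incr <<= 1;

        for (; i >= 0; incr >>= 1, i--) {
                if (delta < incr)       /* the test the patch changes from <= to < */
                        continue;
                *expires += incr;
                *overrun += 1ULL << i;
                delta -= incr;
        }
}

int main(void)
{
        uint64_t expires = 100, overrun = 0;

        /* prints expires=1300 overrun=12: twelve periods of 100 push 100 past 1234 */
        bump(&expires, &overrun, 1234, 100);
        printf("expires=%llu overrun=%llu\n",
               (unsigned long long)expires, (unsigned long long)overrun);
        return 0;
}

With the strict delta < incr test, the delta == incr case is no longer skipped, so the expiry ends up a full period past the sample instead of exactly equal to it.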
@@ -497,7 +497,7 @@ static void process_timer_rebalance(struct task_struct *p,
                 left = cputime_div(cputime_sub(expires.cpu, val.cpu),
                                    nthreads);
                 do {
-                        if (!unlikely(t->exit_state)) {
+                        if (!unlikely(t->flags & PF_EXITING)) {
                                 ticks = cputime_add(prof_ticks(t), left);
                                 if (cputime_eq(t->it_prof_expires,
                                                cputime_zero) ||
@@ -512,7 +512,7 @@ static void process_timer_rebalance(struct task_struct *p,
                 left = cputime_div(cputime_sub(expires.cpu, val.cpu),
                                    nthreads);
                 do {
-                        if (!unlikely(t->exit_state)) {
+                        if (!unlikely(t->flags & PF_EXITING)) {
                                 ticks = cputime_add(virt_ticks(t), left);
                                 if (cputime_eq(t->it_virt_expires,
                                                cputime_zero) ||
@@ -527,7 +527,7 @@ static void process_timer_rebalance(struct task_struct *p,
                 nsleft = expires.sched - val.sched;
                 do_div(nsleft, nthreads);
                 do {
-                        if (!unlikely(t->exit_state)) {
+                        if (!unlikely(t->flags & PF_EXITING)) {
                                 ns = t->sched_time + nsleft;
                                 if (t->it_sched_expires == 0 ||
                                     t->it_sched_expires > ns) {
@@ -566,6 +566,9 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
         struct cpu_timer_list *next;
         unsigned long i;
 
+        if (CPUCLOCK_PERTHREAD(timer->it_clock) && (p->flags & PF_EXITING))
+                return;
+
         head = (CPUCLOCK_PERTHREAD(timer->it_clock) ?
                 p->cpu_timers : p->signal->cpu_timers);
         head += CPUCLOCK_WHICH(timer->it_clock);
@@ -576,17 +579,15 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
         listpos = head;
         if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
                 list_for_each_entry(next, head, entry) {
-                        if (next->expires.sched > nt->expires.sched) {
-                                listpos = &next->entry;
+                        if (next->expires.sched > nt->expires.sched)
                                 break;
-                        }
+                        listpos = &next->entry;
                 }
         } else {
                 list_for_each_entry(next, head, entry) {
-                        if (cputime_gt(next->expires.cpu, nt->expires.cpu)) {
-                                listpos = &next->entry;
+                        if (cputime_gt(next->expires.cpu, nt->expires.cpu))
                                 break;
-                        }
+                        listpos = &next->entry;
                 }
         }
         list_add(&nt->entry, listpos);
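The rewritten loops above change what listpos points to when the scan stops: list_add() links the new entry immediately after the given position, so listpos has to track the last queued timer whose expiry is not later than the new one's, leaving the new timer just in front of the first later entry and keeping the queue sorted by expiry. A small user-space sketch of that insertion discipline, with a toy doubly linked list standing in for list_head/struct cpu_timer_list (insert_after() mimics list_add()'s add-after-this-position behaviour; all names are illustrative):

#include <stdio.h>

/*
 * User-space sketch of the insertion order arm_timer() now uses.
 * insert_after() has list_add()'s semantics: link the new node right
 * after the given position.  Names are illustrative, not kernel API.
 */
struct node {
        unsigned long long expires;
        struct node *prev, *next;
};

static void insert_after(struct node *new, struct node *pos)
{
        new->next = pos->next;
        new->prev = pos;
        pos->next->prev = new;
        pos->next = new;
}

/* Keep the queue sorted by ascending expiry. */
static void enqueue_sorted(struct node *head, struct node *nt)
{
        struct node *pos = head;        /* like listpos = head */
        struct node *next;

        for (next = head->next; next != head; next = next->next) {
                if (next->expires > nt->expires)
                        break;          /* first entry later than nt */
                pos = next;             /* last entry not later than nt */
        }
        insert_after(nt, pos);          /* list_add(&nt->entry, listpos) */
}

int main(void)
{
        struct node head = { 0, &head, &head };
        struct node a = { 10 }, b = { 30 }, c = { 20 }, *n;

        enqueue_sorted(&head, &a);
        enqueue_sorted(&head, &b);
        enqueue_sorted(&head, &c);      /* lands between 10 and 30 */

        for (n = head.next; n != &head; n = n->next)
                printf("%llu\n", n->expires);   /* prints 10 20 30 */
        return 0;
}

If every queued entry expires later than the new one, pos never moves off the head and the new entry becomes the first in the queue, which is what the kernel loop's initial listpos = head gives as well.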
@@ -1206,7 +1207,7 @@ static void check_process_timers(struct task_struct *tsk,
 
                         do {
                                 t = next_thread(t);
-                        } while (unlikely(t->exit_state));
+                        } while (unlikely(t->flags & PF_EXITING));
                 } while (t != tsk);
         }
 }
@@ -1295,30 +1296,30 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 
 #undef UNEXPIRED
 
+        BUG_ON(tsk->exit_state);
+
         /*
          * Double-check with locks held.
          */
         read_lock(&tasklist_lock);
-        if (likely(tsk->signal != NULL)) {
-                spin_lock(&tsk->sighand->siglock);
+        spin_lock(&tsk->sighand->siglock);
 
-                /*
-                 * Here we take off tsk->cpu_timers[N] and tsk->signal->cpu_timers[N]
-                 * all the timers that are firing, and put them on the firing list.
-                 */
-                check_thread_timers(tsk, &firing);
-                check_process_timers(tsk, &firing);
+        /*
+         * Here we take off tsk->cpu_timers[N] and tsk->signal->cpu_timers[N]
+         * all the timers that are firing, and put them on the firing list.
+         */
+        check_thread_timers(tsk, &firing);
+        check_process_timers(tsk, &firing);
 
-                /*
-                 * We must release these locks before taking any timer's lock.
-                 * There is a potential race with timer deletion here, as the
-                 * siglock now protects our private firing list. We have set
-                 * the firing flag in each timer, so that a deletion attempt
-                 * that gets the timer lock before we do will give it up and
-                 * spin until we've taken care of that timer below.
-                 */
-                spin_unlock(&tsk->sighand->siglock);
-        }
+        /*
+         * We must release these locks before taking any timer's lock.
+         * There is a potential race with timer deletion here, as the
+         * siglock now protects our private firing list. We have set
+         * the firing flag in each timer, so that a deletion attempt
+         * that gets the timer lock before we do will give it up and
+         * spin until we've taken care of that timer below.
+         */
+        spin_unlock(&tsk->sighand->siglock);
         read_unlock(&tasklist_lock);
 
         /*
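The final hunk adds BUG_ON(tsk->exit_state) and drops the tsk->signal NULL check: with exiting tasks no longer expected to reach run_posix_cpu_timers(), the signal struct can be assumed present and the body loses one level of conditional nesting. The two-phase shape described by the in-tree comment is unchanged: collect expired timers onto a private firing list and mark them firing while holding siglock, then release the shared locks and deliver each timer under its own lock. A stripped-down user-space sketch of that shape, using pthread mutexes and made-up types (fake_timer, run_timers() and their fields are illustrative stand-ins, not kernel structures):

#include <pthread.h>
#include <stdio.h>

/*
 * Sketch of the two-phase flow described in the comment above: gather
 * expired timers onto a private firing list under one shared lock,
 * then deliver each timer under its own lock with the shared lock
 * dropped.  All types and names here are illustrative.
 */
struct fake_timer {
        pthread_mutex_t lock;           /* per-timer lock */
        int firing;                     /* tells a concurrent delete to back off */
        int expired;
        struct fake_timer *next_firing;
};

static pthread_mutex_t siglock = PTHREAD_MUTEX_INITIALIZER;

static void run_timers(struct fake_timer *timers, int n)
{
        struct fake_timer *firing = NULL;
        int i;

        /* Phase 1: under the shared lock, collect and mark expired timers. */
        pthread_mutex_lock(&siglock);
        for (i = 0; i < n; i++) {
                if (timers[i].expired) {
                        timers[i].firing = 1;
                        timers[i].next_firing = firing;
                        firing = &timers[i];
                }
        }
        pthread_mutex_unlock(&siglock);

        /* Phase 2: shared lock dropped; handle each timer under its own lock. */
        while (firing) {
                struct fake_timer *t = firing;

                firing = t->next_firing;
                pthread_mutex_lock(&t->lock);
                t->firing = 0;          /* a waiting deleter may proceed now */
                printf("delivered timer %p\n", (void *)t);
                pthread_mutex_unlock(&t->lock);
        }
}

int main(void)
{
        struct fake_timer timers[3] = {
                { PTHREAD_MUTEX_INITIALIZER, 0, 1, NULL },
                { PTHREAD_MUTEX_INITIALIZER, 0, 0, NULL },
                { PTHREAD_MUTEX_INITIALIZER, 0, 1, NULL },
        };

        run_timers(timers, 3);
        return 0;
}

The firing flag is what the comment relies on: a concurrent deletion that grabs the per-timer lock first sees the flag, gives the lock up, and retries until the delivery path has finished with that timer.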