about | summary | refs | log | tree | commit | diff | stats
path: root/kernel/posix-cpu-timers.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/posix-cpu-timers.c')
-rw-r--r--kernel/posix-cpu-timers.c119
1 files changed, 67 insertions, 52 deletions
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index b3f3edc475de..b15462b17a58 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -380,28 +380,31 @@ int posix_cpu_timer_create(struct k_itimer *new_timer)
380int posix_cpu_timer_del(struct k_itimer *timer) 380int posix_cpu_timer_del(struct k_itimer *timer)
381{ 381{
382 struct task_struct *p = timer->it.cpu.task; 382 struct task_struct *p = timer->it.cpu.task;
383 int ret = 0;
383 384
384 if (timer->it.cpu.firing) 385 if (likely(p != NULL)) {
385 return TIMER_RETRY; 386 read_lock(&tasklist_lock);
386 387 if (unlikely(p->signal == NULL)) {
387 if (unlikely(p == NULL)) 388 /*
388 return 0; 389 * We raced with the reaping of the task.
390 * The deletion should have cleared us off the list.
391 */
392 BUG_ON(!list_empty(&timer->it.cpu.entry));
393 } else {
394 spin_lock(&p->sighand->siglock);
395 if (timer->it.cpu.firing)
396 ret = TIMER_RETRY;
397 else
398 list_del(&timer->it.cpu.entry);
399 spin_unlock(&p->sighand->siglock);
400 }
401 read_unlock(&tasklist_lock);
389 402
390 spin_lock(&p->sighand->siglock); 403 if (!ret)
391 if (!list_empty(&timer->it.cpu.entry)) { 404 put_task_struct(p);
392 /*
393 * Take us off the task's timer list. We don't need to
394 * take tasklist_lock and check for the task being reaped.
395 * If it was reaped, it already called posix_cpu_timers_exit
396 * and posix_cpu_timers_exit_group to clear all the timers
397 * that pointed to it.
398 */
399 list_del(&timer->it.cpu.entry);
400 put_task_struct(p);
401 } 405 }
402 spin_unlock(&p->sighand->siglock);
403 406
404 return 0; 407 return ret;
405} 408}
406 409
407/* 410/*
@@ -418,8 +421,6 @@ static void cleanup_timers(struct list_head *head,
418 cputime_t ptime = cputime_add(utime, stime); 421 cputime_t ptime = cputime_add(utime, stime);
419 422
420 list_for_each_entry_safe(timer, next, head, entry) { 423 list_for_each_entry_safe(timer, next, head, entry) {
421 put_task_struct(timer->task);
422 timer->task = NULL;
423 list_del_init(&timer->entry); 424 list_del_init(&timer->entry);
424 if (cputime_lt(timer->expires.cpu, ptime)) { 425 if (cputime_lt(timer->expires.cpu, ptime)) {
425 timer->expires.cpu = cputime_zero; 426 timer->expires.cpu = cputime_zero;
@@ -431,8 +432,6 @@ static void cleanup_timers(struct list_head *head,
431 432
432 ++head; 433 ++head;
433 list_for_each_entry_safe(timer, next, head, entry) { 434 list_for_each_entry_safe(timer, next, head, entry) {
434 put_task_struct(timer->task);
435 timer->task = NULL;
436 list_del_init(&timer->entry); 435 list_del_init(&timer->entry);
437 if (cputime_lt(timer->expires.cpu, utime)) { 436 if (cputime_lt(timer->expires.cpu, utime)) {
438 timer->expires.cpu = cputime_zero; 437 timer->expires.cpu = cputime_zero;
@@ -444,8 +443,6 @@ static void cleanup_timers(struct list_head *head,
444 443
445 ++head; 444 ++head;
446 list_for_each_entry_safe(timer, next, head, entry) { 445 list_for_each_entry_safe(timer, next, head, entry) {
447 put_task_struct(timer->task);
448 timer->task = NULL;
449 list_del_init(&timer->entry); 446 list_del_init(&timer->entry);
450 if (timer->expires.sched < sched_time) { 447 if (timer->expires.sched < sched_time) {
451 timer->expires.sched = 0; 448 timer->expires.sched = 0;
@@ -489,6 +486,9 @@ static void process_timer_rebalance(struct task_struct *p,
489 struct task_struct *t = p; 486 struct task_struct *t = p;
490 unsigned int nthreads = atomic_read(&p->signal->live); 487 unsigned int nthreads = atomic_read(&p->signal->live);
491 488
489 if (!nthreads)
490 return;
491
492 switch (clock_idx) { 492 switch (clock_idx) {
493 default: 493 default:
494 BUG(); 494 BUG();
@@ -730,9 +730,15 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
730 * Disarm any old timer after extracting its expiry time. 730 * Disarm any old timer after extracting its expiry time.
731 */ 731 */
732 BUG_ON(!irqs_disabled()); 732 BUG_ON(!irqs_disabled());
733
734 ret = 0;
733 spin_lock(&p->sighand->siglock); 735 spin_lock(&p->sighand->siglock);
734 old_expires = timer->it.cpu.expires; 736 old_expires = timer->it.cpu.expires;
735 list_del_init(&timer->it.cpu.entry); 737 if (unlikely(timer->it.cpu.firing)) {
738 timer->it.cpu.firing = -1;
739 ret = TIMER_RETRY;
740 } else
741 list_del_init(&timer->it.cpu.entry);
736 spin_unlock(&p->sighand->siglock); 742 spin_unlock(&p->sighand->siglock);
737 743
738 /* 744 /*
@@ -780,7 +786,7 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
780 } 786 }
781 } 787 }
782 788
783 if (unlikely(timer->it.cpu.firing)) { 789 if (unlikely(ret)) {
784 /* 790 /*
785 * We are colliding with the timer actually firing. 791 * We are colliding with the timer actually firing.
786 * Punt after filling in the timer's old value, and 792 * Punt after filling in the timer's old value, and
@@ -788,8 +794,6 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
788 * it as an overrun (thanks to bump_cpu_timer above). 794 * it as an overrun (thanks to bump_cpu_timer above).
789 */ 795 */
790 read_unlock(&tasklist_lock); 796 read_unlock(&tasklist_lock);
791 timer->it.cpu.firing = -1;
792 ret = TIMER_RETRY;
793 goto out; 797 goto out;
794 } 798 }
795 799
@@ -955,14 +959,16 @@ void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
955static void check_thread_timers(struct task_struct *tsk, 959static void check_thread_timers(struct task_struct *tsk,
956 struct list_head *firing) 960 struct list_head *firing)
957{ 961{
962 int maxfire;
958 struct list_head *timers = tsk->cpu_timers; 963 struct list_head *timers = tsk->cpu_timers;
959 964
965 maxfire = 20;
960 tsk->it_prof_expires = cputime_zero; 966 tsk->it_prof_expires = cputime_zero;
961 while (!list_empty(timers)) { 967 while (!list_empty(timers)) {
962 struct cpu_timer_list *t = list_entry(timers->next, 968 struct cpu_timer_list *t = list_entry(timers->next,
963 struct cpu_timer_list, 969 struct cpu_timer_list,
964 entry); 970 entry);
965 if (cputime_lt(prof_ticks(tsk), t->expires.cpu)) { 971 if (!--maxfire || cputime_lt(prof_ticks(tsk), t->expires.cpu)) {
966 tsk->it_prof_expires = t->expires.cpu; 972 tsk->it_prof_expires = t->expires.cpu;
967 break; 973 break;
968 } 974 }
@@ -971,12 +977,13 @@ static void check_thread_timers(struct task_struct *tsk,
971 } 977 }
972 978
973 ++timers; 979 ++timers;
980 maxfire = 20;
974 tsk->it_virt_expires = cputime_zero; 981 tsk->it_virt_expires = cputime_zero;
975 while (!list_empty(timers)) { 982 while (!list_empty(timers)) {
976 struct cpu_timer_list *t = list_entry(timers->next, 983 struct cpu_timer_list *t = list_entry(timers->next,
977 struct cpu_timer_list, 984 struct cpu_timer_list,
978 entry); 985 entry);
979 if (cputime_lt(virt_ticks(tsk), t->expires.cpu)) { 986 if (!--maxfire || cputime_lt(virt_ticks(tsk), t->expires.cpu)) {
980 tsk->it_virt_expires = t->expires.cpu; 987 tsk->it_virt_expires = t->expires.cpu;
981 break; 988 break;
982 } 989 }
@@ -985,12 +992,13 @@ static void check_thread_timers(struct task_struct *tsk,
985 } 992 }
986 993
987 ++timers; 994 ++timers;
995 maxfire = 20;
988 tsk->it_sched_expires = 0; 996 tsk->it_sched_expires = 0;
989 while (!list_empty(timers)) { 997 while (!list_empty(timers)) {
990 struct cpu_timer_list *t = list_entry(timers->next, 998 struct cpu_timer_list *t = list_entry(timers->next,
991 struct cpu_timer_list, 999 struct cpu_timer_list,
992 entry); 1000 entry);
993 if (tsk->sched_time < t->expires.sched) { 1001 if (!--maxfire || tsk->sched_time < t->expires.sched) {
994 tsk->it_sched_expires = t->expires.sched; 1002 tsk->it_sched_expires = t->expires.sched;
995 break; 1003 break;
996 } 1004 }
@@ -1007,6 +1015,7 @@ static void check_thread_timers(struct task_struct *tsk,
1007static void check_process_timers(struct task_struct *tsk, 1015static void check_process_timers(struct task_struct *tsk,
1008 struct list_head *firing) 1016 struct list_head *firing)
1009{ 1017{
1018 int maxfire;
1010 struct signal_struct *const sig = tsk->signal; 1019 struct signal_struct *const sig = tsk->signal;
1011 cputime_t utime, stime, ptime, virt_expires, prof_expires; 1020 cputime_t utime, stime, ptime, virt_expires, prof_expires;
1012 unsigned long long sched_time, sched_expires; 1021 unsigned long long sched_time, sched_expires;
@@ -1039,12 +1048,13 @@ static void check_process_timers(struct task_struct *tsk,
1039 } while (t != tsk); 1048 } while (t != tsk);
1040 ptime = cputime_add(utime, stime); 1049 ptime = cputime_add(utime, stime);
1041 1050
1051 maxfire = 20;
1042 prof_expires = cputime_zero; 1052 prof_expires = cputime_zero;
1043 while (!list_empty(timers)) { 1053 while (!list_empty(timers)) {
1044 struct cpu_timer_list *t = list_entry(timers->next, 1054 struct cpu_timer_list *t = list_entry(timers->next,
1045 struct cpu_timer_list, 1055 struct cpu_timer_list,
1046 entry); 1056 entry);
1047 if (cputime_lt(ptime, t->expires.cpu)) { 1057 if (!--maxfire || cputime_lt(ptime, t->expires.cpu)) {
1048 prof_expires = t->expires.cpu; 1058 prof_expires = t->expires.cpu;
1049 break; 1059 break;
1050 } 1060 }
@@ -1053,12 +1063,13 @@ static void check_process_timers(struct task_struct *tsk,
1053 } 1063 }
1054 1064
1055 ++timers; 1065 ++timers;
1066 maxfire = 20;
1056 virt_expires = cputime_zero; 1067 virt_expires = cputime_zero;
1057 while (!list_empty(timers)) { 1068 while (!list_empty(timers)) {
1058 struct cpu_timer_list *t = list_entry(timers->next, 1069 struct cpu_timer_list *t = list_entry(timers->next,
1059 struct cpu_timer_list, 1070 struct cpu_timer_list,
1060 entry); 1071 entry);
1061 if (cputime_lt(utime, t->expires.cpu)) { 1072 if (!--maxfire || cputime_lt(utime, t->expires.cpu)) {
1062 virt_expires = t->expires.cpu; 1073 virt_expires = t->expires.cpu;
1063 break; 1074 break;
1064 } 1075 }
@@ -1067,12 +1078,13 @@ static void check_process_timers(struct task_struct *tsk,
1067 } 1078 }
1068 1079
1069 ++timers; 1080 ++timers;
1081 maxfire = 20;
1070 sched_expires = 0; 1082 sched_expires = 0;
1071 while (!list_empty(timers)) { 1083 while (!list_empty(timers)) {
1072 struct cpu_timer_list *t = list_entry(timers->next, 1084 struct cpu_timer_list *t = list_entry(timers->next,
1073 struct cpu_timer_list, 1085 struct cpu_timer_list,
1074 entry); 1086 entry);
1075 if (sched_time < t->expires.sched) { 1087 if (!--maxfire || sched_time < t->expires.sched) {
1076 sched_expires = t->expires.sched; 1088 sched_expires = t->expires.sched;
1077 break; 1089 break;
1078 } 1090 }
@@ -1155,6 +1167,9 @@ static void check_process_timers(struct task_struct *tsk,
1155 unsigned long long sched_left, sched; 1167 unsigned long long sched_left, sched;
1156 const unsigned int nthreads = atomic_read(&sig->live); 1168 const unsigned int nthreads = atomic_read(&sig->live);
1157 1169
1170 if (!nthreads)
1171 return;
1172
1158 prof_left = cputime_sub(prof_expires, utime); 1173 prof_left = cputime_sub(prof_expires, utime);
1159 prof_left = cputime_sub(prof_left, stime); 1174 prof_left = cputime_sub(prof_left, stime);
1160 prof_left = cputime_div(prof_left, nthreads); 1175 prof_left = cputime_div(prof_left, nthreads);
@@ -1280,30 +1295,30 @@ void run_posix_cpu_timers(struct task_struct *tsk)
1280 1295
1281#undef UNEXPIRED 1296#undef UNEXPIRED
1282 1297
1283 BUG_ON(tsk->exit_state);
1284
1285 /* 1298 /*
1286 * Double-check with locks held. 1299 * Double-check with locks held.
1287 */ 1300 */
1288 read_lock(&tasklist_lock); 1301 read_lock(&tasklist_lock);
1289 spin_lock(&tsk->sighand->siglock); 1302 if (likely(tsk->signal != NULL)) {
1303 spin_lock(&tsk->sighand->siglock);
1290 1304
1291 /* 1305 /*
1292 * Here we take off tsk->cpu_timers[N] and tsk->signal->cpu_timers[N] 1306 * Here we take off tsk->cpu_timers[N] and tsk->signal->cpu_timers[N]
1293 * all the timers that are firing, and put them on the firing list. 1307 * all the timers that are firing, and put them on the firing list.
1294 */ 1308 */
1295 check_thread_timers(tsk, &firing); 1309 check_thread_timers(tsk, &firing);
1296 check_process_timers(tsk, &firing); 1310 check_process_timers(tsk, &firing);
1297 1311
1298 /* 1312 /*
1299 * We must release these locks before taking any timer's lock. 1313 * We must release these locks before taking any timer's lock.
1300 * There is a potential race with timer deletion here, as the 1314 * There is a potential race with timer deletion here, as the
1301 * siglock now protects our private firing list. We have set 1315 * siglock now protects our private firing list. We have set
1302 * the firing flag in each timer, so that a deletion attempt 1316 * the firing flag in each timer, so that a deletion attempt
1303 * that gets the timer lock before we do will give it up and 1317 * that gets the timer lock before we do will give it up and
1304 * spin until we've taken care of that timer below. 1318 * spin until we've taken care of that timer below.
1305 */ 1319 */
1306 spin_unlock(&tsk->sighand->siglock); 1320 spin_unlock(&tsk->sighand->siglock);
1321 }
1307 read_unlock(&tasklist_lock); 1322 read_unlock(&tasklist_lock);
1308 1323
1309 /* 1324 /*