Diffstat (limited to 'kernel/posix-timers.c')
 -rw-r--r--  kernel/posix-timers.c | 140
 1 file changed, 47 insertions(+), 93 deletions(-)
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index d3c66b53dff6..b931d7cedbfa 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -313,6 +313,7 @@ void do_schedule_next_timer(struct siginfo *info)
 
 int posix_timer_event(struct k_itimer *timr, int si_private)
 {
+	int shared, ret;
 	/*
 	 * FIXME: if ->sigq is queued we can race with
 	 * dequeue_signal()->do_schedule_next_timer().
@@ -326,25 +327,10 @@ int posix_timer_event(struct k_itimer *timr, int si_private)
 	 */
 	timr->sigq->info.si_sys_private = si_private;
 
-	timr->sigq->info.si_signo = timr->it_sigev_signo;
-	timr->sigq->info.si_code = SI_TIMER;
-	timr->sigq->info.si_tid = timr->it_id;
-	timr->sigq->info.si_value = timr->it_sigev_value;
-
-	if (timr->it_sigev_notify & SIGEV_THREAD_ID) {
-		struct task_struct *leader;
-		int ret = send_sigqueue(timr->sigq, timr->it_process, 0);
-
-		if (likely(ret >= 0))
-			return ret;
-
-		timr->it_sigev_notify = SIGEV_SIGNAL;
-		leader = timr->it_process->group_leader;
-		put_task_struct(timr->it_process);
-		timr->it_process = leader;
-	}
-
-	return send_sigqueue(timr->sigq, timr->it_process, 1);
+	shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID);
+	ret = send_sigqueue(timr->sigq, timr->it_process, shared);
+	/* If we failed to send the signal the timer stops. */
+	return ret > 0;
 }
 EXPORT_SYMBOL_GPL(posix_timer_event);
 
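The two delivery paths above (SIGEV_THREAD_ID versus thread-group delivery, with the old fallback to the group leader) collapse into a single send_sigqueue() call whose last argument says whether the signal is shared by the whole thread group. A sketch of the function as it reads after this hunk; the comments are added here for illustration and are not part of the patch:

int posix_timer_event(struct k_itimer *timr, int si_private)
{
	int shared, ret;

	/* See the FIXME above: ->sigq may already be queued. */
	timr->sigq->info.si_sys_private = si_private;

	/* SIGEV_THREAD_ID targets one specific thread; every other
	 * notify mode is shared across the whole thread group. */
	shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID);
	ret = send_sigqueue(timr->sigq, timr->it_process, shared);

	/* If we failed to send the signal the timer stops. */
	return ret > 0;
}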
@@ -456,7 +442,7 @@ static struct k_itimer * alloc_posix_timer(void)
 		return tmr;
 	if (unlikely(!(tmr->sigq = sigqueue_alloc()))) {
 		kmem_cache_free(posix_timers_cache, tmr);
-		tmr = NULL;
+		return NULL;
 	}
 	memset(&tmr->sigq->info, 0, sizeof(siginfo_t));
 	return tmr;
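The old error path set tmr to NULL and then fell through to memset(&tmr->sigq->info, ...), dereferencing the NULL pointer; returning NULL directly avoids that. A sketch of the whole function after the change (the allocation at the top is assumed from surrounding code the hunk does not show):

static struct k_itimer * alloc_posix_timer(void)
{
	struct k_itimer *tmr;

	/* Assumed context: allocate the timer object itself first. */
	tmr = kmem_cache_zalloc(posix_timers_cache, GFP_KERNEL);
	if (!tmr)
		return tmr;

	/* If the sigqueue cannot be allocated, free the timer and bail
	 * out before the memset below can touch tmr->sigq. */
	if (unlikely(!(tmr->sigq = sigqueue_alloc()))) {
		kmem_cache_free(posix_timers_cache, tmr);
		return NULL;
	}
	memset(&tmr->sigq->info, 0, sizeof(siginfo_t));
	return tmr;
}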
@@ -483,11 +469,9 @@ sys_timer_create(const clockid_t which_clock,
		 struct sigevent __user *timer_event_spec,
		 timer_t __user * created_timer_id)
 {
-	int error = 0;
-	struct k_itimer *new_timer = NULL;
-	int new_timer_id;
-	struct task_struct *process = NULL;
-	unsigned long flags;
+	struct k_itimer *new_timer;
+	int error, new_timer_id;
+	struct task_struct *process;
 	sigevent_t event;
 	int it_id_set = IT_ID_NOT_SET;
 
@@ -505,12 +489,11 @@ sys_timer_create(const clockid_t which_clock,
 		goto out;
 	}
 	spin_lock_irq(&idr_lock);
-	error = idr_get_new(&posix_timers_id, (void *) new_timer,
-			    &new_timer_id);
+	error = idr_get_new(&posix_timers_id, new_timer, &new_timer_id);
 	spin_unlock_irq(&idr_lock);
-	if (error == -EAGAIN)
-		goto retry;
-	else if (error) {
+	if (error) {
+		if (error == -EAGAIN)
+			goto retry;
 		/*
 		 * Weird looking, but we return EAGAIN if the IDR is
 		 * full (proper POSIX return value for this)
@@ -541,67 +524,43 @@ sys_timer_create(const clockid_t which_clock,
 			error = -EFAULT;
 			goto out;
 		}
-		new_timer->it_sigev_notify = event.sigev_notify;
-		new_timer->it_sigev_signo = event.sigev_signo;
-		new_timer->it_sigev_value = event.sigev_value;
-
-		read_lock(&tasklist_lock);
-		if ((process = good_sigevent(&event))) {
-			/*
-			 * We may be setting up this process for another
-			 * thread. It may be exiting. To catch this
-			 * case the we check the PF_EXITING flag. If
-			 * the flag is not set, the siglock will catch
-			 * him before it is too late (in exit_itimers).
-			 *
-			 * The exec case is a bit more invloved but easy
-			 * to code. If the process is in our thread
-			 * group (and it must be or we would not allow
-			 * it here) and is doing an exec, it will cause
-			 * us to be killed. In this case it will wait
-			 * for us to die which means we can finish this
-			 * linkage with our last gasp. I.e. no code :)
-			 */
-			spin_lock_irqsave(&process->sighand->siglock, flags);
-			if (!(process->flags & PF_EXITING)) {
-				new_timer->it_process = process;
-				list_add(&new_timer->list,
-					 &process->signal->posix_timers);
-				if (new_timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
-					get_task_struct(process);
-				spin_unlock_irqrestore(&process->sighand->siglock, flags);
-			} else {
-				spin_unlock_irqrestore(&process->sighand->siglock, flags);
-				process = NULL;
-			}
-		}
-		read_unlock(&tasklist_lock);
+		rcu_read_lock();
+		process = good_sigevent(&event);
+		if (process)
+			get_task_struct(process);
+		rcu_read_unlock();
 		if (!process) {
 			error = -EINVAL;
 			goto out;
 		}
 	} else {
-		new_timer->it_sigev_notify = SIGEV_SIGNAL;
-		new_timer->it_sigev_signo = SIGALRM;
-		new_timer->it_sigev_value.sival_int = new_timer->it_id;
+		event.sigev_notify = SIGEV_SIGNAL;
+		event.sigev_signo = SIGALRM;
+		event.sigev_value.sival_int = new_timer->it_id;
 		process = current->group_leader;
-		spin_lock_irqsave(&process->sighand->siglock, flags);
-		new_timer->it_process = process;
-		list_add(&new_timer->list, &process->signal->posix_timers);
-		spin_unlock_irqrestore(&process->sighand->siglock, flags);
+		get_task_struct(process);
 	}
 
+	new_timer->it_sigev_notify = event.sigev_notify;
+	new_timer->sigq->info.si_signo = event.sigev_signo;
+	new_timer->sigq->info.si_value = event.sigev_value;
+	new_timer->sigq->info.si_tid = new_timer->it_id;
+	new_timer->sigq->info.si_code = SI_TIMER;
+
+	spin_lock_irq(&current->sighand->siglock);
+	new_timer->it_process = process;
+	list_add(&new_timer->list, &current->signal->posix_timers);
+	spin_unlock_irq(&current->sighand->siglock);
+
+	return 0;
 	/*
 	 * In the case of the timer belonging to another task, after
 	 * the task is unlocked, the timer is owned by the other task
 	 * and may cease to exist at any time. Don't use or modify
 	 * new_timer after the unlock call.
 	 */
-
 out:
-	if (error)
-		release_posix_timer(new_timer, it_id_set);
-
+	release_posix_timer(new_timer, it_id_set);
 	return error;
 }
 
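sys_timer_create() now fills the queued siginfo and links the timer onto current->signal->posix_timers in one place, after taking a task reference in both the sigevent and the default-SIGALRM branch. For reference, a minimal userspace sketch of the plain SIGEV_SIGNAL case this path sets up (illustrative only, not part of the patch; compile with -lrt):

#include <signal.h>
#include <time.h>
#include <unistd.h>

static void handler(int sig, siginfo_t *si, void *uc)
{
	/* si->si_value carries the sigev_value set at timer_create() time. */
	(void)sig; (void)si; (void)uc;
}

int main(void)
{
	struct sigaction sa = { .sa_sigaction = handler,
				.sa_flags = SA_SIGINFO };
	struct sigevent sev = { .sigev_notify = SIGEV_SIGNAL,
				.sigev_signo = SIGRTMIN,
				.sigev_value.sival_int = 42 };
	struct itimerspec its = { .it_value.tv_sec = 1 };
	timer_t tid;

	sigaction(SIGRTMIN, &sa, NULL);
	if (timer_create(CLOCK_MONOTONIC, &sev, &tid))
		return 1;
	timer_settime(tid, 0, &its, NULL);
	pause();		/* wait for the one-shot timer signal */
	timer_delete(tid);
	return 0;
}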
@@ -612,7 +571,7 @@ out:
  * the find to the timer lock. To avoid a dead lock, the timer id MUST
  * be release with out holding the timer lock.
  */
-static struct k_itimer * lock_timer(timer_t timer_id, unsigned long *flags)
+static struct k_itimer *lock_timer(timer_t timer_id, unsigned long *flags)
 {
 	struct k_itimer *timr;
 	/*
@@ -620,23 +579,20 @@ static struct k_itimer * lock_timer(timer_t timer_id, unsigned long *flags)
 	 * flags part over to the timer lock. Must not let interrupts in
 	 * while we are moving the lock.
 	 */
-
 	spin_lock_irqsave(&idr_lock, *flags);
-	timr = (struct k_itimer *) idr_find(&posix_timers_id, (int) timer_id);
+	timr = idr_find(&posix_timers_id, (int)timer_id);
 	if (timr) {
 		spin_lock(&timr->it_lock);
-
-		if ((timr->it_id != timer_id) || !(timr->it_process) ||
-				!same_thread_group(timr->it_process, current)) {
-			spin_unlock(&timr->it_lock);
-			spin_unlock_irqrestore(&idr_lock, *flags);
-			timr = NULL;
-		} else
+		if (timr->it_process &&
+		    same_thread_group(timr->it_process, current)) {
 			spin_unlock(&idr_lock);
-	} else
-		spin_unlock_irqrestore(&idr_lock, *flags);
+			return timr;
+		}
+		spin_unlock(&timr->it_lock);
+	}
+	spin_unlock_irqrestore(&idr_lock, *flags);
 
-	return timr;
+	return NULL;
 }
 
 /*
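lock_timer() now does the idr_lock to it_lock handover with a single early return instead of the nested else branches, and the explicit it_id comparison is dropped (idr_find() already looked the timer up by that id). A sketch of the resulting function; the comments here are illustrative and stand in for the ones in the file:

static struct k_itimer *lock_timer(timer_t timer_id, unsigned long *flags)
{
	struct k_itimer *timr;

	/* Look the timer up with interrupts off and idr_lock held. */
	spin_lock_irqsave(&idr_lock, *flags);
	timr = idr_find(&posix_timers_id, (int)timer_id);
	if (timr) {
		/* Take it_lock while idr_lock is still held, so the timer
		 * cannot be freed between the lookup and this point. */
		spin_lock(&timr->it_lock);
		if (timr->it_process &&
		    same_thread_group(timr->it_process, current)) {
			/* Hand over: keep it_lock and the saved irq state
			 * in *flags, drop only idr_lock. */
			spin_unlock(&idr_lock);
			return timr;
		}
		/* Dead or foreign timer: release it again. */
		spin_unlock(&timr->it_lock);
	}
	spin_unlock_irqrestore(&idr_lock, *flags);

	return NULL;
}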
@@ -877,8 +833,7 @@ retry_delete:
 	 * This keeps any tasks waiting on the spin lock from thinking
 	 * they got something (see the lock code above).
 	 */
-	if (timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
-		put_task_struct(timer->it_process);
+	put_task_struct(timer->it_process);
 	timer->it_process = NULL;
 
 	unlock_timer(timer, flags);
@@ -905,8 +860,7 @@ retry_delete:
 	 * This keeps any tasks waiting on the spin lock from thinking
 	 * they got something (see the lock code above).
 	 */
-	if (timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
-		put_task_struct(timer->it_process);
+	put_task_struct(timer->it_process);
 	timer->it_process = NULL;
 
 	unlock_timer(timer, flags);
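The last two hunks drop the SIGEV_THREAD_ID condition around put_task_struct() in the delete paths: after the sys_timer_create() change above, every timer holds a task reference regardless of notify mode, so the delete side can drop it unconditionally. The pairing, sketched out of context with illustrative comments:

	/* Creation (both branches of sys_timer_create() above): */
	get_task_struct(process);
	new_timer->it_process = process;

	/* Deletion (sys_timer_delete() / itimer_delete(), these hunks): */
	put_task_struct(timer->it_process);
	timer->it_process = NULL;	/* lock_timer() now treats the timer as gone */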