Diffstat (limited to 'kernel/posix-timers.c')
 kernel/posix-timers.c | 181 ++++++++++++++-----------------
 1 file changed, 77 insertions(+), 104 deletions(-)
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 5131e5471169..887c63787de6 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -116,7 +116,7 @@ static DEFINE_SPINLOCK(idr_lock);
  * must supply functions here, even if the function just returns
  * ENOSYS. The standard POSIX timer management code assumes the
  * following: 1.) The k_itimer struct (sched.h) is used for the
- * timer. 2.) The list, it_lock, it_clock, it_id and it_process
+ * timer. 2.) The list, it_lock, it_clock, it_id and it_pid
  * fields are not modified by timer code.
  *
  * At this time all functions EXCEPT clock_nanosleep can be
@@ -197,6 +197,11 @@ static int common_timer_create(struct k_itimer *new_timer)
 	return 0;
 }
 
+static int no_timer_create(struct k_itimer *new_timer)
+{
+	return -EOPNOTSUPP;
+}
+
 /*
  * Return nonzero if we know a priori this clockid_t value is bogus.
  */
@@ -223,6 +228,15 @@ static int posix_ktime_get_ts(clockid_t which_clock, struct timespec *tp)
 }
 
 /*
+ * Get monotonic time for posix timers
+ */
+static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec *tp)
+{
+	getrawmonotonic(tp);
+	return 0;
+}
+
+/*
  * Initialize everything, well, just everything in Posix clocks/timers ;)
  */
 static __init int init_posix_timers(void)
@@ -235,9 +249,16 @@ static __init int init_posix_timers(void)
 		.clock_get = posix_ktime_get_ts,
 		.clock_set = do_posix_clock_nosettime,
 	};
+	struct k_clock clock_monotonic_raw = {
+		.clock_getres = hrtimer_get_res,
+		.clock_get = posix_get_monotonic_raw,
+		.clock_set = do_posix_clock_nosettime,
+		.timer_create = no_timer_create,
+	};
 
 	register_posix_clock(CLOCK_REALTIME, &clock_realtime);
 	register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
+	register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
 
 	posix_timers_cache = kmem_cache_create("posix_timers_cache",
 					sizeof (struct k_itimer), 0, SLAB_PANIC,
@@ -298,6 +319,8 @@ void do_schedule_next_timer(struct siginfo *info)
 
 int posix_timer_event(struct k_itimer *timr, int si_private)
 {
+	struct task_struct *task;
+	int shared, ret = -1;
 	/*
 	 * FIXME: if ->sigq is queued we can race with
 	 * dequeue_signal()->do_schedule_next_timer().
@@ -311,25 +334,15 @@ int posix_timer_event(struct k_itimer *timr, int si_private)
 	 */
 	timr->sigq->info.si_sys_private = si_private;
 
-	timr->sigq->info.si_signo = timr->it_sigev_signo;
-	timr->sigq->info.si_code = SI_TIMER;
-	timr->sigq->info.si_tid = timr->it_id;
-	timr->sigq->info.si_value = timr->it_sigev_value;
-
-	if (timr->it_sigev_notify & SIGEV_THREAD_ID) {
-		struct task_struct *leader;
-		int ret = send_sigqueue(timr->sigq, timr->it_process, 0);
-
-		if (likely(ret >= 0))
-			return ret;
-
-		timr->it_sigev_notify = SIGEV_SIGNAL;
-		leader = timr->it_process->group_leader;
-		put_task_struct(timr->it_process);
-		timr->it_process = leader;
-	}
-
-	return send_sigqueue(timr->sigq, timr->it_process, 1);
+	rcu_read_lock();
+	task = pid_task(timr->it_pid, PIDTYPE_PID);
+	if (task) {
+		shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID);
+		ret = send_sigqueue(timr->sigq, task, shared);
+	}
+	rcu_read_unlock();
+	/* If we failed to send the signal the timer stops. */
+	return ret > 0;
 }
 EXPORT_SYMBOL_GPL(posix_timer_event);
 
@@ -404,7 +417,7 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
 	return ret;
 }
 
-static struct task_struct * good_sigevent(sigevent_t * event)
+static struct pid *good_sigevent(sigevent_t * event)
 {
 	struct task_struct *rtn = current->group_leader;
 
@@ -418,7 +431,7 @@ static struct task_struct * good_sigevent(sigevent_t * event)
 		((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
 		return NULL;
 
-	return rtn;
+	return task_pid(rtn);
 }
 
 void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock)
@@ -457,6 +470,7 @@ static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
 		idr_remove(&posix_timers_id, tmr->it_id);
 		spin_unlock_irqrestore(&idr_lock, flags);
 	}
+	put_pid(tmr->it_pid);
 	sigqueue_free(tmr->sigq);
 	kmem_cache_free(posix_timers_cache, tmr);
 }
@@ -468,11 +482,8 @@ sys_timer_create(const clockid_t which_clock,
 		 struct sigevent __user *timer_event_spec,
 		 timer_t __user * created_timer_id)
 {
-	int error = 0;
-	struct k_itimer *new_timer = NULL;
-	int new_timer_id;
-	struct task_struct *process = NULL;
-	unsigned long flags;
+	struct k_itimer *new_timer;
+	int error, new_timer_id;
 	sigevent_t event;
 	int it_id_set = IT_ID_NOT_SET;
 
@@ -490,12 +501,11 @@ sys_timer_create(const clockid_t which_clock,
 		goto out;
 	}
 	spin_lock_irq(&idr_lock);
-	error = idr_get_new(&posix_timers_id, (void *) new_timer,
-			    &new_timer_id);
+	error = idr_get_new(&posix_timers_id, new_timer, &new_timer_id);
 	spin_unlock_irq(&idr_lock);
-	if (error == -EAGAIN)
-		goto retry;
-	else if (error) {
+	if (error) {
+		if (error == -EAGAIN)
+			goto retry;
 		/*
 		 * Weird looking, but we return EAGAIN if the IDR is
 		 * full (proper POSIX return value for this)
@@ -526,67 +536,40 @@ sys_timer_create(const clockid_t which_clock,
 		error = -EFAULT;
 		goto out;
 	}
-	new_timer->it_sigev_notify = event.sigev_notify;
-	new_timer->it_sigev_signo = event.sigev_signo;
-	new_timer->it_sigev_value = event.sigev_value;
-
-	read_lock(&tasklist_lock);
-	if ((process = good_sigevent(&event))) {
-		/*
-		 * We may be setting up this process for another
-		 * thread. It may be exiting. To catch this
-		 * case the we check the PF_EXITING flag. If
-		 * the flag is not set, the siglock will catch
-		 * him before it is too late (in exit_itimers).
-		 *
-		 * The exec case is a bit more invloved but easy
-		 * to code. If the process is in our thread
-		 * group (and it must be or we would not allow
-		 * it here) and is doing an exec, it will cause
-		 * us to be killed. In this case it will wait
-		 * for us to die which means we can finish this
-		 * linkage with our last gasp. I.e. no code :)
-		 */
-		spin_lock_irqsave(&process->sighand->siglock, flags);
-		if (!(process->flags & PF_EXITING)) {
-			new_timer->it_process = process;
-			list_add(&new_timer->list,
-				 &process->signal->posix_timers);
-			if (new_timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
-				get_task_struct(process);
-			spin_unlock_irqrestore(&process->sighand->siglock, flags);
-		} else {
-			spin_unlock_irqrestore(&process->sighand->siglock, flags);
-			process = NULL;
-		}
-	}
-	read_unlock(&tasklist_lock);
-	if (!process) {
+	rcu_read_lock();
+	new_timer->it_pid = get_pid(good_sigevent(&event));
+	rcu_read_unlock();
+	if (!new_timer->it_pid) {
 		error = -EINVAL;
 		goto out;
 	}
 } else {
-	new_timer->it_sigev_notify = SIGEV_SIGNAL;
-	new_timer->it_sigev_signo = SIGALRM;
-	new_timer->it_sigev_value.sival_int = new_timer->it_id;
-	process = current->group_leader;
-	spin_lock_irqsave(&process->sighand->siglock, flags);
-	new_timer->it_process = process;
-	list_add(&new_timer->list, &process->signal->posix_timers);
-	spin_unlock_irqrestore(&process->sighand->siglock, flags);
+	event.sigev_notify = SIGEV_SIGNAL;
+	event.sigev_signo = SIGALRM;
+	event.sigev_value.sival_int = new_timer->it_id;
+	new_timer->it_pid = get_pid(task_tgid(current));
 }
 
+	new_timer->it_sigev_notify = event.sigev_notify;
+	new_timer->sigq->info.si_signo = event.sigev_signo;
+	new_timer->sigq->info.si_value = event.sigev_value;
+	new_timer->sigq->info.si_tid = new_timer->it_id;
+	new_timer->sigq->info.si_code = SI_TIMER;
+
+	spin_lock_irq(&current->sighand->siglock);
+	new_timer->it_signal = current->signal;
+	list_add(&new_timer->list, &current->signal->posix_timers);
+	spin_unlock_irq(&current->sighand->siglock);
+
+	return 0;
 	/*
 	 * In the case of the timer belonging to another task, after
 	 * the task is unlocked, the timer is owned by the other task
 	 * and may cease to exist at any time. Don't use or modify
 	 * new_timer after the unlock call.
 	 */
-
 out:
-	if (error)
-		release_posix_timer(new_timer, it_id_set);
-
+	release_posix_timer(new_timer, it_id_set);
 	return error;
 }
 
@@ -597,7 +580,7 @@ out:
  * the find to the timer lock. To avoid a dead lock, the timer id MUST
  * be release with out holding the timer lock.
  */
-static struct k_itimer * lock_timer(timer_t timer_id, unsigned long *flags)
+static struct k_itimer *lock_timer(timer_t timer_id, unsigned long *flags)
 {
 	struct k_itimer *timr;
 	/*
@@ -605,23 +588,19 @@ static struct k_itimer * lock_timer(timer_t timer_id, unsigned long *flags)
 	 * flags part over to the timer lock. Must not let interrupts in
 	 * while we are moving the lock.
 	 */
-
 	spin_lock_irqsave(&idr_lock, *flags);
-	timr = (struct k_itimer *) idr_find(&posix_timers_id, (int) timer_id);
+	timr = idr_find(&posix_timers_id, (int)timer_id);
 	if (timr) {
 		spin_lock(&timr->it_lock);
-
-		if ((timr->it_id != timer_id) || !(timr->it_process) ||
-		    !same_thread_group(timr->it_process, current)) {
-			spin_unlock(&timr->it_lock);
-			spin_unlock_irqrestore(&idr_lock, *flags);
-			timr = NULL;
-		} else
+		if (timr->it_signal == current->signal) {
 			spin_unlock(&idr_lock);
-	} else
-		spin_unlock_irqrestore(&idr_lock, *flags);
+			return timr;
+		}
+		spin_unlock(&timr->it_lock);
+	}
+	spin_unlock_irqrestore(&idr_lock, *flags);
 
-	return timr;
+	return NULL;
 }
 
 /*
@@ -668,7 +647,7 @@ common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
 	    (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE))
 		timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv);
 
-	remaining = ktime_sub(timer->expires, now);
+	remaining = ktime_sub(hrtimer_get_expires(timer), now);
 	/* Return 0 only, when the timer is expired and not pending */
 	if (remaining.tv64 <= 0) {
 		/*
@@ -762,7 +741,7 @@ common_timer_set(struct k_itimer *timr, int flags,
 	hrtimer_init(&timr->it.real.timer, timr->it_clock, mode);
 	timr->it.real.timer.function = posix_timer_fn;
 
-	timer->expires = timespec_to_ktime(new_setting->it_value);
+	hrtimer_set_expires(timer, timespec_to_ktime(new_setting->it_value));
 
 	/* Convert interval */
 	timr->it.real.interval = timespec_to_ktime(new_setting->it_interval);
@@ -771,14 +750,12 @@ common_timer_set(struct k_itimer *timr, int flags,
 	if (((timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) {
 		/* Setup correct expiry time for relative timers */
 		if (mode == HRTIMER_MODE_REL) {
-			timer->expires =
-				ktime_add_safe(timer->expires,
-					       timer->base->get_time());
+			hrtimer_add_expires(timer, timer->base->get_time());
 		}
 		return 0;
 	}
 
-	hrtimer_start(timer, timer->expires, mode);
+	hrtimer_start_expires(timer, mode);
 	return 0;
 }
 
@@ -862,9 +839,7 @@ retry_delete:
 	 * This keeps any tasks waiting on the spin lock from thinking
 	 * they got something (see the lock code above).
 	 */
-	if (timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
-		put_task_struct(timer->it_process);
-	timer->it_process = NULL;
+	timer->it_signal = NULL;
 
 	unlock_timer(timer, flags);
 	release_posix_timer(timer, IT_ID_SET);
@@ -890,9 +865,7 @@ retry_delete:
 	 * This keeps any tasks waiting on the spin lock from thinking
 	 * they got something (see the lock code above).
 	 */
-	if (timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
-		put_task_struct(timer->it_process);
-	timer->it_process = NULL;
+	timer->it_signal = NULL;
 
 	unlock_timer(timer, flags);
 	release_posix_timer(timer, IT_ID_SET);