author    Ingo Molnar <mingo@elte.hu>    2008-10-28 11:26:12 -0400
committer Ingo Molnar <mingo@elte.hu>    2008-10-28 11:26:12 -0400
commit    7a9787e1eba95a166265e6a260cf30af04ef0a99 (patch)
tree      e730a4565e0318140d2fbd2f0415d18a339d7336 /kernel/posix-timers.c
parent    41b9eb264c8407655db57b60b4457fe1b2ec9977 (diff)
parent    0173a3265b228da319ceb9c1ec6a5682fd1b2d92 (diff)
Merge commit 'v2.6.28-rc2' into x86/pci-ioapic-boot-irq-quirks
Diffstat (limited to 'kernel/posix-timers.c')
-rw-r--r--	kernel/posix-timers.c	197
1 file changed, 84 insertions, 113 deletions
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index dbd8398ddb0b..5e79c662294b 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -223,6 +223,15 @@ static int posix_ktime_get_ts(clockid_t which_clock, struct timespec *tp)
 }
 
 /*
+ * Get monotonic time for posix timers
+ */
+static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec *tp)
+{
+	getrawmonotonic(tp);
+	return 0;
+}
+
+/*
  * Initialize everything, well, just everything in Posix clocks/timers ;)
  */
 static __init int init_posix_timers(void)
@@ -235,9 +244,15 @@ static __init int init_posix_timers(void)
 		.clock_get = posix_ktime_get_ts,
 		.clock_set = do_posix_clock_nosettime,
 	};
+	struct k_clock clock_monotonic_raw = {
+		.clock_getres = hrtimer_get_res,
+		.clock_get = posix_get_monotonic_raw,
+		.clock_set = do_posix_clock_nosettime,
+	};
 
 	register_posix_clock(CLOCK_REALTIME, &clock_realtime);
 	register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
+	register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
 
 	posix_timers_cache = kmem_cache_create("posix_timers_cache",
 					sizeof (struct k_itimer), 0, SLAB_PANIC,
@@ -289,39 +304,33 @@ void do_schedule_next_timer(struct siginfo *info)
 		else
 			schedule_next_timer(timr);
 
-		info->si_overrun = timr->it_overrun_last;
+		info->si_overrun += timr->it_overrun_last;
 	}
 
 	if (timr)
 		unlock_timer(timr, flags);
 }
 
-int posix_timer_event(struct k_itimer *timr,int si_private)
+int posix_timer_event(struct k_itimer *timr, int si_private)
 {
-	memset(&timr->sigq->info, 0, sizeof(siginfo_t));
+	int shared, ret;
+	/*
+	 * FIXME: if ->sigq is queued we can race with
+	 * dequeue_signal()->do_schedule_next_timer().
+	 *
+	 * If dequeue_signal() sees the "right" value of
+	 * si_sys_private it calls do_schedule_next_timer().
+	 * We re-queue ->sigq and drop ->it_lock().
+	 * do_schedule_next_timer() locks the timer
+	 * and re-schedules it while ->sigq is pending.
+	 * Not really bad, but not that we want.
+	 */
 	timr->sigq->info.si_sys_private = si_private;
-	/* Send signal to the process that owns this timer.*/
-
-	timr->sigq->info.si_signo = timr->it_sigev_signo;
-	timr->sigq->info.si_errno = 0;
-	timr->sigq->info.si_code = SI_TIMER;
-	timr->sigq->info.si_tid = timr->it_id;
-	timr->sigq->info.si_value = timr->it_sigev_value;
-
-	if (timr->it_sigev_notify & SIGEV_THREAD_ID) {
-		struct task_struct *leader;
-		int ret = send_sigqueue(timr->sigq, timr->it_process, 0);
-
-		if (likely(ret >= 0))
-			return ret;
-
-		timr->it_sigev_notify = SIGEV_SIGNAL;
-		leader = timr->it_process->group_leader;
-		put_task_struct(timr->it_process);
-		timr->it_process = leader;
-	}
 
-	return send_sigqueue(timr->sigq, timr->it_process, 1);
+	shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID);
+	ret = send_sigqueue(timr->sigq, timr->it_process, shared);
+	/* If we failed to send the signal the timer stops. */
+	return ret > 0;
 }
 EXPORT_SYMBOL_GPL(posix_timer_event);
 
@@ -433,8 +442,9 @@ static struct k_itimer * alloc_posix_timer(void)
 		return tmr;
 	if (unlikely(!(tmr->sigq = sigqueue_alloc()))) {
 		kmem_cache_free(posix_timers_cache, tmr);
-		tmr = NULL;
+		return NULL;
 	}
+	memset(&tmr->sigq->info, 0, sizeof(siginfo_t));
 	return tmr;
 }
 
@@ -449,9 +459,6 @@ static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
 		spin_unlock_irqrestore(&idr_lock, flags);
 	}
 	sigqueue_free(tmr->sigq);
-	if (unlikely(tmr->it_process) &&
-	    tmr->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
-		put_task_struct(tmr->it_process);
 	kmem_cache_free(posix_timers_cache, tmr);
 }
 
@@ -462,11 +469,9 @@ sys_timer_create(const clockid_t which_clock,
 		 struct sigevent __user *timer_event_spec,
 		 timer_t __user * created_timer_id)
 {
-	int error = 0;
-	struct k_itimer *new_timer = NULL;
-	int new_timer_id;
-	struct task_struct *process = NULL;
-	unsigned long flags;
+	struct k_itimer *new_timer;
+	int error, new_timer_id;
+	struct task_struct *process;
 	sigevent_t event;
 	int it_id_set = IT_ID_NOT_SET;
 
@@ -484,12 +489,11 @@ sys_timer_create(const clockid_t which_clock,
 		goto out;
 	}
 	spin_lock_irq(&idr_lock);
-	error = idr_get_new(&posix_timers_id, (void *) new_timer,
-			    &new_timer_id);
+	error = idr_get_new(&posix_timers_id, new_timer, &new_timer_id);
 	spin_unlock_irq(&idr_lock);
-	if (error == -EAGAIN)
-		goto retry;
-	else if (error) {
+	if (error) {
+		if (error == -EAGAIN)
+			goto retry;
 		/*
 		 * Weird looking, but we return EAGAIN if the IDR is
 		 * full (proper POSIX return value for this)
@@ -520,67 +524,43 @@ sys_timer_create(const clockid_t which_clock,
 			error = -EFAULT;
 			goto out;
 		}
-		new_timer->it_sigev_notify = event.sigev_notify;
-		new_timer->it_sigev_signo = event.sigev_signo;
-		new_timer->it_sigev_value = event.sigev_value;
-
-		read_lock(&tasklist_lock);
-		if ((process = good_sigevent(&event))) {
-			/*
-			 * We may be setting up this process for another
-			 * thread. It may be exiting. To catch this
-			 * case the we check the PF_EXITING flag. If
-			 * the flag is not set, the siglock will catch
-			 * him before it is too late (in exit_itimers).
-			 *
-			 * The exec case is a bit more invloved but easy
-			 * to code. If the process is in our thread
-			 * group (and it must be or we would not allow
-			 * it here) and is doing an exec, it will cause
-			 * us to be killed. In this case it will wait
-			 * for us to die which means we can finish this
-			 * linkage with our last gasp. I.e. no code :)
-			 */
-			spin_lock_irqsave(&process->sighand->siglock, flags);
-			if (!(process->flags & PF_EXITING)) {
-				new_timer->it_process = process;
-				list_add(&new_timer->list,
-					 &process->signal->posix_timers);
-				if (new_timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
-					get_task_struct(process);
-				spin_unlock_irqrestore(&process->sighand->siglock, flags);
-			} else {
-				spin_unlock_irqrestore(&process->sighand->siglock, flags);
-				process = NULL;
-			}
-		}
-		read_unlock(&tasklist_lock);
+		rcu_read_lock();
+		process = good_sigevent(&event);
+		if (process)
+			get_task_struct(process);
+		rcu_read_unlock();
 		if (!process) {
 			error = -EINVAL;
 			goto out;
 		}
 	} else {
-		new_timer->it_sigev_notify = SIGEV_SIGNAL;
-		new_timer->it_sigev_signo = SIGALRM;
-		new_timer->it_sigev_value.sival_int = new_timer->it_id;
+		event.sigev_notify = SIGEV_SIGNAL;
+		event.sigev_signo = SIGALRM;
+		event.sigev_value.sival_int = new_timer->it_id;
 		process = current->group_leader;
-		spin_lock_irqsave(&process->sighand->siglock, flags);
-		new_timer->it_process = process;
-		list_add(&new_timer->list, &process->signal->posix_timers);
-		spin_unlock_irqrestore(&process->sighand->siglock, flags);
+		get_task_struct(process);
 	}
 
+	new_timer->it_sigev_notify = event.sigev_notify;
+	new_timer->sigq->info.si_signo = event.sigev_signo;
+	new_timer->sigq->info.si_value = event.sigev_value;
+	new_timer->sigq->info.si_tid = new_timer->it_id;
+	new_timer->sigq->info.si_code = SI_TIMER;
+
+	spin_lock_irq(&current->sighand->siglock);
+	new_timer->it_process = process;
+	list_add(&new_timer->list, &current->signal->posix_timers);
+	spin_unlock_irq(&current->sighand->siglock);
+
+	return 0;
 	/*
 	 * In the case of the timer belonging to another task, after
 	 * the task is unlocked, the timer is owned by the other task
 	 * and may cease to exist at any time. Don't use or modify
 	 * new_timer after the unlock call.
 	 */
-
 out:
-	if (error)
-		release_posix_timer(new_timer, it_id_set);
-
+	release_posix_timer(new_timer, it_id_set);
 	return error;
 }
 
@@ -591,7 +571,7 @@ out:
  * the find to the timer lock. To avoid a dead lock, the timer id MUST
  * be release with out holding the timer lock.
  */
-static struct k_itimer * lock_timer(timer_t timer_id, unsigned long *flags)
+static struct k_itimer *lock_timer(timer_t timer_id, unsigned long *flags)
 {
 	struct k_itimer *timr;
 	/*
@@ -599,23 +579,20 @@ static struct k_itimer * lock_timer(timer_t timer_id, unsigned long *flags)
 	 * flags part over to the timer lock. Must not let interrupts in
 	 * while we are moving the lock.
 	 */
-
 	spin_lock_irqsave(&idr_lock, *flags);
-	timr = (struct k_itimer *) idr_find(&posix_timers_id, (int) timer_id);
+	timr = idr_find(&posix_timers_id, (int)timer_id);
 	if (timr) {
 		spin_lock(&timr->it_lock);
-
-		if ((timr->it_id != timer_id) || !(timr->it_process) ||
-				!same_thread_group(timr->it_process, current)) {
-			spin_unlock(&timr->it_lock);
-			spin_unlock_irqrestore(&idr_lock, *flags);
-			timr = NULL;
-		} else
+		if (timr->it_process &&
+		    same_thread_group(timr->it_process, current)) {
 			spin_unlock(&idr_lock);
-	} else
-		spin_unlock_irqrestore(&idr_lock, *flags);
+			return timr;
+		}
+		spin_unlock(&timr->it_lock);
+	}
+	spin_unlock_irqrestore(&idr_lock, *flags);
 
-	return timr;
+	return NULL;
 }
 
 /*
@@ -662,7 +639,7 @@ common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
 	    (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE))
 		timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv);
 
-	remaining = ktime_sub(timer->expires, now);
+	remaining = ktime_sub(hrtimer_get_expires(timer), now);
 	/* Return 0 only, when the timer is expired and not pending */
 	if (remaining.tv64 <= 0) {
 		/*
@@ -756,7 +733,7 @@ common_timer_set(struct k_itimer *timr, int flags,
 	hrtimer_init(&timr->it.real.timer, timr->it_clock, mode);
 	timr->it.real.timer.function = posix_timer_fn;
 
-	timer->expires = timespec_to_ktime(new_setting->it_value);
+	hrtimer_set_expires(timer, timespec_to_ktime(new_setting->it_value));
 
 	/* Convert interval */
 	timr->it.real.interval = timespec_to_ktime(new_setting->it_interval);
@@ -765,14 +742,12 @@ common_timer_set(struct k_itimer *timr, int flags,
 	if (((timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) {
 		/* Setup correct expiry time for relative timers */
 		if (mode == HRTIMER_MODE_REL) {
-			timer->expires =
-				ktime_add_safe(timer->expires,
-					       timer->base->get_time());
+			hrtimer_add_expires(timer, timer->base->get_time());
 		}
 		return 0;
 	}
 
-	hrtimer_start(timer, timer->expires, mode);
+	hrtimer_start_expires(timer, mode);
 	return 0;
 }
 
@@ -856,11 +831,9 @@ retry_delete:
 	 * This keeps any tasks waiting on the spin lock from thinking
 	 * they got something (see the lock code above).
 	 */
-	if (timer->it_process) {
-		if (timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
-			put_task_struct(timer->it_process);
-		timer->it_process = NULL;
-	}
+	put_task_struct(timer->it_process);
+	timer->it_process = NULL;
+
 	unlock_timer(timer, flags);
 	release_posix_timer(timer, IT_ID_SET);
 	return 0;
@@ -885,11 +858,9 @@ retry_delete:
 	 * This keeps any tasks waiting on the spin lock from thinking
 	 * they got something (see the lock code above).
 	 */
-	if (timer->it_process) {
-		if (timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
-			put_task_struct(timer->it_process);
-		timer->it_process = NULL;
-	}
+	put_task_struct(timer->it_process);
+	timer->it_process = NULL;
+
 	unlock_timer(timer, flags);
 	release_posix_timer(timer, IT_ID_SET);
 }
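
For context on the CLOCK_MONOTONIC_RAW registration that this merge brings in: a minimal userspace sketch, not part of the commit, assuming kernel headers and a libc that define CLOCK_MONOTONIC_RAW and that the program is linked with -lrt for clock_getres()/clock_gettime().

/* Illustrative only -- not part of the patch above.
 * Reads the raw monotonic clock that init_posix_timers() now registers;
 * clock_getres() reaches hrtimer_get_res via .clock_getres, and
 * clock_gettime() reaches posix_get_monotonic_raw -> getrawmonotonic.
 * Build sketch (assumption): gcc raw_clock.c -lrt
 */
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec res, now;

	if (clock_getres(CLOCK_MONOTONIC_RAW, &res) != 0) {
		perror("clock_getres");	/* EINVAL on kernels without the clock */
		return 1;
	}
	if (clock_gettime(CLOCK_MONOTONIC_RAW, &now) != 0) {
		perror("clock_gettime");
		return 1;
	}
	printf("resolution: %ld ns, raw monotonic: %ld.%09ld s\n",
	       res.tv_nsec, (long)now.tv_sec, now.tv_nsec);
	return 0;
}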