Diffstat (limited to 'kernel/posix-timers.c')
-rw-r--r--   kernel/posix-timers.c | 163
1 file changed, 65 insertions(+), 98 deletions(-)
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 5131e5471169..5e79c662294b 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -223,6 +223,15 @@ static int posix_ktime_get_ts(clockid_t which_clock, struct timespec *tp)
 }
 
 /*
+ * Get monotonic time for posix timers
+ */
+static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec *tp)
+{
+	getrawmonotonic(tp);
+	return 0;
+}
+
+/*
  * Initialize everything, well, just everything in Posix clocks/timers ;)
  */
 static __init int init_posix_timers(void)
@@ -235,9 +244,15 @@ static __init int init_posix_timers(void)
 		.clock_get = posix_ktime_get_ts,
 		.clock_set = do_posix_clock_nosettime,
 	};
+	struct k_clock clock_monotonic_raw = {
+		.clock_getres = hrtimer_get_res,
+		.clock_get = posix_get_monotonic_raw,
+		.clock_set = do_posix_clock_nosettime,
+	};
 
 	register_posix_clock(CLOCK_REALTIME, &clock_realtime);
 	register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
+	register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
 
 	posix_timers_cache = kmem_cache_create("posix_timers_cache",
 					sizeof (struct k_itimer), 0, SLAB_PANIC,
@@ -298,6 +313,7 @@ void do_schedule_next_timer(struct siginfo *info)
 
 int posix_timer_event(struct k_itimer *timr, int si_private)
 {
+	int shared, ret;
 	/*
 	 * FIXME: if ->sigq is queued we can race with
 	 * dequeue_signal()->do_schedule_next_timer().
@@ -311,25 +327,10 @@ int posix_timer_event(struct k_itimer *timr, int si_private)
 	 */
 	timr->sigq->info.si_sys_private = si_private;
 
-	timr->sigq->info.si_signo = timr->it_sigev_signo;
-	timr->sigq->info.si_code = SI_TIMER;
-	timr->sigq->info.si_tid = timr->it_id;
-	timr->sigq->info.si_value = timr->it_sigev_value;
-
-	if (timr->it_sigev_notify & SIGEV_THREAD_ID) {
-		struct task_struct *leader;
-		int ret = send_sigqueue(timr->sigq, timr->it_process, 0);
-
-		if (likely(ret >= 0))
-			return ret;
-
-		timr->it_sigev_notify = SIGEV_SIGNAL;
-		leader = timr->it_process->group_leader;
-		put_task_struct(timr->it_process);
-		timr->it_process = leader;
-	}
-
-	return send_sigqueue(timr->sigq, timr->it_process, 1);
+	shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID);
+	ret = send_sigqueue(timr->sigq, timr->it_process, shared);
+	/* If we failed to send the signal the timer stops. */
+	return ret > 0;
 }
 EXPORT_SYMBOL_GPL(posix_timer_event);
 
@@ -468,11 +469,9 @@ sys_timer_create(const clockid_t which_clock,
 			struct sigevent __user *timer_event_spec,
 			timer_t __user * created_timer_id)
 {
-	int error = 0;
-	struct k_itimer *new_timer = NULL;
-	int new_timer_id;
-	struct task_struct *process = NULL;
-	unsigned long flags;
+	struct k_itimer *new_timer;
+	int error, new_timer_id;
+	struct task_struct *process;
 	sigevent_t event;
 	int it_id_set = IT_ID_NOT_SET;
 
@@ -490,12 +489,11 @@ sys_timer_create(const clockid_t which_clock,
 		goto out;
 	}
 	spin_lock_irq(&idr_lock);
-	error = idr_get_new(&posix_timers_id, (void *) new_timer,
-			    &new_timer_id);
+	error = idr_get_new(&posix_timers_id, new_timer, &new_timer_id);
 	spin_unlock_irq(&idr_lock);
-	if (error == -EAGAIN)
-		goto retry;
-	else if (error) {
+	if (error) {
+		if (error == -EAGAIN)
+			goto retry;
 		/*
 		 * Weird looking, but we return EAGAIN if the IDR is
 		 * full (proper POSIX return value for this)
@@ -526,67 +524,43 @@ sys_timer_create(const clockid_t which_clock,
 			error = -EFAULT;
 			goto out;
 		}
-		new_timer->it_sigev_notify = event.sigev_notify;
-		new_timer->it_sigev_signo = event.sigev_signo;
-		new_timer->it_sigev_value = event.sigev_value;
-
-		read_lock(&tasklist_lock);
-		if ((process = good_sigevent(&event))) {
-			/*
-			 * We may be setting up this process for another
-			 * thread. It may be exiting. To catch this
-			 * case the we check the PF_EXITING flag. If
-			 * the flag is not set, the siglock will catch
-			 * him before it is too late (in exit_itimers).
-			 *
-			 * The exec case is a bit more invloved but easy
-			 * to code. If the process is in our thread
-			 * group (and it must be or we would not allow
-			 * it here) and is doing an exec, it will cause
-			 * us to be killed. In this case it will wait
-			 * for us to die which means we can finish this
-			 * linkage with our last gasp. I.e. no code :)
-			 */
-			spin_lock_irqsave(&process->sighand->siglock, flags);
-			if (!(process->flags & PF_EXITING)) {
-				new_timer->it_process = process;
-				list_add(&new_timer->list,
-					 &process->signal->posix_timers);
-				if (new_timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
-					get_task_struct(process);
-				spin_unlock_irqrestore(&process->sighand->siglock, flags);
-			} else {
-				spin_unlock_irqrestore(&process->sighand->siglock, flags);
-				process = NULL;
-			}
-		}
-		read_unlock(&tasklist_lock);
+		rcu_read_lock();
+		process = good_sigevent(&event);
+		if (process)
+			get_task_struct(process);
+		rcu_read_unlock();
 		if (!process) {
 			error = -EINVAL;
 			goto out;
 		}
 	} else {
-		new_timer->it_sigev_notify = SIGEV_SIGNAL;
-		new_timer->it_sigev_signo = SIGALRM;
-		new_timer->it_sigev_value.sival_int = new_timer->it_id;
+		event.sigev_notify = SIGEV_SIGNAL;
+		event.sigev_signo = SIGALRM;
+		event.sigev_value.sival_int = new_timer->it_id;
 		process = current->group_leader;
-		spin_lock_irqsave(&process->sighand->siglock, flags);
-		new_timer->it_process = process;
-		list_add(&new_timer->list, &process->signal->posix_timers);
-		spin_unlock_irqrestore(&process->sighand->siglock, flags);
+		get_task_struct(process);
 	}
 
+	new_timer->it_sigev_notify = event.sigev_notify;
+	new_timer->sigq->info.si_signo = event.sigev_signo;
+	new_timer->sigq->info.si_value = event.sigev_value;
+	new_timer->sigq->info.si_tid = new_timer->it_id;
+	new_timer->sigq->info.si_code = SI_TIMER;
+
+	spin_lock_irq(&current->sighand->siglock);
+	new_timer->it_process = process;
+	list_add(&new_timer->list, &current->signal->posix_timers);
+	spin_unlock_irq(&current->sighand->siglock);
+
+	return 0;
 	/*
 	 * In the case of the timer belonging to another task, after
 	 * the task is unlocked, the timer is owned by the other task
 	 * and may cease to exist at any time.  Don't use or modify
 	 * new_timer after the unlock call.
 	 */
-
 out:
-	if (error)
-		release_posix_timer(new_timer, it_id_set);
-
+	release_posix_timer(new_timer, it_id_set);
 	return error;
 }
 
@@ -597,7 +571,7 @@ out:
  * the find to the timer lock.  To avoid a dead lock, the timer id MUST
  * be release with out holding the timer lock.
  */
-static struct k_itimer * lock_timer(timer_t timer_id, unsigned long *flags)
+static struct k_itimer *lock_timer(timer_t timer_id, unsigned long *flags)
 {
 	struct k_itimer *timr;
 	/*
@@ -605,23 +579,20 @@ static struct k_itimer * lock_timer(timer_t timer_id, unsigned long *flags)
 	 * flags part over to the timer lock.  Must not let interrupts in
 	 * while we are moving the lock.
 	 */
-
 	spin_lock_irqsave(&idr_lock, *flags);
-	timr = (struct k_itimer *) idr_find(&posix_timers_id, (int) timer_id);
+	timr = idr_find(&posix_timers_id, (int)timer_id);
 	if (timr) {
 		spin_lock(&timr->it_lock);
-
-		if ((timr->it_id != timer_id) || !(timr->it_process) ||
-				!same_thread_group(timr->it_process, current)) {
-			spin_unlock(&timr->it_lock);
-			spin_unlock_irqrestore(&idr_lock, *flags);
-			timr = NULL;
-		} else
+		if (timr->it_process &&
+		    same_thread_group(timr->it_process, current)) {
 			spin_unlock(&idr_lock);
-	} else
-		spin_unlock_irqrestore(&idr_lock, *flags);
+			return timr;
+		}
+		spin_unlock(&timr->it_lock);
+	}
+	spin_unlock_irqrestore(&idr_lock, *flags);
 
-	return timr;
+	return NULL;
 }
 
 /*
@@ -668,7 +639,7 @@ common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
 	    (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE))
 		timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv);
 
-	remaining = ktime_sub(timer->expires, now);
+	remaining = ktime_sub(hrtimer_get_expires(timer), now);
 	/* Return 0 only, when the timer is expired and not pending */
 	if (remaining.tv64 <= 0) {
 		/*
@@ -762,7 +733,7 @@ common_timer_set(struct k_itimer *timr, int flags,
 	hrtimer_init(&timr->it.real.timer, timr->it_clock, mode);
 	timr->it.real.timer.function = posix_timer_fn;
 
-	timer->expires = timespec_to_ktime(new_setting->it_value);
+	hrtimer_set_expires(timer, timespec_to_ktime(new_setting->it_value));
 
 	/* Convert interval */
 	timr->it.real.interval = timespec_to_ktime(new_setting->it_interval);
@@ -771,14 +742,12 @@ common_timer_set(struct k_itimer *timr, int flags,
 	if (((timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) {
 		/* Setup correct expiry time for relative timers */
 		if (mode == HRTIMER_MODE_REL) {
-			timer->expires =
-				ktime_add_safe(timer->expires,
-					       timer->base->get_time());
+			hrtimer_add_expires(timer, timer->base->get_time());
 		}
 		return 0;
 	}
 
-	hrtimer_start(timer, timer->expires, mode);
+	hrtimer_start_expires(timer, mode);
 	return 0;
 }
 
@@ -862,8 +831,7 @@ retry_delete:
 	 * This keeps any tasks waiting on the spin lock from thinking
 	 * they got something (see the lock code above).
 	 */
-	if (timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
-		put_task_struct(timer->it_process);
+	put_task_struct(timer->it_process);
 	timer->it_process = NULL;
 
 	unlock_timer(timer, flags);
@@ -890,8 +858,7 @@ retry_delete:
 	 * This keeps any tasks waiting on the spin lock from thinking
 	 * they got something (see the lock code above).
 	 */
-	if (timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
-		put_task_struct(timer->it_process);
+	put_task_struct(timer->it_process);
 	timer->it_process = NULL;
 
 	unlock_timer(timer, flags);
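
The first two hunks above register CLOCK_MONOTONIC_RAW as a new posix clock backed by getrawmonotonic(). For reference only (not part of this patch), a minimal user-space sketch of reading the new clock through the standard clock_gettime() interface; it assumes a kernel and C library that expose CLOCK_MONOTONIC_RAW, and older glibc builds may need -lrt at link time:

/* Read the raw, NTP-unadjusted monotonic clock registered by this patch. */
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	if (clock_gettime(CLOCK_MONOTONIC_RAW, &ts) != 0) {
		perror("clock_gettime(CLOCK_MONOTONIC_RAW)");
		return 1;
	}
	printf("raw monotonic time: %ld.%09ld\n",
	       (long)ts.tv_sec, (long)ts.tv_nsec);
	return 0;
}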
